hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M)
---|---|---|---
bef59b2e7294c551405091200e389752d10f80bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdint.h>
extern "C" __global__ void vectorAdd(int *A, int *B, int *C, uint64_t N)
{
uint64_t i = (uint64_t)blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
|
bef59b2e7294c551405091200e389752d10f80bc.cu
|
#include <cuda.h>
#include <stdint.h>
extern "C" __global__ void vectorAdd(int *A, int *B, int *C, uint64_t N)
{
uint64_t i = (uint64_t)blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
|
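The pair above differs only in the hipify banner and the runtime header, since the kernel contains no launch syntax. As an illustrative aside (not part of the dataset rows), the sketch below is a minimal, self-contained HIP host driver around this vectorAdd kernel, using the hipLaunchKernelGGL launch form that the hipified rows further down exhibit; the vector size, block size, and the absence of error checking are assumptions for brevity.

```cpp
// Minimal sketch (assumed host driver, not from the dataset): launches the
// vectorAdd kernel shown above with the hipify-style launch macro.
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Kernel body copied from the hipified file above.
extern "C" __global__ void vectorAdd(int *A, int *B, int *C, uint64_t N)
{
    uint64_t i = (uint64_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}

int main()
{
    const uint64_t N = 1 << 20;           // assumed problem size
    const size_t bytes = N * sizeof(int);
    int *hA = (int *)malloc(bytes), *hB = (int *)malloc(bytes), *hC = (int *)malloc(bytes);
    for (uint64_t i = 0; i < N; ++i) { hA[i] = 1; hB[i] = 2; }

    int *dA, *dB, *dC;
    hipMalloc((void **)&dA, bytes);
    hipMalloc((void **)&dB, bytes);
    hipMalloc((void **)&dC, bytes);
    hipMemcpy(dA, hA, bytes, hipMemcpyHostToDevice);
    hipMemcpy(dB, hB, bytes, hipMemcpyHostToDevice);

    // CUDA source would read:  vectorAdd<<<blocks, 256>>>(dA, dB, dC, N);
    // hipify rewrites the launch to hipLaunchKernelGGL, as the rows below show.
    const unsigned int blocks = (unsigned int)((N + 255) / 256);
    hipLaunchKernelGGL(vectorAdd, dim3(blocks), dim3(256), 0, 0, dA, dB, dC, N);

    hipMemcpy(hC, dC, bytes, hipMemcpyDeviceToHost);
    printf("hC[0] = %d (expected 3)\n", hC[0]);

    hipFree(dA); hipFree(dB); hipFree(dC);
    free(hA); free(hB); free(hC);
    return 0;
}
```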
ed0d5ef0364e20908b0cbe7c90d8acb3a990d176.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
#include "caffe2/operators/softmax_with_loss_op.h"
#include "caffe2/operators/spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = fmaxf(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = expf(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -logf(fmaxf(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
// Ignore label, so set all gradients for this position
// to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(fmaxf(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->CopySameDevice<float>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
hipLaunchKernelGGL(( SoftmaxNormalizeKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, probs, scales, probs);
} else {
hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
auto* P =
Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.dim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.dim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.numel(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
auto* avg_loss =
Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss
if (!losses_.defined()) {
losses_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (losses_.numel() != N) {
losses_.Resize(N);
}
if (!rowmax_.defined()) {
rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (rowmax_.numel() != N) {
rowmax_.Resize(N);
}
if (!sum_multiplier_.defined()) {
sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA));
math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
} else if (sum_multiplier_.numel() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->template mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
hipLaunchKernelGGL(( LabelCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->template mutable_data<float>(), &context_);
} else {
hipLaunchKernelGGL(( ProbCrossEntropyKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Average by the total weight (which defaults to the batch size)
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
auto* P =
Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
CAFFE_ENFORCE_EQ(X.dim(), 4);
CAFFE_ENFORCE_EQ(T.dim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (!losses_.defined()) {
losses_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA));
} else if (losses_.numel() != N * W * H) {
losses_.Resize(N * W * H);
}
if (!weights_.defined()) {
weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA));
} else if (weights_.numel() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->template mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( SpatialSoftmaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, W, H, Xdata, Pdata);
// Cross entropy
auto* avg_loss =
Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.numel(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
Tensor* dX;
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX = OutputTensorAlias(0, P);
dX->ResizeLike(X);
} else {
dX = Output(0, X.sizes(), at::dtype<float>());
}
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.dim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.dim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.numel(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.CopySameDevice<float>(
P.numel(), P.data<float>(), dX->template mutable_data<float>());
}
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>(),
weights);
}
} else {
hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<float>(),
dX->template mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->numel(),
scale_ / total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->numel(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
Tensor* dX;
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX = OutputTensorAlias(0, P);
dX->ResizeLike(X);
} else {
dX = Output(0, X.sizes(), at::dtype<float>());
}
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.dim(), 4);
CAFFE_ENFORCE_EQ(T.dim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (!weights_.defined()) {
weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA));
} else if (weights_.numel() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->template mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. Every neuron except the one
// corresponding to the correct label has a gradient equal to its
// probability under softmax.
context_.CopySameDevice<float>(P.numel(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.numel(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
CUDA_CHECK(hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->numel(),
scale_ / h_total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->numel(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
auto* P = Output(0, X.sizes(), at::dtype<float>());
auto* P_data = P->mutable_data<float>();
if (N == 0) {
return true;
}
if (!sum_multiplier_.defined()) {
sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA));
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
} else if (sum_multiplier_.numel() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (!scale_.defined()) {
scale_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (scale_.numel() != N) {
scale_.Resize(N);
}
if (!rowmax_.defined()) {
rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (rowmax_.numel() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
auto* dX_data = dX->mutable_data<float>();
if (N == 0) {
return true;
}
hipLaunchKernelGGL(( softmax_gradient_kernel),
dim3(N),
dim3(SOFTMAX_NUM_THREADS),
0,
context_.cuda_stream(), D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
ed0d5ef0364e20908b0cbe7c90d8acb3a990d176.cu
|
#include <cfloat>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
#include "caffe2/operators/softmax_with_loss_op.h"
#include "caffe2/operators/spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = fmaxf(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = expf(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -logf(fmaxf(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
// Ignore label, so set all gradients for this position
// to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(fmaxf(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->CopySameDevice<float>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
SoftmaxNormalizeKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, probs, scales, probs);
} else {
SoftmaxNormalizeLogsKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
auto* P =
Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.dim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.dim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.numel(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
auto* avg_loss =
Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss
if (!losses_.defined()) {
losses_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (losses_.numel() != N) {
losses_.Resize(N);
}
if (!rowmax_.defined()) {
rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (rowmax_.numel() != N) {
rowmax_.Resize(N);
}
if (!sum_multiplier_.defined()) {
sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA));
math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
} else if (sum_multiplier_.numel() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->template mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->template mutable_data<float>(), &context_);
} else {
ProbCrossEntropyKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Average by the total weight (which defaults to the batch size)
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
auto* P =
Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
CAFFE_ENFORCE_EQ(X.dim(), 4);
CAFFE_ENFORCE_EQ(T.dim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (!losses_.defined()) {
losses_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA));
} else if (losses_.numel() != N * W * H) {
losses_.Resize(N * W * H);
}
if (!weights_.defined()) {
weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA));
} else if (weights_.numel() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->template mutable_data<float>();
// Softmax for each x,y location
SpatialSoftmaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, W, H, Xdata, Pdata);
// Cross entropy
auto* avg_loss =
Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.numel(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
Tensor* dX;
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX = OutputTensorAlias(0, P);
dX->ResizeLike(X);
} else {
dX = Output(0, X.sizes(), at::dtype<float>());
}
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.dim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.dim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.numel(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.CopySameDevice<float>(
P.numel(), P.data<float>(), dX->template mutable_data<float>());
}
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
LabelCrossEntropyGradientKernelWeighted<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>(),
weights);
}
} else {
ProbCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<float>(),
dX->template mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->numel(),
scale_ / total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->numel(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
Tensor* dX;
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX = OutputTensorAlias(0, P);
dX->ResizeLike(X);
} else {
dX = Output(0, X.sizes(), at::dtype<float>());
}
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA));
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.dim(), 4);
CAFFE_ENFORCE_EQ(T.dim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (!weights_.defined()) {
weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA));
} else if (weights_.numel() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->template mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. Every neuron except the one
// corresponding to the correct label has a gradient equal to its
// probability under softmax.
context_.CopySameDevice<float>(P.numel(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialSoftmaxLossGradientKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.numel(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->numel(),
scale_ / h_total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->numel(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
auto* P = Output(0, X.sizes(), at::dtype<float>());
auto* P_data = P->mutable_data<float>();
if (N == 0) {
return true;
}
if (!sum_multiplier_.defined()) {
sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA));
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
} else if (sum_multiplier_.numel() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (!scale_.defined()) {
scale_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (scale_.numel() != N) {
scale_.Resize(N);
}
if (!rowmax_.defined()) {
rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA));
} else if (rowmax_.numel() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
auto* dX_data = dX->mutable_data<float>();
if (N == 0) {
return true;
}
softmax_gradient_kernel<<<
N,
SOFTMAX_NUM_THREADS,
0,
context_.cuda_stream()>>>(D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
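For quick orientation, the substitutions that this caffe2 pair actually exhibits, collected as a comment-style summary (only what these two files show, not the full hipify mapping):

```cpp
// CUDA source                                   -> hipify output (as seen above)
// #include <cub/block/block_reduce.cuh>         -> #include <hipcub/hipcub.hpp>
// cub::BlockReduce<float, N>                    -> hipcub::BlockReduce<float, N>
// cudaMemcpyAsync / cudaMemcpyDeviceToHost      -> hipMemcpyAsync / hipMemcpyDeviceToHost
// std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)         -> ::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)
// Kernel<<<grid, block, shmem, stream>>>(args...)
//   -> hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(block), shmem, stream, args...)
```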
f94b5109b2689d23b4eab8f3a4704686ed7e54cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
__global__ void histo(uint32_t* d_input, uint32_t* t_bin);
__global__ void convert(uint8_t* d_bin,uint32_t* t_bin);
void opt_2dhisto(uint32_t* d_input,uint8_t* d_bin,uint32_t *t_bin)
{
/* This function should only contain a call to the GPU
histogramming kernel. Any memory allocations and
transfers must be done outside this function */
dim3 dimgrid(INPUT_WIDTH,1,1);
dim3 dimblock(128,1,1);
//dim3 dimblock(512,1,1);
hipLaunchKernelGGL(( histo), dim3(dimgrid),dim3(dimblock), 0, 0, d_input,t_bin);
hipDeviceSynchronize();
dim3 dimgrid2(1+(HISTO_WIDTH*HISTO_HEIGHT-1)/256,1,1);
dim3 dimblock2(256,1,1);
hipLaunchKernelGGL(( convert), dim3(dimgrid2),dim3(dimblock2), 0, 0, d_bin,t_bin);
hipDeviceSynchronize();
}
/* Include below the implementation of any other functions you need */
__global__ void histo(uint32_t* d_input, uint32_t* t_bin)
{
const int globalTid = blockIdx.x*blockDim.x + threadIdx.x;
const int numThreads = blockDim.x*gridDim.x;
//there is a small risk here: when the size of the input is smaller than the size of the histogram,
//this may produce wrong results (several bins will keep non-zero initial values). But in practice the input is always larger than the histogram.
if (globalTid < HISTO_WIDTH*HISTO_HEIGHT)
{
t_bin[globalTid] = 0;
}
__syncthreads();
int input_size = INPUT_WIDTH*INPUT_HEIGHT;
//the idea of this loop comes from the slides: it's better to let each thread process several inputs, rather than a single input.
//I guess this kind of memory access also helps in avoiding bank conflicts (but I'm not sure)
for (int pos = globalTid; pos < input_size; pos+=numThreads)
{
int index=d_input[pos];
//abandoned code:
//atomicAdd(&(t_bin[index]),1);
//reason:
//instead of calling atomicAdd here directly, we first check whether the value has already reached 255; this check reduces the number of atomicAdd calls and helps efficiency a lot
// we can gain 3x performance here with the if statement
if (t_bin[index]<255)
{
atomicAdd(&(t_bin[index]),1);
}
}
}
__global__ void convert(uint8_t* d_bin,uint32_t* t_bin)
{
int globalid=blockIdx.x*blockDim.x+threadIdx.x;
// This check is no longer necessary with the if statement above, but I'd like to keep it. :)
d_bin[globalid]=(uint8_t)(t_bin[globalid]>255? 255:t_bin[globalid]);
}
void opt_2dhisto_init(uint32_t** input, uint32_t* (&d_input), uint8_t* (&d_bin), uint32_t* (&t_bin))
{
hipMalloc((void**) &d_input, INPUT_WIDTH*INPUT_HEIGHT*sizeof(uint32_t));
hipMalloc((void**) &d_bin, HISTO_WIDTH*HISTO_HEIGHT*sizeof(uint8_t));
hipMalloc((void**) &t_bin, HISTO_WIDTH*HISTO_HEIGHT*sizeof(uint32_t));
uint32_t* temp=d_input;
for(int i=0;i<INPUT_HEIGHT;i++)
{
hipMemcpy(temp,input[i],INPUT_WIDTH*sizeof(uint32_t),hipMemcpyHostToDevice);
temp+=INPUT_WIDTH;
}
}
void opt_2dhisto_finalize(uint32_t* &d_input,uint8_t* &d_bin,uint32_t* &t_bin, uint8_t* kernel_bin)
{
hipMemcpy(kernel_bin,d_bin, HISTO_WIDTH*HISTO_HEIGHT*sizeof(uint8_t), hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_bin);
hipFree(t_bin);
}
|
f94b5109b2689d23b4eab8f3a4704686ed7e54cc.cu
|
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
__global__ void histo(uint32_t* d_input, uint32_t* t_bin);
__global__ void convert(uint8_t* d_bin,uint32_t* t_bin);
void opt_2dhisto(uint32_t* d_input,uint8_t* d_bin,uint32_t *t_bin)
{
/* This function should only contain a call to the GPU
histogramming kernel. Any memory allocations and
transfers must be done outside this function */
dim3 dimgrid(INPUT_WIDTH,1,1);
dim3 dimblock(128,1,1);
//dim3 dimblock(512,1,1);
histo<<<dimgrid,dimblock>>>(d_input,t_bin);
cudaThreadSynchronize();
dim3 dimgrid2(1+(HISTO_WIDTH*HISTO_HEIGHT-1)/256,1,1);
dim3 dimblock2(256,1,1);
convert<<<dimgrid2,dimblock2>>>(d_bin,t_bin);
cudaThreadSynchronize();
}
/* Include below the implementation of any other functions you need */
__global__ void histo(uint32_t* d_input, uint32_t* t_bin)
{
const int globalTid = blockIdx.x*blockDim.x + threadIdx.x;
const int numThreads = blockDim.x*gridDim.x;
//there is a small risk here: when the size of the input is smaller than the size of the histogram,
//this may produce wrong results (several bins will keep non-zero initial values). But in practice the input is always larger than the histogram.
if (globalTid < HISTO_WIDTH*HISTO_HEIGHT)
{
t_bin[globalTid] = 0;
}
__syncthreads();
int input_size = INPUT_WIDTH*INPUT_HEIGHT;
//the idea of this loop comes from the slides: it's better to let each thread process several inputs, rather than a single input.
//I guess this kind of memory access also helps in avoiding bank conflicts (but I'm not sure)
for (int pos = globalTid; pos < input_size; pos+=numThreads)
{
int index=d_input[pos];
//abandoned code:
//atomicAdd(&(t_bin[index]),1);
//reason:
//instead of calling atomicAdd here directly, we first check whether the value has already reached 255; this check reduces the number of atomicAdd calls and helps efficiency a lot
// we can gain 3x performance here with the if statement
if (t_bin[index]<255)
{
atomicAdd(&(t_bin[index]),1);
}
}
}
__global__ void convert(uint8_t* d_bin,uint32_t* t_bin)
{
int globalid=blockIdx.x*blockDim.x+threadIdx.x;
// This check is no longer necessary with the if statement above, but I'd like to keep it. :)
d_bin[globalid]=(uint8_t)(t_bin[globalid]>255? 255:t_bin[globalid]);
}
void opt_2dhisto_init(uint32_t** input, uint32_t* (&d_input), uint8_t* (&d_bin), uint32_t* (&t_bin))
{
cudaMalloc((void**) &d_input, INPUT_WIDTH*INPUT_HEIGHT*sizeof(uint32_t));
cudaMalloc((void**) &d_bin, HISTO_WIDTH*HISTO_HEIGHT*sizeof(uint8_t));
cudaMalloc((void**) &t_bin, HISTO_WIDTH*HISTO_HEIGHT*sizeof(uint32_t));
uint32_t* temp=d_input;
for(int i=0;i<INPUT_HEIGHT;i++)
{
cudaMemcpy(temp,input[i],INPUT_WIDTH*sizeof(uint32_t),cudaMemcpyHostToDevice);
temp+=INPUT_WIDTH;
}
}
void opt_2dhisto_finalize(uint32_t* &d_input,uint8_t* &d_bin,uint32_t* &t_bin, uint8_t* kernel_bin)
{
cudaMemcpy(kernel_bin,d_bin, HISTO_WIDTH*HISTO_HEIGHT*sizeof(uint8_t), cudaMemcpyDeviceToHost);
cudaFree(d_input);
cudaFree(d_bin);
cudaFree(t_bin);
}
|
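Likewise, the runtime-API substitutions visible in the histogram pair above (again limited to what these rows show):

```cpp
// cudaMalloc / cudaMemcpy / cudaFree            -> hipMalloc / hipMemcpy / hipFree
// cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost -> hipMemcpyHostToDevice, hipMemcpyDeviceToHost
// cudaThreadSynchronize()                       -> hipDeviceSynchronize()  // the deprecated thread-wide sync becomes a device-wide sync
// histo<<<dimgrid, dimblock>>>(d_input, t_bin)
//   -> hipLaunchKernelGGL(( histo), dim3(dimgrid), dim3(dimblock), 0, 0, d_input, t_bin)
```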
7a514ad87af321ce592707241c6fd183df819aba.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <gtest/gtest.h>
#include <random>
#include <vector>
#include "dali/core/mm/async_pool.h"
#include "dali/core/dev_buffer.h"
#include "dali/core/mm/mm_test_utils.h"
#include "dali/core/cuda_stream.h"
#include "dali/core/mm/cuda_vm_resource.h"
namespace dali {
namespace mm {
struct GPUHog {
~GPUHog() {
if (mem) {
CUDA_DTOR_CALL(hipFree(mem));
mem = nullptr;
}
}
void init() {
if (!mem)
CUDA_CALL(hipMalloc(&mem, size));
}
void run(hipStream_t stream, int count = 1) {
for (int i = 0; i < count; i++) {
CUDA_CALL(hipMemsetAsync(mem, i+1, size, stream));
}
}
uint8_t *mem = nullptr;
size_t size = 16<<20;
};
TEST(MMAsyncPool, SingleStreamReuse) {
GPUHog hog;
hog.init();
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true);
test::test_device_resource upstream;
async_pool_resource<memory_kind::device> pool(&upstream);
stream_view sv(stream);
int size1 = 1<<20;
void *ptr = pool.allocate_async(size1, sv);
hog.run(stream, 2);
pool.deallocate_async(ptr, size1, sv);
void *p2 = pool.allocate_async(size1, sv);
CUDA_CALL(hipStreamSynchronize(stream));
EXPECT_EQ(ptr, p2);
}
TEST(MMAsyncPool, TwoStream) {
mm::test::test_device_resource upstream;
HIPStreamMasqueradingAsCUDA s1 = HIPStreamMasqueradingAsCUDA::Create(true);
HIPStreamMasqueradingAsCUDA s2 = HIPStreamMasqueradingAsCUDA::Create(true);
stream_view sv1(s1);
stream_view sv2(s2);
GPUHog hog;
hog.init();
const int min_success = 10;
const int max_not_busy = 100;
int stream_not_busy = 0;
int success = 0;
while (success < min_success) {
async_pool_resource<memory_kind::device> pool(&upstream);
void *p1 = pool.allocate_async(1000, sv1);
hog.run(s1);
pool.deallocate_async(p1, 1000, sv1);
void *p2 = pool.allocate_async(1000, sv2);
void *p3 = pool.allocate_async(1000, sv1);
hipError_t e = hipStreamQuery(s1);
if (e != hipErrorNotReady) {
std::cerr << "Stream s1 finished before attempt to allocate on s2 was made - retrying\n";
CUDA_CALL(hipGetLastError());
if (++stream_not_busy > max_not_busy) {
FAIL() << "Stream s1 finished - test unreliable.";
}
continue;
}
stream_not_busy = 0;
ASSERT_NE(p1, p2);
ASSERT_EQ(p1, p3);
CUDA_CALL(hipStreamSynchronize(s1));
success++;
CUDA_CALL(hipStreamSynchronize(s2));
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
namespace {
__global__ void Check(const void *ptr, size_t size, uint8_t fill, int *failures) {
size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < size) {
if (static_cast<const uint8_t*>(ptr)[idx] != fill)
atomicAdd(failures, 1);
}
}
struct block {
void *ptr;
size_t size;
uint8_t fill;
hipStream_t stream;
};
template <typename Pool, typename Mutex>
void AsyncPoolTest(Pool &pool, vector<block> &blocks, Mutex &mtx, HIPStreamMasqueradingAsCUDA &stream,
int max_iters = 20000, bool use_hog = false) {
stream_view sv(stream);
std::mt19937_64 rng(12345);
std::poisson_distribution<> size_dist(1024);
const int max_size = 1 << 20;
std::uniform_int_distribution<> sync_dist(10, 10);
std::bernoulli_distribution action_dist;
std::bernoulli_distribution hog_dist(0.05f);
std::uniform_int_distribution<> fill_dist(1, 255);
DeviceBuffer<int> failure_buf;
int failures = 0;
failure_buf.from_host(&failures, 1, sv.get());
GPUHog hog;
if (use_hog)
hog.init();
int hogs = 0;
int max_hogs = sync_dist(rng);
CUDAEvent event = CUDAEvent::Create();
for (int i = 0; i < max_iters; i++) {
if (i == max_iters / 2)
pool.release_unused();
if (use_hog && hog_dist(rng)) {
if (hogs++ > max_hogs) {
CUDA_CALL(hipStreamSynchronize(stream));
max_hogs = sync_dist(rng);
}
hog.run(stream);
}
if (action_dist(rng) || blocks.empty()) {
size_t size;
do {
size = size_dist(rng);
} while (size > max_size);
uint8_t fill = fill_dist(rng);
void *ptr = stream ? pool.allocate_async(size, sv) : pool.allocate(size);
CUDA_CALL(hipMemsetAsync(ptr, fill, size, stream));
{
std::lock_guard<Mutex> guard(mtx);
(void)guard; // for dummy mutexes
blocks.push_back({ ptr, size, fill, stream });
}
} else {
block blk;
{
std::lock_guard<Mutex> guard(mtx);
(void)guard; // for dummy mutexes
if (blocks.empty())
continue;
int i = std::uniform_int_distribution<>(0, blocks.size()-1)(rng);
std::swap(blocks[i], blocks.back());
blk = blocks.back();
blocks.pop_back();
}
if (blk.stream != stream) {
if (stream) {
CUDA_CALL(hipEventRecord(event, blk.stream));
CUDA_CALL(hipStreamWaitEvent(stream, event, 0));
} else {
CUDA_CALL(hipStreamSynchronize(blk.stream));
}
}
hipLaunchKernelGGL(( Check), dim3(div_ceil(blk.size, 1024)), dim3(1024), 0, stream,
blk.ptr, blk.size, blk.fill, failure_buf);
if (stream) {
pool.deallocate_async(blk.ptr, blk.size, sv);
} else {
CUDA_CALL(hipStreamSynchronize(stream));
pool.deallocate(blk.ptr, blk.size);
}
}
}
copyD2H<int>(&failures, failure_buf, 1, AccessOrder(stream));
CUDA_CALL(hipStreamSynchronize(stream));
ASSERT_EQ(failures, 0);
}
} // namespace
TEST(MMAsyncPool, SingleStreamRandom) {
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true);
test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream);
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream);
}
CUDA_CALL(hipStreamSynchronize(stream));
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, MultiThreadedSingleStreamRandom) {
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true);
mm::test::test_device_resource upstream;
{
vector<block> blocks;
std::mutex mtx;
async_pool_resource<memory_kind::device> pool(&upstream);
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
AsyncPoolTest(pool, blocks, mtx, stream);
}));
}
for (auto &t : threads)
t.join();
}
CUDA_CALL(hipStreamSynchronize(stream));
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, MultiThreadedMultiStreamRandom) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream);
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true);
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream);
CUDA_CALL(hipStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, MultiStreamRandomWithGPUHogs) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream, false);
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
// 0-th thread uses null stream, which triggers non-async API usage
HIPStreamMasqueradingAsCUDA stream = t ? HIPStreamMasqueradingAsCUDA::Create(true) : HIPStreamMasqueradingAsCUDA();
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream, 20000, true);
CUDA_CALL(hipStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, CrossStream) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream, false);
vector<std::thread> threads;
vector<HIPStreamMasqueradingAsCUDA> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = HIPStreamMasqueradingAsCUDA::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t]);
CUDA_CALL(hipStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, CrossStreamWithHogs) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream);
vector<std::thread> threads;
vector<HIPStreamMasqueradingAsCUDA> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = HIPStreamMasqueradingAsCUDA::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true);
CUDA_CALL(hipStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
#if DALI_USE_CUDA_VM_MAP
TEST(MM_VMAsyncPool, MultiThreadedSingleStreamRandom) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true);
{
vector<block> blocks;
std::mutex mtx;
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
AsyncPoolTest(pool, blocks, mtx, stream);
}));
}
for (auto &t : threads)
t.join();
}
}
TEST(MM_VMAsyncPool, MultiThreadedMultiStreamRandom) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true);
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream);
CUDA_CALL(hipStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
TEST(MM_VMAsyncPool, MultiStreamRandomWithGPUHogs) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
// 0-th thread uses null stream, which triggers non-async API usage
HIPStreamMasqueradingAsCUDA stream = t ? HIPStreamMasqueradingAsCUDA::Create(true) : HIPStreamMasqueradingAsCUDA();
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream, 20000, true);
CUDA_CALL(hipStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
TEST(MM_VMAsyncPool, CrossStream) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
vector<HIPStreamMasqueradingAsCUDA> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = HIPStreamMasqueradingAsCUDA::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t]);
CUDA_CALL(hipStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
TEST(MM_VMAsyncPool, CrossStreamWithHogs) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
vector<HIPStreamMasqueradingAsCUDA> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = HIPStreamMasqueradingAsCUDA::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true);
CUDA_CALL(hipStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
#endif
} // namespace mm
} // namespace dali
|
7a514ad87af321ce592707241c6fd183df819aba.cu
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <gtest/gtest.h>
#include <random>
#include <vector>
#include "dali/core/mm/async_pool.h"
#include "dali/core/dev_buffer.h"
#include "dali/core/mm/mm_test_utils.h"
#include "dali/core/cuda_stream.h"
#include "dali/core/mm/cuda_vm_resource.h"
namespace dali {
namespace mm {
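// Keeps the GPU busy with large memsets so that work queued on a stream is still
// pending while the tests exercise stream-ordered reuse and cross-stream waits.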
struct GPUHog {
~GPUHog() {
if (mem) {
CUDA_DTOR_CALL(cudaFree(mem));
mem = nullptr;
}
}
void init() {
if (!mem)
CUDA_CALL(cudaMalloc(&mem, size));
}
void run(cudaStream_t stream, int count = 1) {
for (int i = 0; i < count; i++) {
CUDA_CALL(cudaMemsetAsync(mem, i+1, size, stream));
}
}
uint8_t *mem = nullptr;
size_t size = 16<<20;
};
TEST(MMAsyncPool, SingleStreamReuse) {
GPUHog hog;
hog.init();
CUDAStream stream = CUDAStream::Create(true);
test::test_device_resource upstream;
async_pool_resource<memory_kind::device> pool(&upstream);
stream_view sv(stream);
int size1 = 1<<20;
void *ptr = pool.allocate_async(size1, sv);
hog.run(stream, 2);
pool.deallocate_async(ptr, size1, sv);
void *p2 = pool.allocate_async(size1, sv);
CUDA_CALL(cudaStreamSynchronize(stream));
EXPECT_EQ(ptr, p2);
}
TEST(MMAsyncPool, TwoStream) {
mm::test::test_device_resource upstream;
CUDAStream s1 = CUDAStream::Create(true);
CUDAStream s2 = CUDAStream::Create(true);
stream_view sv1(s1);
stream_view sv2(s2);
GPUHog hog;
hog.init();
const int min_success = 10;
const int max_not_busy = 100;
int stream_not_busy = 0;
int success = 0;
while (success < min_success) {
async_pool_resource<memory_kind::device> pool(&upstream);
void *p1 = pool.allocate_async(1000, sv1);
hog.run(s1);
pool.deallocate_async(p1, 1000, sv1);
void *p2 = pool.allocate_async(1000, sv2);
void *p3 = pool.allocate_async(1000, sv1);
cudaError_t e = cudaStreamQuery(s1);
if (e != cudaErrorNotReady) {
std::cerr << "Stream s1 finished before attempt to allocate on s2 was made - retrying\n";
CUDA_CALL(cudaGetLastError());
if (++stream_not_busy > max_not_busy) {
FAIL() << "Stream s1 finished - test unreliable.";
}
continue;
}
stream_not_busy = 0;
ASSERT_NE(p1, p2);
ASSERT_EQ(p1, p3);
CUDA_CALL(cudaStreamSynchronize(s1));
success++;
CUDA_CALL(cudaStreamSynchronize(s2));
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
namespace {
__global__ void Check(const void *ptr, size_t size, uint8_t fill, int *failures) {
size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < size) {
if (static_cast<const uint8_t*>(ptr)[idx] != fill)
atomicAdd(failures, 1);
}
}
struct block {
void *ptr;
size_t size;
uint8_t fill;
cudaStream_t stream;
};
template <typename Pool, typename Mutex>
void AsyncPoolTest(Pool &pool, vector<block> &blocks, Mutex &mtx, CUDAStream &stream,
int max_iters = 20000, bool use_hog = false) {
stream_view sv(stream);
std::mt19937_64 rng(12345);
std::poisson_distribution<> size_dist(1024);
const int max_size = 1 << 20;
std::uniform_int_distribution<> sync_dist(10, 10);
std::bernoulli_distribution action_dist;
std::bernoulli_distribution hog_dist(0.05f);
std::uniform_int_distribution<> fill_dist(1, 255);
DeviceBuffer<int> failure_buf;
int failures = 0;
failure_buf.from_host(&failures, 1, sv.get());
GPUHog hog;
if (use_hog)
hog.init();
int hogs = 0;
int max_hogs = sync_dist(rng);
CUDAEvent event = CUDAEvent::Create();
for (int i = 0; i < max_iters; i++) {
if (i == max_iters / 2)
pool.release_unused();
if (use_hog && hog_dist(rng)) {
if (hogs++ > max_hogs) {
CUDA_CALL(cudaStreamSynchronize(stream));
max_hogs = sync_dist(rng);
}
hog.run(stream);
}
if (action_dist(rng) || blocks.empty()) {
size_t size;
do {
size = size_dist(rng);
} while (size > max_size);
uint8_t fill = fill_dist(rng);
void *ptr = stream ? pool.allocate_async(size, sv) : pool.allocate(size);
CUDA_CALL(cudaMemsetAsync(ptr, fill, size, stream));
{
std::lock_guard<Mutex> guard(mtx);
(void)guard; // for dummy mutexes
blocks.push_back({ ptr, size, fill, stream });
}
} else {
block blk;
{
std::lock_guard<Mutex> guard(mtx);
(void)guard; // for dummy mutexes
if (blocks.empty())
continue;
int i = std::uniform_int_distribution<>(0, blocks.size()-1)(rng);
std::swap(blocks[i], blocks.back());
blk = blocks.back();
blocks.pop_back();
}
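      // The block may have been filled on a different stream; make this stream
      // wait for that work (via an event) before checking its contents, or fully
      // synchronize when running on the default stream.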
if (blk.stream != stream) {
if (stream) {
CUDA_CALL(cudaEventRecord(event, blk.stream));
CUDA_CALL(cudaStreamWaitEvent(stream, event, 0));
} else {
CUDA_CALL(cudaStreamSynchronize(blk.stream));
}
}
Check<<<div_ceil(blk.size, 1024), 1024, 0, stream>>>(
blk.ptr, blk.size, blk.fill, failure_buf);
if (stream) {
pool.deallocate_async(blk.ptr, blk.size, sv);
} else {
CUDA_CALL(cudaStreamSynchronize(stream));
pool.deallocate(blk.ptr, blk.size);
}
}
}
copyD2H<int>(&failures, failure_buf, 1, AccessOrder(stream));
CUDA_CALL(cudaStreamSynchronize(stream));
ASSERT_EQ(failures, 0);
}
} // namespace
TEST(MMAsyncPool, SingleStreamRandom) {
CUDAStream stream = CUDAStream::Create(true);
test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream);
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream);
}
CUDA_CALL(cudaStreamSynchronize(stream));
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, MultiThreadedSingleStreamRandom) {
CUDAStream stream = CUDAStream::Create(true);
mm::test::test_device_resource upstream;
{
vector<block> blocks;
std::mutex mtx;
async_pool_resource<memory_kind::device> pool(&upstream);
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
AsyncPoolTest(pool, blocks, mtx, stream);
}));
}
for (auto &t : threads)
t.join();
}
CUDA_CALL(cudaStreamSynchronize(stream));
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, MultiThreadedMultiStreamRandom) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream);
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
CUDAStream stream = CUDAStream::Create(true);
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream);
CUDA_CALL(cudaStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, MultiStreamRandomWithGPUHogs) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream, false);
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
// 0-th thread uses null stream, which triggers non-async API usage
CUDAStream stream = t ? CUDAStream::Create(true) : CUDAStream();
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream, 20000, true);
CUDA_CALL(cudaStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, CrossStream) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream, false);
vector<std::thread> threads;
vector<CUDAStream> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = CUDAStream::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t]);
CUDA_CALL(cudaStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
TEST(MMAsyncPool, CrossStreamWithHogs) {
mm::test::test_device_resource upstream;
{
async_pool_resource<memory_kind::device> pool(&upstream);
vector<std::thread> threads;
vector<CUDAStream> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = CUDAStream::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true);
CUDA_CALL(cudaStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n";
std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl;
upstream.check_leaks();
}
#if DALI_USE_CUDA_VM_MAP
TEST(MM_VMAsyncPool, MultiThreadedSingleStreamRandom) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
CUDAStream stream = CUDAStream::Create(true);
{
vector<block> blocks;
std::mutex mtx;
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
AsyncPoolTest(pool, blocks, mtx, stream);
}));
}
for (auto &t : threads)
t.join();
}
}
TEST(MM_VMAsyncPool, MultiThreadedMultiStreamRandom) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
CUDAStream stream = CUDAStream::Create(true);
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream);
CUDA_CALL(cudaStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
TEST(MM_VMAsyncPool, MultiStreamRandomWithGPUHogs) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
for (int t = 0; t < 10; t++) {
threads.push_back(std::thread([&]() {
// 0-th thread uses null stream, which triggers non-async API usage
CUDAStream stream = t ? CUDAStream::Create(true) : CUDAStream();
vector<block> blocks;
detail::dummy_lock mtx;
AsyncPoolTest(pool, blocks, mtx, stream, 20000, true);
CUDA_CALL(cudaStreamSynchronize(stream));
}));
}
for (auto &t : threads)
t.join();
}
TEST(MM_VMAsyncPool, CrossStream) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
vector<CUDAStream> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = CUDAStream::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t]);
CUDA_CALL(cudaStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
TEST(MM_VMAsyncPool, CrossStreamWithHogs) {
if (!cuvm::IsSupported())
GTEST_SKIP() << "Virtual memory management API is not supported on this machine.";
async_pool_resource<memory_kind::device, cuda_vm_resource> pool;
vector<std::thread> threads;
vector<CUDAStream> streams;
vector<block> blocks;
std::mutex mtx;
const int N = 10;
streams.resize(N);
for (int t = 0; t < N; t++) {
if (t != 0) // keep empty stream at index 0 to mix sync/async allocations
streams[t] = CUDAStream::Create(true);
threads.push_back(std::thread([&, t]() {
AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true);
CUDA_CALL(cudaStreamSynchronize(streams[t]));
}));
}
for (auto &t : threads)
t.join();
}
#endif
} // namespace mm
} // namespace dali
|
69db2d08490c7ae1d06f9c443566ab9755d016b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include<iostream>
#include <chrono>
#include <random>
#include "matmul.cuh"
#include "matmul_kernel.cuh"
using namespace std;
float randomFloatWithin(float min, float max){
int some_seed = 111;
std::mt19937 generator(some_seed);
std::uniform_real_distribution<float> dist(min, max);
float pseudorandom_float = dist(generator);
return pseudorandom_float;
}
void initializeFloatArray(float *arr, float min, float max, unsigned int n){
std::random_device source;
std::mt19937_64 generator(source());
std::uniform_real_distribution<float> dist(min, max);
for (unsigned int i = 0; i < n;i++){
arr[i] = dist(generator);
}
}
void initializeDoubleArray(double* arr, double min, double max, unsigned int n) {
std::random_device source;
std::mt19937_64 generator(source());
std::uniform_real_distribution<double> dist(min, max);
for (unsigned int i = 0; i < n; i++) {
arr[i] = dist(generator);
}
}
void initializeIntArray(int* arr, int min, int max, unsigned int n) {
int mod = max - min + 1;
for (unsigned int i = 0; i < n; i++) {
arr[i] = rand() % mod + min;
}
}
int main(int argc, char** argv) {
if (argc != 3) {
return 0;
}
int n = atoi(argv[1]);
int block_dim = atoi(argv[2]);
if (block_dim > 32){
block_dim = 32;
}
if (block_dim > n){
block_dim = n;
}
int* A_int = new int[n * n];
int* B_int = new int[n * n];
int* C_int = new int[n * n];
initializeIntArray(A_int, -10, 10, n * n);
initializeIntArray(B_int, -10, 10, n * n);
float* A_float = new float[n * n];
float* B_float = new float[n * n];
float* C_float = new float[n * n];
initializeFloatArray(A_float, -1.0, 1.0, n * n);
initializeFloatArray(B_float, -1.0, 1.0, n * n);
double* A_double = new double[n * n];
double* B_double = new double[n * n];
double* C_double = new double[n * n];
initializeDoubleArray(A_double, -1.0, 1.0, n * n);
initializeDoubleArray(B_double, -1.0, 1.0, n * n);
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
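    // Each matmul variant below is timed with GPU events recorded around the launch;
    // the elapsed time in milliseconds is read after synchronizing on the stop event.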
// int arrays
int* Ad_int;
int* Bd_int;
int* Cd_int;
hipMalloc((void**)&Ad_int, sizeof(int) * n * n);
hipMalloc((void**)&Bd_int, sizeof(int) * n * n);
hipMalloc((void**)&Cd_int, sizeof(int) * n * n);
hipMemcpy(Ad_int, A_int, sizeof(int) * n * n, hipMemcpyHostToDevice);
hipMemcpy(Bd_int, B_int, sizeof(int) * n * n, hipMemcpyHostToDevice);
hipEventRecord(startEvent, 0);
matmul_1(Ad_int, Bd_int, Cd_int, n, block_dim);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipDeviceSynchronize();
float elapsedTime_int;
hipEventElapsedTime(&elapsedTime_int, startEvent, stopEvent);
hipMemcpy(C_int, Cd_int, sizeof(int) * n * n, hipMemcpyDeviceToHost);
cout << C_int[0] << endl;
cout << C_int[n * n - 1] << endl;
cout << elapsedTime_int << endl;
hipFree(Ad_int);
hipFree(Bd_int);
hipFree(Cd_int);
// float arrays
float* Ad_float;
float* Bd_float;
float* Cd_float;
hipMalloc((void**)&Ad_float, sizeof(float) * n * n);
hipMalloc((void**)&Bd_float, sizeof(float) * n * n);
hipMalloc((void**)&Cd_float, sizeof(float) * n * n);
hipMemcpy(Ad_float, A_float, sizeof(float) * n * n, hipMemcpyHostToDevice);
hipMemcpy(Bd_float, B_float, sizeof(float) * n * n, hipMemcpyHostToDevice);
hipEventRecord(startEvent, 0);
matmul_2(Ad_float, Bd_float, Cd_float, n, block_dim);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipDeviceSynchronize();
float elapsedTime_float;
hipEventElapsedTime(&elapsedTime_float, startEvent, stopEvent);
hipMemcpy(C_float, Cd_float, sizeof(float) * n * n, hipMemcpyDeviceToHost);
cout << C_float[0] << endl;
cout << C_float[n * n - 1] << endl;
cout << elapsedTime_float << endl;
hipFree(Ad_float);
hipFree(Bd_float);
hipFree(Cd_float);
// double arrays
double* Ad_double;
double* Bd_double;
double* Cd_double;
hipMalloc((void**)&Ad_double, sizeof(double) * n * n);
hipMalloc((void**)&Bd_double, sizeof(double) * n * n);
hipMalloc((void**)&Cd_double, sizeof(double) * n * n);
hipMemcpy(Ad_double, A_double, sizeof(double) * n * n, hipMemcpyHostToDevice);
hipMemcpy(Bd_double, B_double, sizeof(double) * n * n, hipMemcpyHostToDevice);
hipEventRecord(startEvent, 0);
matmul_3(Ad_double, Bd_double, Cd_double, n, block_dim);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipDeviceSynchronize();
float elapsedTime_double;
hipEventElapsedTime(&elapsedTime_double, startEvent, stopEvent);
hipMemcpy(C_double, Cd_double, sizeof(double) * n * n, hipMemcpyDeviceToHost);
cout << C_double[0] << endl;
cout << C_double[n * n - 1] << endl;
cout << elapsedTime_double << endl;
hipFree(Ad_double);
hipFree(Bd_double);
hipFree(Cd_double);
return 0;
}
|
69db2d08490c7ae1d06f9c443566ab9755d016b3.cu
|
#include<cuda.h>
#include<iostream>
#include <chrono>
#include <random>
#include "matmul.cuh"
#include "matmul_kernel.cuh"
using namespace std;
float randomFloatWithin(float min, float max){
int some_seed = 111;
std::mt19937 generator(some_seed);
std::uniform_real_distribution<float> dist(min, max);
float pseudorandom_float = dist(generator);
return pseudorandom_float;
}
void initializeFloatArray(float *arr, float min, float max, unsigned int n){
std::random_device source;
std::mt19937_64 generator(source());
std::uniform_real_distribution<float> dist(min, max);
for (unsigned int i = 0; i < n;i++){
arr[i] = dist(generator);
}
}
void initializeDoubleArray(double* arr, double min, double max, unsigned int n) {
std::random_device source;
std::mt19937_64 generator(source());
std::uniform_real_distribution<double> dist(min, max);
for (unsigned int i = 0; i < n; i++) {
arr[i] = dist(generator);
}
}
void initializeIntArray(int* arr, int min, int max, unsigned int n) {
int mod = max - min + 1;
for (unsigned int i = 0; i < n; i++) {
arr[i] = rand() % mod + min;
}
}
int main(int argc, char** argv) {
if (argc != 3) {
return 0;
}
int n = atoi(argv[1]);
int block_dim = atoi(argv[2]);
if (block_dim > 32){
block_dim = 32;
}
if (block_dim > n){
block_dim = n;
}
int* A_int = new int[n * n];
int* B_int = new int[n * n];
int* C_int = new int[n * n];
initializeIntArray(A_int, -10, 10, n * n);
initializeIntArray(B_int, -10, 10, n * n);
float* A_float = new float[n * n];
float* B_float = new float[n * n];
float* C_float = new float[n * n];
initializeFloatArray(A_float, -1.0, 1.0, n * n);
initializeFloatArray(B_float, -1.0, 1.0, n * n);
double* A_double = new double[n * n];
double* B_double = new double[n * n];
double* C_double = new double[n * n];
initializeDoubleArray(A_double, -1.0, 1.0, n * n);
initializeDoubleArray(B_double, -1.0, 1.0, n * n);
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
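    // Each matmul variant below is timed with GPU events recorded around the launch;
    // the elapsed time in milliseconds is read after synchronizing on the stop event.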
// int arrays
int* Ad_int;
int* Bd_int;
int* Cd_int;
cudaMalloc((void**)&Ad_int, sizeof(int) * n * n);
cudaMalloc((void**)&Bd_int, sizeof(int) * n * n);
cudaMalloc((void**)&Cd_int, sizeof(int) * n * n);
cudaMemcpy(Ad_int, A_int, sizeof(int) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(Bd_int, B_int, sizeof(int) * n * n, cudaMemcpyHostToDevice);
cudaEventRecord(startEvent, 0);
matmul_1(Ad_int, Bd_int, Cd_int, n, block_dim);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaDeviceSynchronize();
float elapsedTime_int;
cudaEventElapsedTime(&elapsedTime_int, startEvent, stopEvent);
cudaMemcpy(C_int, Cd_int, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
cout << C_int[0] << endl;
cout << C_int[n * n - 1] << endl;
cout << elapsedTime_int << endl;
cudaFree(Ad_int);
cudaFree(Bd_int);
cudaFree(Cd_int);
// float arrays
float* Ad_float;
float* Bd_float;
float* Cd_float;
cudaMalloc((void**)&Ad_float, sizeof(float) * n * n);
cudaMalloc((void**)&Bd_float, sizeof(float) * n * n);
cudaMalloc((void**)&Cd_float, sizeof(float) * n * n);
cudaMemcpy(Ad_float, A_float, sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(Bd_float, B_float, sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaEventRecord(startEvent, 0);
matmul_2(Ad_float, Bd_float, Cd_float, n, block_dim);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaDeviceSynchronize();
float elapsedTime_float;
cudaEventElapsedTime(&elapsedTime_float, startEvent, stopEvent);
cudaMemcpy(C_float, Cd_float, sizeof(float) * n * n, cudaMemcpyDeviceToHost);
cout << C_float[0] << endl;
cout << C_float[n * n - 1] << endl;
cout << elapsedTime_float << endl;
cudaFree(Ad_float);
cudaFree(Bd_float);
cudaFree(Cd_float);
// double arrays
double* Ad_double;
double* Bd_double;
double* Cd_double;
cudaMalloc((void**)&Ad_double, sizeof(double) * n * n);
cudaMalloc((void**)&Bd_double, sizeof(double) * n * n);
cudaMalloc((void**)&Cd_double, sizeof(double) * n * n);
cudaMemcpy(Ad_double, A_double, sizeof(double) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(Bd_double, B_double, sizeof(double) * n * n, cudaMemcpyHostToDevice);
cudaEventRecord(startEvent, 0);
matmul_3(Ad_double, Bd_double, Cd_double, n, block_dim);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaDeviceSynchronize();
float elapsedTime_double;
cudaEventElapsedTime(&elapsedTime_double, startEvent, stopEvent);
cudaMemcpy(C_double, Cd_double, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
cout << C_double[0] << endl;
cout << C_double[n * n - 1] << endl;
cout << elapsedTime_double << endl;
cudaFree(Ad_double);
cudaFree(Bd_double);
cudaFree(Cd_double);
return 0;
}
|
d1d5eaaf821fc5b22bac9709882a93c08ef52f0d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Matrix_hip.cuh"
class HostMatrixDeleter {
public:
void operator()(float* ptr) const {
if (ptr) {
hipHostFree(ptr);
}
}
};
HostMatrix::HostMatrix(unsigned int height, unsigned int width): Matrix(height, width, false) {
float* rawElements = 0;
hipHostMalloc(&rawElements, height * width * sizeof(float));
elements = std::shared_ptr<float>(rawElements, HostMatrixDeleter());
}
float HostMatrix::getElement(unsigned int i, unsigned int j) const {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
return (elements.get())[i * width + j];
}
}
void HostMatrix::setElement(unsigned int i, unsigned int j, float value) {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
(elements.get())[i * width + j] = value;
}
}
class DeviceMatrixDeleter {
public:
void operator()(float* ptr) const {
if (ptr) {
hipFree(ptr);
}
}
};
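// DeviceMatrix owns raw device memory allocated with hipMalloc; the custom deleter
// releases it once the last shared_ptr reference goes away.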
DeviceMatrix::DeviceMatrix(unsigned int height, unsigned int width): Matrix(height, width, true) {
float* rawElements = 0;
hipMalloc(&rawElements, height * width * sizeof(float));
elements = std::shared_ptr<float>(rawElements, DeviceMatrixDeleter());
}
float DeviceMatrix::getElement(unsigned int i, unsigned int j) const {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
float value = 0.0f;
hipMemcpy(&value, elements.get() + i * width + j, sizeof(float), hipMemcpyDeviceToHost);
return value;
}
}
void DeviceMatrix::setElement(unsigned int i, unsigned int j, float value) {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
hipMemcpy(elements.get() + i * width + j, &value, sizeof(float), hipMemcpyHostToDevice);
}
}
|
d1d5eaaf821fc5b22bac9709882a93c08ef52f0d.cu
|
#include "Matrix.cuh"
class HostMatrixDeleter {
public:
void operator()(float* ptr) const {
if (ptr) {
cudaFreeHost(ptr);
}
}
};
HostMatrix::HostMatrix(unsigned int height, unsigned int width): Matrix(height, width, false) {
float* rawElements = 0;
cudaMallocHost(&rawElements, height * width * sizeof(float));
elements = std::shared_ptr<float>(rawElements, HostMatrixDeleter());
}
float HostMatrix::getElement(unsigned int i, unsigned int j) const {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
return (elements.get())[i * width + j];
}
}
void HostMatrix::setElement(unsigned int i, unsigned int j, float value) {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
(elements.get())[i * width + j] = value;
}
}
class DeviceMatrixDeleter {
public:
void operator()(float* ptr) const {
if (ptr) {
cudaFree(ptr);
}
}
};
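// DeviceMatrix owns raw device memory allocated with cudaMalloc; the custom deleter
// releases it once the last shared_ptr reference goes away.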
DeviceMatrix::DeviceMatrix(unsigned int height, unsigned int width): Matrix(height, width, true) {
float* rawElements = 0;
cudaMalloc(&rawElements, height * width * sizeof(float));
elements = std::shared_ptr<float>(rawElements, DeviceMatrixDeleter());
}
float DeviceMatrix::getElement(unsigned int i, unsigned int j) const {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
float value = 0.0f;
cudaMemcpy(&value, elements.get() + i * width + j, sizeof(float), cudaMemcpyDeviceToHost);
return value;
}
}
void DeviceMatrix::setElement(unsigned int i, unsigned int j, float value) {
if (i >= height || j >= width) {
throw std::invalid_argument("Invalid argument.");
} else {
cudaMemcpy(elements.get() + i * width + j, &value, sizeof(float), cudaMemcpyHostToDevice);
}
}
|
7e48e9f117ad7ac13e250671a153776d2b320090.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
The game:
1) Let h[i] be the number of grains at site i, 0<i<N-1.
2) If h[i]>1, site i is "active".
3) At time t, an "active" site "discharges" completely, tossing each of its grains randomly, with equal probability, to the left or to the right (so the total number of grains is conserved).
4) Sites discharge synchronously. Therefore, at time (t+1), the active site i will have h[i]=0 only if its neighbors did not toss grains onto it at time t.
5) The activity A is defined as the number of active sites, i.e. the number of sites that want to discharge.
Note that if the grain density, [Sum_i h[i]/N], is very low, the activity will quickly decay to zero. If, on the other hand, the density is high, the activity will never cease, since there will always be active sites. In between there is a "critical" density, at which the activity decays as a power law (but large systems and long times are needed to see it well defined).
*/
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "helper_cuda.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include <iostream>
#include <fstream>
#include <cstring>
#include <array>
#include <vector>
#include <cstdlib>
#include <random>
#include <cassert>
// number of sites
#define N (1024*1024) //TODO: everything breaks if you compile with -DN=123; rename N to NSLOTS or something similar
#define SIZE (N * 4)
#define BLOCK_SIZE 256
#define THREAD_WORK 4
#define DENSITY 0.8924
// number of temporal steps
#define NSTEPS 10000
using namespace std;
typedef int * Manna_Array;
//fastest prng is XORWOW, default.
//~ #define hiprandState_t hiprandStatePhilox4_32_10_t //slower
//~ #define hiprandState_t hiprandStateMRG32k3a_t //slowest by far
__device__ hiprandState_t seed[1];
__device__ hiprandState_t rand_state[N];
__global__ void seedinit(int first_num){ //120ms, not top priority
hiprand_init(first_num,0,0,seed);
for(int i=1; i<N; i++) //must do it sequentially because of race conditions in hiprand(seed)
hiprand_init(hiprand(seed),0,0,rand_state+i);
}
__device__ static inline bool randbool(hiprandState_t *rand_state){
//~ return 1; //trick to fix behaviour
return 1&hiprand(rand_state); //TODO optimize perhaps?
}
// INITIAL CONDITION ---------------------------------------------------------------
/*
To generate an initial condition that is sufficiently uniform, with a density as close
as possible (exact as N->infinity) to the real number DENSITY, we can do the following:
*/
__global__ void inicializacion(Manna_Array __restrict__ h)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
h[i] = (int)((i+1)*DENSITY)-(int)(i*DENSITY);
}
__global__ void desestabilizacion_inicial(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
if (h[gtid]) {
int k = (gtid+2*randbool(rand_state)-1+N)%N;
//~ int k = (gtid+2*((gtid%3)%2)-1+N)%N; //trick to fix behavior
atomicAdd(&dh[k], 1);
h[gtid] = 0;
}
}
__global__ void descargar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = THREAD_WORK * (blockIdx.x*blockDim.x + threadIdx.x);
assert(gtid<N);
	//~ unsigned int tid = threadIdx.x; // thread id within the block
	//~ unsigned int lane = tid & CUDA_WARP_MASK; // thread id within the warp, aka lane
	//~ uint warp = tid / CUDA_WARP_SIZE; // warp within the block
	//~ uint gwarp = gtid / CUDA_WARP_SIZE; // global warp id
	//~ uint bid = blockIdx.x; // block id
if(gtid==0) *slots_activos=0;
hiprandState_t *thread_state = &rand_state[gtid]; //doesn't get better if I use a local copy and then copy back
int i=gtid;
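	//each thread owns THREAD_WORK consecutive sites and grains only move to nearest
	//neighbours, so only the two sites at each end of the chunk can collide with a
	//neighbouring thread's updates and need atomicAdd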
//first 2 iterations must be protected
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
++i;
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
//mid iterations don't need protection
for(++i; i<gtid+THREAD_WORK-2; ++i){
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
++dh[k];
}
h[i] = 0;
}
}
//last 2 iterations must be protected
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
++i;
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
}
__global__ void actualizar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ result)
{
unsigned int gtid = THREAD_WORK * (blockIdx.x*blockDim.x + threadIdx.x);
	unsigned int tid = threadIdx.x; // thread id within the block
	unsigned int lane = tid & CUDA_WARP_MASK; // thread id within the warp, aka lane
assert(gtid<N);
unsigned int local_result=0;
for(int i=gtid; i<gtid+THREAD_WORK; ++i){
h[i]+=dh[i];
dh[i]=0; //zeroes dh array
if(h[i]>1)
++local_result;
}
__shared__ unsigned int block_result;
block_result=0;
__syncthreads();
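	//warp-level tree reduction: each __shfl_down halves the number of lanes holding
	//a partial sum, leaving the warp's total in lane 0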
local_result += __shfl_down(local_result, 16);
local_result += __shfl_down(local_result, 8);
local_result += __shfl_down(local_result, 4);
local_result += __shfl_down(local_result, 2);
local_result += __shfl_down(local_result, 1);
if (0==lane) {
atomicAdd(&block_result, local_result);
}
__syncthreads();
if (0==tid) {
atomicAdd(result, block_result);
}
//~ atomicAdd(result, local_result);
}
__device__ Manna_Array h,dh;
__device__ unsigned int slots_activos;
//===================================================================
int main(){
ios::sync_with_stdio(0); cin.tie(0);
assert(N%(BLOCK_SIZE*THREAD_WORK)==0);
//random initialization
hipLaunchKernelGGL(( seedinit), dim3(1),dim3(1), 0, 0, time(NULL)); //initialize a state per thread with some random seed
getLastCudaError("seedinit failed");
//slots
checkCudaErrors(hipMalloc(&h, N*sizeof(int)));
checkCudaErrors(hipMalloc(&dh, N*sizeof(int)));
checkCudaErrors(hipMemset(dh, 0, N*sizeof(int)));
//gets actual address in device (&slots_activos is garbage)
unsigned int *slots_activos_addr;
hipGetSymbolAddress((void **)&slots_activos_addr, slots_activos);
//initialize slots
cout << "estado inicial estable de la pila de arena...";
hipLaunchKernelGGL(( inicializacion), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, h);
getLastCudaError("inicializacion failed");
cout << "LISTO\n";
#ifdef DEBUG
imprimir_array(h);
#endif
//create some chaos among slots
cout << "estado inicial desestabilizado de la pila de arena...";
hipLaunchKernelGGL(( desestabilizacion_inicial), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, h,dh,slots_activos_addr);
getLastCudaError("desestabilizacion failed");
hipLaunchKernelGGL(( actualizar), dim3(N/BLOCK_SIZE/THREAD_WORK), dim3(BLOCK_SIZE) , 0, 0, h,dh,slots_activos_addr);
getLastCudaError("actualizar failed");
cout << "LISTO\n";
cout << "evolucion de la pila de arena..."; cout.flush();
//~ cout<<N/(BLOCK_SIZE*THREAD_WORK)<<" "<<BLOCK_SIZE<<endl;
ofstream activity_out("activity.dat");
unsigned int activity;
int t = 0;
do {
hipLaunchKernelGGL(( descargar), dim3(N/(BLOCK_SIZE*THREAD_WORK)), dim3(BLOCK_SIZE) , 0, 0, h,dh,slots_activos_addr);
getLastCudaError("descargar failed");
hipLaunchKernelGGL(( actualizar), dim3(N/(BLOCK_SIZE*THREAD_WORK)), dim3(BLOCK_SIZE) , 0, 0, h,dh,slots_activos_addr);
getLastCudaError("actualizar failed");
checkCudaErrors(hipMemcpyFromSymbol(&activity, slots_activos, sizeof(unsigned int)));
activity_out << activity << "\n";
#ifdef DEBUG
imprimir_array(h);
#endif
++t;
	} while(activity > 0 && t < NSTEPS); // if the activity decays to zero, this stops evolving...
cout << "LISTO: " << ((activity>0)?("se acabo el tiempo\n\n"):("la actividad decayo a cero\n\n")); cout.flush();
//free everything
hipFree(h);
hipFree(dh);
hipFree(rand_state);
hipFree(seed);
return 0;
}
/*
* TODO:
* Try more work per thread. Change algorithm to get rid of many atomicAdd
* make N and BLOCK_SIZE defineable during compile time
* try normal distribution with: int hiprand_discrete(hiprandState_t *state, hiprandDiscreteDistribution_t discrete_distribution)
*/
|
7e48e9f117ad7ac13e250671a153776d2b320090.cu
|
/*
The game:
1) Let h[i] be the number of grains at site i, 0<i<N-1.
2) If h[i]>1, site i is "active".
3) At time t, an "active" site "discharges" completely, tossing each of its grains randomly, with equal probability, to the left or to the right (so the total number of grains is conserved).
4) Sites discharge synchronously. Therefore, at time (t+1), the active site i will have h[i]=0 only if its neighbors did not toss grains onto it at time t.
5) The activity A is defined as the number of active sites, i.e. the number of sites that want to discharge.
Note that if the grain density, [Sum_i h[i]/N], is very low, the activity will quickly decay to zero. If, on the other hand, the density is high, the activity will never cease, since there will always be active sites. In between there is a "critical" density, at which the activity decays as a power law (but large systems and long times are needed to see it well defined).
*/
#include <cuda.h>
#include "cuda_runtime.h"
#include "helper_cuda.h"
#include "curand.h"
#include "curand_kernel.h"
#include <iostream>
#include <fstream>
#include <cstring>
#include <array>
#include <vector>
#include <cstdlib>
#include <random>
#include <cassert>
// number of sites
#define N (1024*1024) //TODO: everything breaks if you compile with -DN=123; rename N to NSLOTS or something similar
#define SIZE (N * 4)
#define BLOCK_SIZE 256
#define THREAD_WORK 4
#define DENSITY 0.8924
// number of temporal steps
#define NSTEPS 10000
using namespace std;
typedef int * Manna_Array;
//fastest prng is XORWOW, default.
//~ #define curandState curandStatePhilox4_32_10_t //slower
//~ #define curandState curandStateMRG32k3a_t //slowest by far
__device__ curandState seed[1];
__device__ curandState rand_state[N];
__global__ void seedinit(int first_num){ //120ms, not top priority
curand_init(first_num,0,0,seed);
for(int i=1; i<N; i++) //must do it sequentially because of race conditions in curand(seed)
curand_init(curand(seed),0,0,rand_state+i);
}
__device__ static inline bool randbool(curandState *rand_state){
//~ return 1; //trick to fix behaviour
return 1&curand(rand_state); //TODO optimize perhaps?
}
// INITIAL CONDITION ---------------------------------------------------------------
/*
To generate an initial condition that is sufficiently uniform, with a density as close
as possible (exact as N->infinity) to the real number DENSITY, we can do the following:
*/
__global__ void inicializacion(Manna_Array __restrict__ h)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
h[i] = (int)((i+1)*DENSITY)-(int)(i*DENSITY);
}
__global__ void desestabilizacion_inicial(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
if (h[gtid]) {
int k = (gtid+2*randbool(rand_state)-1+N)%N;
//~ int k = (gtid+2*((gtid%3)%2)-1+N)%N; //trick to fix behavior
atomicAdd(&dh[k], 1);
h[gtid] = 0;
}
}
__global__ void descargar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = THREAD_WORK * (blockIdx.x*blockDim.x + threadIdx.x);
assert(gtid<N);
	//~ unsigned int tid = threadIdx.x; // thread id within the block
	//~ unsigned int lane = tid & CUDA_WARP_MASK; // thread id within the warp, aka lane
	//~ uint warp = tid / CUDA_WARP_SIZE; // warp within the block
	//~ uint gwarp = gtid / CUDA_WARP_SIZE; // global warp id
	//~ uint bid = blockIdx.x; // block id
if(gtid==0) *slots_activos=0;
curandState *thread_state = &rand_state[gtid]; //doesn't get better if I use a local copy and then copy back
int i=gtid;
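	//each thread owns THREAD_WORK consecutive sites and grains only move to nearest
	//neighbours, so only the two sites at each end of the chunk can collide with a
	//neighbouring thread's updates and need atomicAdd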
//first 2 iterations must be protected
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
++i;
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
//mid iterations don't need protection
for(++i; i<gtid+THREAD_WORK-2; ++i){
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
++dh[k];
}
h[i] = 0;
}
}
//last 2 iterations must be protected
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
++i;
if (h[i] > 1) {
for (int j = 0; j < h[i]; ++j) {
int k = (i+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
h[i] = 0;
}
}
__global__ void actualizar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ result)
{
unsigned int gtid = THREAD_WORK * (blockIdx.x*blockDim.x + threadIdx.x);
	unsigned int tid = threadIdx.x; // thread id within the block
	unsigned int lane = tid & CUDA_WARP_MASK; // thread id within the warp, aka lane
assert(gtid<N);
unsigned int local_result=0;
for(int i=gtid; i<gtid+THREAD_WORK; ++i){
h[i]+=dh[i];
dh[i]=0; //zeroes dh array
if(h[i]>1)
++local_result;
}
__shared__ unsigned int block_result;
block_result=0;
__syncthreads();
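	//warp-level tree reduction: each __shfl_down halves the number of lanes holding
	//a partial sum, leaving the warp's total in lane 0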
local_result += __shfl_down(local_result, 16);
local_result += __shfl_down(local_result, 8);
local_result += __shfl_down(local_result, 4);
local_result += __shfl_down(local_result, 2);
local_result += __shfl_down(local_result, 1);
if (0==lane) {
atomicAdd(&block_result, local_result);
}
__syncthreads();
if (0==tid) {
atomicAdd(result, block_result);
}
//~ atomicAdd(result, local_result);
}
__device__ Manna_Array h,dh;
__device__ unsigned int slots_activos;
//===================================================================
int main(){
ios::sync_with_stdio(0); cin.tie(0);
assert(N%(BLOCK_SIZE*THREAD_WORK)==0);
//random initialization
seedinit<<<1,1>>>(time(NULL)); //initialize a state per thread with some random seed
getLastCudaError("seedinit failed");
//slots
checkCudaErrors(cudaMalloc(&h, N*sizeof(int)));
checkCudaErrors(cudaMalloc(&dh, N*sizeof(int)));
checkCudaErrors(cudaMemset(dh, 0, N*sizeof(int)));
//gets actual address in device (&slots_activos is garbage)
unsigned int *slots_activos_addr;
cudaGetSymbolAddress((void **)&slots_activos_addr, slots_activos);
//initialize slots
cout << "estado inicial estable de la pila de arena...";
inicializacion<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(h);
getLastCudaError("inicializacion failed");
cout << "LISTO\n";
#ifdef DEBUG
imprimir_array(h);
#endif
//create some chaos among slots
cout << "estado inicial desestabilizado de la pila de arena...";
desestabilizacion_inicial<<< N/BLOCK_SIZE, BLOCK_SIZE >>>(h,dh,slots_activos_addr);
getLastCudaError("desestabilizacion failed");
actualizar<<< N/BLOCK_SIZE/THREAD_WORK, BLOCK_SIZE >>>(h,dh,slots_activos_addr);
getLastCudaError("actualizar failed");
cout << "LISTO\n";
cout << "evolucion de la pila de arena..."; cout.flush();
//~ cout<<N/(BLOCK_SIZE*THREAD_WORK)<<" "<<BLOCK_SIZE<<endl;
ofstream activity_out("activity.dat");
unsigned int activity;
int t = 0;
do {
descargar<<< N/(BLOCK_SIZE*THREAD_WORK), BLOCK_SIZE >>>(h,dh,slots_activos_addr);
getLastCudaError("descargar failed");
actualizar<<< N/(BLOCK_SIZE*THREAD_WORK), BLOCK_SIZE >>>(h,dh,slots_activos_addr);
getLastCudaError("actualizar failed");
checkCudaErrors(cudaMemcpyFromSymbol(&activity, slots_activos, sizeof(unsigned int)));
activity_out << activity << "\n";
#ifdef DEBUG
imprimir_array(h);
#endif
++t;
	} while(activity > 0 && t < NSTEPS); // if the activity decays to zero, this stops evolving...
cout << "LISTO: " << ((activity>0)?("se acabo el tiempo\n\n"):("la actividad decayo a cero\n\n")); cout.flush();
//free everything
cudaFree(h);
cudaFree(dh);
cudaFree(rand_state);
cudaFree(seed);
return 0;
}
/*
* TODO:
* Try more work per thread. Change algorithm to get rid of many atomicAdd
* make N and BLOCK_SIZE defineable during compile time
* try normal distribution with: int curand_discrete(curandState_t *state, curandDiscreteDistribution_t discrete_distribution)
*/
|
7a1f0838853689c8a6ac8c5907cfa53d2f19324a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/cudnn/cudnn.hpp>
#include <nbla/cuda/cudnn/function/lstm.hpp>
#include <nbla/random_manager.hpp>
#include <nbla/variable.hpp>
#include <array>
#include <random>
namespace nbla {
template <typename ARRAY>
typename ARRAY::value_type array_product(const ARRAY &arr) {
typename ARRAY::value_type p = 1;
for (auto a : arr) {
p *= a;
}
return p;
}
template <typename T>
__global__ void kernel_forward_copy_weights(size_t size, const T *weight,
T *param, int j_stride,
int input_dim) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
int stride;
stride = (i / input_dim) * j_stride;
param[i] = weight[i + stride];
}
}
template <typename T>
__global__ void kernel_forward_copy_bias(size_t size, const T *bias, T *param) {
NBLA_CUDA_KERNEL_LOOP(i, size) { param[i] = bias[i]; }
}
template <typename T>
__global__ void kernel_accumulate_x_and_h(size_t size, const T *d_ptr, T *d) {
NBLA_CUDA_KERNEL_LOOP(i, size) { d[i] += d_ptr[i]; }
}
template <typename T>
__global__ void kernel_backward_copy_weights(size_t size, T *g_weight,
T *g_param, int j_stride,
int input_dim, bool accum) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
int stride;
stride = (i / input_dim) * j_stride;
if (accum) {
g_weight[i + stride] += g_param[i];
} else {
g_weight[i + stride] = g_param[i];
}
}
}
template <typename T>
__global__ void kernel_backward_copy_bias(size_t size, T *g_bias, T *g_param,
bool accum) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
if (accum) {
g_bias[i] += g_param[i];
} else {
g_bias[i] = g_param[i];
}
}
}
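// cuDNN keeps all RNN weights and biases in a single flat parameter blob; the helpers
// below copy NNabla's (w_init, weight, bias) tensors to/from that blob using the
// precomputed byte offsets stored in weight_offsets_ / bias_offsets_.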
template <typename T>
void LSTMCudaCudnn<T>::copy_weight_bias_to_params(
Tcu *params, const Tcu *w_init, const Tcu *weight, const Tcu *bias,
bool weight_exists, bool bias_exists) {
for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_;
layer_id++) {
for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_;
lin_layer_id++) {
int param_index = layer_id * num_lin_layers_ + lin_layer_id;
int inweight_offset = 0;
if (layer_id / num_directions_ == 0) {
if (lin_layer_id < 4) {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, input_dim_);
} else {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
(lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) +
input_dim_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
input_dim_, hidden_size_)
}
} else {
if (lin_layer_id < 4) {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
lin_layer_id * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_);
if (this->num_layers_ > 1 && weight_exists) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, num_directions_ * hidden_size_);
}
} else {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
(lin_layer_id - 4) * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_) +
num_directions_ * hidden_size_;
if (this->num_layers_ > 1 && weight_exists) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
num_directions_ * hidden_size_, hidden_size_);
}
}
}
if (bias_exists && bias && lin_layer_id < 4) {
// copy only when lin_layer_id = 0, 1, 2, 3
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_bias<Tcu>), bias_offsets_[param_index].second,
bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_,
params + bias_offsets_[param_index].first / sizeof(T));
}
}
}
}
template <typename T>
void LSTMCudaCudnn<T>::copy_params_to_gradients(
Tcu *params, Tcu *w_init, Tcu *weight, Tcu *bias, bool w_init_accum,
bool w_accum, bool b_accum, bool w_init_propagate, bool w_propagate,
bool b_propagate) {
for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_;
layer_id++) {
for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_;
lin_layer_id++) {
int param_index = layer_id * num_lin_layers_ + lin_layer_id;
int inweight_offset = 0;
if (layer_id / num_directions_ == 0) {
if (lin_layer_id < 4) {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_);
if (w_init_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, input_dim_, w_init_accum);
}
} else {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
(lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) +
input_dim_;
if (w_init_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
input_dim_, hidden_size_, w_init_accum);
}
}
} else {
if (lin_layer_id < 4) {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
lin_layer_id * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_);
if (w_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, num_directions_ * hidden_size_, w_accum);
}
} else {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
(lin_layer_id - 4) * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_) +
num_directions_ * hidden_size_;
if (w_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
num_directions_ * hidden_size_, hidden_size_, w_accum);
}
}
}
if (bias && b_propagate && lin_layer_id < 4) {
// copy only when lin_layer_id = 0, 1, 2, 3
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_bias<Tcu>), bias_offsets_[param_index].second,
bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_,
params + bias_offsets_[param_index].first / sizeof(T), b_accum);
}
}
}
}
template <typename T>
void LSTMCudaCudnn<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
// Create x descriptors and y descriptors by resizing
// Set descriptors
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
Shape_t inshape = inputs[0]->shape();
Shape_t hshape = inputs[1]->shape();
Shape_t cshape = inputs[2]->shape();
Shape_t outshape = outputs[0]->shape();
// Check input dimensions
NBLA_CHECK(inputs[0]->ndim() == 3, error_code::value,
"Input x must be a 3 dimensional array with a shape of (steps, "
"batch_size, input_size).");
// Get input dimensions
cudnnDataType_t dt_ = cudnn_data_type<T>::type();
seq_len_ = inshape[0];
int batch_size = inshape[1];
input_dim_ = inshape[2];
// Assuming this function takes h as (numLayer, numD, B, M)
hidden_size_ = inputs[1]->shape()[3];
inputMode = CUDNN_LINEAR_INPUT;
num_directions_ = this->bidirectional_ ? 2 : 1;
direction = this->bidirectional_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL;
RNNMode = miopenLSTM;
num_lin_layers_ = 8;
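  // cuDNN's LSTM exposes 8 linear layers per (layer, direction): ids 0-3 are the
  // input-to-hidden matrices and ids 4-7 the hidden-to-hidden (recurrent) matrices
  // of the four gates.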
// Check shape of h & c
const char *error_msg_h = "Input h must be a 4 dimensional array with a "
"shape of (num_layers, num_directions, batch_size, "
"hidden_size).";
NBLA_CHECK(inputs[1]->ndim() == 4, error_code::value, error_msg_h);
NBLA_CHECK(hshape[0] == this->num_layers_, error_code::value, error_msg_h);
NBLA_CHECK(hshape[1] == num_directions_, error_code::value, error_msg_h);
NBLA_CHECK(hshape[2] == batch_size, error_code::value, error_msg_h);
NBLA_CHECK(hshape == cshape, error_code::value,
"Input c must has the same shape as input h.");
// Check weight shape at 0th layer
Shape_t w0_shape = inputs[3]->shape();
const char *error_msg_w0 = "Input w0 must be a 4 dimensional array with a "
"shape of (num_directions, 4, hidden_size, "
"input_size + hidden_size).";
NBLA_CHECK(inputs[2]->ndim() == 4, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[0] == num_directions_, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[1] == 4, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[2] == hidden_size_, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[3] == hidden_size_ + input_dim_, error_code::value,
error_msg_w0);
weight_exists_ = true;
bias_exists_ = true;
if (inputs.size() == 4) {
weight_exists_ = false;
bias_exists_ = false;
} else if (inputs.size() == 5) {
Shape_t opt_shape = inputs[4]->shape();
if (this->num_layers_ > 1 && opt_shape.size() == 5) {
bias_exists_ = false;
} else if (this->num_layers_ > 1 && opt_shape.size() != 5) {
NBLA_ERROR(error_code::value,
"Weight argument must be passed when num_layers > 1");
} else if (this->num_layers_ == 1 && opt_shape.size() != 4) {
NBLA_ERROR(error_code::value,
"Weight argument cannot be passed when num_layers == 1");
} else if (this->num_layers_ == 1 && opt_shape.size() == 4) {
weight_exists_ = false;
}
} else if ((inputs.size() > 5) && (this->num_layers_ == 1)) {
NBLA_ERROR(error_code::value,
"Weight argument cannot be passed when num_layers == 1");
}
// Check weight shape
if (weight_exists_) {
Shape_t w_shape = inputs[4]->shape();
const char *error_msg_w = "Input w must be a 5 dimensional array with a "
"shape of (num_layers - 1, num_directions, 4, "
"hidden_size, num_directions * hidden_size + "
"hidden_size).";
NBLA_CHECK(inputs[4]->ndim() == 5, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[0] == this->num_layers_ - 1, error_code::value,
error_msg_w);
NBLA_CHECK(w_shape[1] == num_directions_, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[2] == 4, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[3] == hidden_size_, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[4] == num_directions_ * hidden_size_ + hidden_size_,
error_code::value, error_msg_w);
}
// Check bias shape
if (bias_exists_) {
const int b_index = weight_exists_ ? 5 : 4;
Shape_t b_shape = inputs[b_index]->shape();
const char *error_msg_b = "Input b must be a 4 dimensional array with a "
"shape of (num_layers, 4, num_directions, "
"hidden_size).";
NBLA_CHECK(inputs[b_index]->ndim() == 4, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[0] == this->num_layers_, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[1] == num_directions_, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[2] == 4, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[3] == hidden_size_, error_code::value, error_msg_b);
}
// Set X desc
// xdesc : T * (B, N, 1)
// x : (T, B, N) row-major
x_desc_.reset(new WCudnnTensorDescArray(seq_len_));
for (auto &x : x_desc_->desc_array()) {
std::array<int, 3> dimA{batch_size, input_dim_, 1};
std::array<int, 3> strideA{input_dim_, 1, 1};
NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(x, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(),
strideA.data()));
}
// Set hx and hy desc
// hxDesc : (numLayer * numD, B, M)
// hx : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M)
// row-major
// hyDesc : (numLayer * numD, B, M)
// hy : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M)
// row-major
{
std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size,
hidden_size_};
std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1};
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(h_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(h_n_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
}
// cx and cy
{
std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size,
hidden_size_};
std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1};
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(c_x_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(c_y_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
}
// Set Y desc
// yDesc : T * (B, M * numD, 1)
// y : (T, B, M, numD) row-major, >>> or (T, B, numD, M)
y_desc_.reset(new WCudnnTensorDescArray(seq_len_));
for (auto &y : y_desc_->desc_array()) {
std::array<int, 3> dimA{batch_size, hidden_size_ * num_directions_, 1};
std::array<int, 3> strideA{hidden_size_ * num_directions_, 1, 1};
NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(y, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(),
strideA.data()));
}
// Get an RNN algorithm using cudnnGetRNNAlgorithm or cudnnFindRNNAlgorithm.
  // NOTE: the find algorithm runs many algorithms exhaustively and picks the
  // best one.
// Set dropout descriptor
size_t dropout_stateSize;
NBLA_CUDNN_CHECK(cudnnDropoutGetStatesSize(cudnn_handle, &dropout_stateSize));
state_array_.reshape(Shape_t{static_cast<Size_t>(dropout_stateSize)}, true);
void *state_ptr =
state_array_.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
std::mt19937 &rgen =
SingletonManager::get<RandomManager>()->get_rand_generator();
std::uniform_int_distribution<> dist(0, 999);
NBLA_CUDNN_CHECK(cudnnSetDropoutDescriptor(dropout_desc_.desc, cudnn_handle,
this->dropout_, state_ptr,
dropout_stateSize, dist(rgen)));
// Set RNN descriptor.
#if CUDNN_VERSION >= 7000
NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
cudnn_handle, rnn_desc_.desc, hidden_size_, this->num_layers_,
dropout_desc_.desc, inputMode, direction, RNNMode,
CUDNN_RNN_ALGO_STANDARD, dt_));
#else
NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor(rnn_desc_.desc, hidden_size_,
this->num_layers_, dropout_desc_.desc,
inputMode, direction, RNNMode, dt_));
#endif
// Get workspace size and reserve size
NBLA_CUDNN_CHECK(cudnnGetRNNWorkspaceSize(cudnn_handle, rnn_desc_.desc,
seq_len_, x_desc_->data(),
&workspace_size_));
if (this->training_) {
NBLA_CUDNN_CHECK(
cudnnGetRNNTrainingReserveSize(cudnn_handle, rnn_desc_.desc, seq_len_,
x_desc_->data(), &reserve_size_));
}
  // Get number of parameters both in bytes and in elements.
NBLA_CUDNN_CHECK(cudnnGetRNNParamsSize(cudnn_handle, rnn_desc_.desc,
x_desc_->data()[0],
¶ms_size_in_bytes_, dt_));
total_params_ = params_size_in_bytes_ / sizeof(T);
// Set params descriptor
{
std::array<int, 3> filter_dims{(int)total_params_, 1, 1};
NBLA_CUDNN_CHECK(cudnnSetFilterNdDescriptor(
params_desc_.desc, cudnn_data_type<T>::type(), CUDNN_TENSOR_NCHW, 3,
filter_dims.data()));
}
  // Calculate address correspondences between input parameters (weights and
  // biases) and the flattened parameters buffer.
  // weight : [H, I+H]
  // bias : [H]
  // Temporary buffer. This is used only for getting address offsets of
  // matrices and biases from the head of the params pointer.
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
Tcu *params =
params_array.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
weight_offsets_.clear();
bias_offsets_.clear();
WCudnnFilterDesc lin_layer_mat_desc;
for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_;
layer_id++) {
for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_;
lin_layer_id++) {
void *matrix_pointer;
int nb_dims;
cudnnDataType_t data_type;
cudnnTensorFormat_t format;
std::array<int, 3> dim;
      // Get an address pointing to the weight matrix corresponding to layer_id
      // and lin_layer_id, and its shape.
NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerMatrixParams(
cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0],
params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc,
&matrix_pointer));
NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3,
&data_type, &format, &nb_dims,
dim.data()));
// Size of the weight matrix can be obtained by a product of dim
// elements.
int weight_size = array_product(dim);
weight_offsets_.push_back(
{intptr_t(matrix_pointer) - intptr_t(params), weight_size});
      // Get the address of the bias vector corresponding to layer_id and
      // lin_layer_id, and its size.
NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerBiasParams(
cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0],
params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc,
&matrix_pointer));
NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3,
&data_type, &format, &nb_dims,
dim.data()));
// Size of the bias vector can be obtained by a product of dim elements.
int bias_size = array_product(dim);
bias_offsets_.push_back(
{intptr_t(matrix_pointer) - intptr_t(params), bias_size});
}
}
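  // weight_offsets_ / bias_offsets_ now hold, for every linear layer, the byte
  // offset from the head of the flattened params buffer and the element count;
  // copy_weight_bias_to_params and copy_params_to_gradients reuse them.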
// Set output shapes
outputs[0]->reshape({seq_len_, batch_size, num_directions_ * hidden_size_},
true);
outputs[1]->reshape(inputs[1]->shape(), true);
outputs[2]->reshape(inputs[2]->shape(), true);
}
template <typename T>
void LSTMCudaCudnn<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->training_) { // Training mode.
forward_impl_training(inputs, outputs);
} else { // Testing mode.
forward_impl_inference(inputs, outputs);
}
}
template <typename T>
void LSTMCudaCudnn<T>::forward_impl_training(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
// Inputs and outputs
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *weight{nullptr};
const Tcu *bias{nullptr};
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_);
if (inputs.size() == 5) {
if (weight_exists_) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
} else if (bias_exists_) {
bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
}
}
if (inputs.size() > 5) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_);
}
// Create flattened weight buffer.
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
params_array.zero(); // Initialize params with 0
Tcu *params = params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_,
bias_exists_);
void *mem_buff = nullptr;
NdArray mem_workspace;
if (workspace_size_) {
mem_workspace.reshape({static_cast<Size_t>(workspace_size_)}, true);
mem_buff =
mem_workspace.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
}
if (mem_reservespace_.array()->get_num_arrays() > 0) {
NBLA_CHECK(mem_reservespace_.size() == reserve_size_, error_code::value,
"reserve_size_ is inconsistent with the previously set "
"reservespace size.");
} else {
mem_reservespace_.reshape({static_cast<Size_t>(reserve_size_)}, true);
}
void *mem_reserve_buff =
mem_reservespace_.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
auto alpha = get_cudnn_scalar_arg<T>(1);
auto beta = get_cudnn_scalar_arg<T>(0);
NBLA_CUDNN_CHECK(cudnnRNNForwardTraining(
cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc,
h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y,
h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_buff, workspace_size_,
mem_reserve_buff, reserve_size_));
}
template <typename T>
void LSTMCudaCudnn<T>::forward_impl_inference(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *weight{nullptr};
const Tcu *bias{nullptr};
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_);
if (inputs.size() == 5) {
if (weight_exists_) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
} else if (bias_exists_) {
bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
}
}
if (inputs.size() > 5) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_);
}
// Create flattened weight buffer.
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
params_array.zero(); // Initialize params with 0
Tcu *params = params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_,
bias_exists_);
void *mem_buff = nullptr;
NdArray mem_workspace;
if (workspace_size_) {
mem_workspace.reshape({static_cast<Size_t>(workspace_size_)}, true);
mem_buff =
mem_workspace.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
}
NBLA_CUDNN_CHECK(cudnnRNNForwardInference(
cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc,
h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y,
h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_buff, workspace_size_));
}
template <typename T>
void LSTMCudaCudnn<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0] || propagate_down[1] || propagate_down[2] ||
propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) ||
(inputs.size() > 5 && propagate_down[5]))) {
return;
}
NBLA_CHECK(this->training_, error_code::value,
"Backward is called for training only.");
NBLA_CHECK(mem_reservespace_.array()->get_num_arrays() > 0, error_code::value,
"Reserve space should be allocated memory space.");
NBLA_CHECK(mem_reservespace_.size() == reserve_size_, error_code::value,
"reserve_size_ is inconsistent with the previously set "
"reservespace size.");
if (inputs.size() > 5 && propagate_down[5]) {
NBLA_CHECK(propagate_down[3] == propagate_down[4], error_code::value,
"If bias is backpropagated, so should weights.");
}
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *weight{nullptr};
const Tcu *bias{nullptr};
const Tcu *g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
const Tcu *g_h_n = outputs[1]->get_grad_pointer<Tcu>(this->ctx_);
const Tcu *g_c_n = outputs[2]->get_grad_pointer<Tcu>(this->ctx_);
if (inputs.size() == 5) {
if (weight_exists_) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
} else if (bias_exists_) {
bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
}
}
if (inputs.size() > 5) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_);
}
const Tcu *y = outputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h_n = outputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c_n = outputs[2]->get_data_pointer<Tcu>(this->ctx_);
Tcu *g_x{nullptr};
Tcu *g_h{nullptr};
Tcu *g_c{nullptr};
Tcu *g_w_init{nullptr};
Tcu *g_weight{nullptr};
Tcu *g_bias{nullptr};
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
NdArray g_params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
params_array.zero(); // Initialize params with 0
g_params_array.zero();
Tcu *params = params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
Tcu *g_params = g_params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_,
bias_exists_);
if (propagate_down[0]) {
g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
}
if (propagate_down[1]) {
g_h = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
}
if (propagate_down[2]) {
g_c = inputs[2]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[2]);
}
if (propagate_down[3]) {
g_w_init = inputs[3]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[3]);
}
if (inputs.size() == 5 && propagate_down[4]) {
if (weight_exists_) {
g_weight =
inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]);
} else if (bias_exists_) {
g_bias = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]);
}
}
if (inputs.size() == 6 && propagate_down[4]) {
g_weight = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]);
}
if (inputs.size() == 6 && propagate_down[5]) {
g_bias = inputs[5]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[5]);
}
void *mem_buff = nullptr;
NdArray mem_workspace;
if (workspace_size_) {
mem_workspace.reshape({static_cast<Size_t>(workspace_size_)}, true);
mem_buff =
mem_workspace.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
}
void *mem_reserve_buff =
mem_reservespace_.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
NdArray mem_x_accum;
NdArray mem_h_accum;
NdArray mem_c_accum;
Tcu *dx_tmp = g_x;
Tcu *dh_tmp = g_h;
Tcu *dc_tmp = g_c;
if (!propagate_down[0] || accum[0]) {
mem_x_accum.reshape({static_cast<Size_t>(inputs[0]->size() * sizeof(Tcu))},
true);
dx_tmp = mem_x_accum.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
}
if (!propagate_down[1] || accum[1]) {
mem_h_accum.reshape({static_cast<Size_t>(inputs[1]->size() * sizeof(Tcu))},
true);
dh_tmp = mem_h_accum.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
}
if (!propagate_down[2] || accum[2]) {
mem_c_accum.reshape({static_cast<Size_t>(inputs[2]->size() * sizeof(Tcu))},
true);
dc_tmp = mem_c_accum.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
}
NBLA_CUDNN_CHECK(cudnnRNNBackwardData(
cudnn_handle, rnn_desc_.desc, seq_len_, y_desc_->data(), y,
y_desc_->data(), g_y, h_n_desc_.desc, g_h_n, c_y_desc_.desc, g_c_n,
params_desc_.desc, params, h_desc_.desc, h, c_x_desc_.desc, c,
x_desc_->data(), dx_tmp, h_desc_.desc, dh_tmp, c_x_desc_.desc, dc_tmp,
mem_buff, workspace_size_, mem_reserve_buff, reserve_size_));
if (propagate_down[0] && accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>),
inputs[0]->size(), dx_tmp, g_x);
}
if (propagate_down[1] && accum[1]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>),
inputs[1]->size(), dh_tmp, g_h);
}
if (propagate_down[2] && accum[2]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>),
                                   inputs[2]->size(), dc_tmp, g_c);
}
if (propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) ||
(inputs.size() == 6 && propagate_down[5])) {
NBLA_CUDNN_CHECK(cudnnRNNBackwardWeights(
cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x,
h_desc_.desc, h, y_desc_->data(), y, mem_buff, workspace_size_,
params_desc_.desc, g_params, mem_reserve_buff, reserve_size_));
}
bool w_init_accum = false;
bool w_accum = false;
bool b_accum = false;
bool w_prop = false;
bool b_prop = false;
if (propagate_down[3] && accum[3]) {
w_init_accum = true;
}
if (inputs.size() > 4 && propagate_down[4]) {
if (inputs.size() == 5 && weight_exists_) {
w_prop = true;
if (accum[4]) {
w_accum = true;
}
} else if (inputs.size() == 5 && bias_exists_) {
b_prop = true;
if (accum[4]) {
b_accum = true;
}
} else {
w_prop = true;
if (accum[4]) {
w_accum = true;
}
}
}
if (inputs.size() == 6 && propagate_down[5]) {
b_prop = true;
if (accum[5]) {
b_accum = true;
}
}
this->copy_params_to_gradients(g_params, g_w_init, g_weight, g_bias,
w_init_accum, w_accum, b_accum,
propagate_down[3], w_prop, b_prop);
}
} // namespace nbla
|
7a1f0838853689c8a6ac8c5907cfa53d2f19324a.cu
|
// Copyright 2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/cudnn/cudnn.hpp>
#include <nbla/cuda/cudnn/function/lstm.hpp>
#include <nbla/random_manager.hpp>
#include <nbla/variable.hpp>
#include <array>
#include <random>
namespace nbla {
template <typename ARRAY>
typename ARRAY::value_type array_product(const ARRAY &arr) {
typename ARRAY::value_type p = 1;
for (auto a : arr) {
p *= a;
}
return p;
}
template <typename T>
__global__ void kernel_forward_copy_weights(size_t size, const T *weight,
T *param, int j_stride,
int input_dim) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
int stride;
stride = (i / input_dim) * j_stride;
param[i] = weight[i + stride];
}
}
template <typename T>
__global__ void kernel_forward_copy_bias(size_t size, const T *bias, T *param) {
NBLA_CUDA_KERNEL_LOOP(i, size) { param[i] = bias[i]; }
}
template <typename T>
__global__ void kernel_accumulate_x_and_h(size_t size, const T *d_ptr, T *d) {
NBLA_CUDA_KERNEL_LOOP(i, size) { d[i] += d_ptr[i]; }
}
template <typename T>
__global__ void kernel_backward_copy_weights(size_t size, T *g_weight,
T *g_param, int j_stride,
int input_dim, bool accum) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
int stride;
stride = (i / input_dim) * j_stride;
if (accum) {
g_weight[i + stride] += g_param[i];
} else {
g_weight[i + stride] = g_param[i];
}
}
}
template <typename T>
__global__ void kernel_backward_copy_bias(size_t size, T *g_bias, T *g_param,
bool accum) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
if (accum) {
g_bias[i] += g_param[i];
} else {
g_bias[i] = g_param[i];
}
}
}
template <typename T>
void LSTMCudaCudnn<T>::copy_weight_bias_to_params(
Tcu *params, const Tcu *w_init, const Tcu *weight, const Tcu *bias,
bool weight_exists, bool bias_exists) {
for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_;
layer_id++) {
for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_;
lin_layer_id++) {
int param_index = layer_id * num_lin_layers_ + lin_layer_id;
int inweight_offset = 0;
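      // Each gate's weights are stored side by side as [hidden_size, I + H],
      // where I is input_dim_ for the first layer and num_directions_ *
      // hidden_size_ for deeper layers; lin_layer_id 0-3 copy the first I
      // columns (input side) and 4-7 the trailing H columns (recurrent side),
      // hence the extra offset and swapped stride arguments in the branches
      // below.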
if (layer_id / num_directions_ == 0) {
if (lin_layer_id < 4) {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, input_dim_);
} else {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
(lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) +
input_dim_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
input_dim_, hidden_size_)
}
} else {
if (lin_layer_id < 4) {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
lin_layer_id * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_);
if (this->num_layers_ > 1 && weight_exists) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, num_directions_ * hidden_size_);
}
} else {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
(lin_layer_id - 4) * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_) +
num_directions_ * hidden_size_;
if (this->num_layers_ > 1 && weight_exists) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
num_directions_ * hidden_size_, hidden_size_);
}
}
}
if (bias_exists && bias && lin_layer_id < 4) {
// copy only when lin_layer_id = 0, 1, 2, 3
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_forward_copy_bias<Tcu>), bias_offsets_[param_index].second,
bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_,
params + bias_offsets_[param_index].first / sizeof(T));
}
}
}
}
template <typename T>
void LSTMCudaCudnn<T>::copy_params_to_gradients(
Tcu *params, Tcu *w_init, Tcu *weight, Tcu *bias, bool w_init_accum,
bool w_accum, bool b_accum, bool w_init_propagate, bool w_propagate,
bool b_propagate) {
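  // Mirror of copy_weight_bias_to_params: scatters gradients from the
  // flattened cuDNN parameter buffer back into the user-facing gradient
  // arrays, overwriting or accumulating depending on the *_accum flags.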
for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_;
layer_id++) {
for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_;
lin_layer_id++) {
int param_index = layer_id * num_lin_layers_ + lin_layer_id;
int inweight_offset = 0;
if (layer_id / num_directions_ == 0) {
if (lin_layer_id < 4) {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
lin_layer_id * hidden_size_ * (input_dim_ + hidden_size_);
if (w_init_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, input_dim_, w_init_accum);
}
} else {
inweight_offset =
layer_id * (input_dim_ + hidden_size_) * 4 * hidden_size_ +
(lin_layer_id - 4) * hidden_size_ * (input_dim_ + hidden_size_) +
input_dim_;
if (w_init_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, w_init + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
input_dim_, hidden_size_, w_init_accum);
}
}
} else {
if (lin_layer_id < 4) {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
lin_layer_id * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_);
if (w_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
hidden_size_, num_directions_ * hidden_size_, w_accum);
}
} else {
inweight_offset =
(layer_id - num_directions_) *
(num_directions_ * hidden_size_ + hidden_size_) * 4 *
hidden_size_ +
(lin_layer_id - 4) * hidden_size_ *
(num_directions_ * hidden_size_ + hidden_size_) +
num_directions_ * hidden_size_;
if (w_propagate) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_weights<Tcu>),
weight_offsets_[param_index].second, weight + inweight_offset,
params + weight_offsets_[param_index].first / sizeof(T),
num_directions_ * hidden_size_, hidden_size_, w_accum);
}
}
}
if (bias && b_propagate && lin_layer_id < 4) {
// copy only when lin_layer_id = 0, 1, 2, 3
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_backward_copy_bias<Tcu>), bias_offsets_[param_index].second,
bias + 4 * layer_id * hidden_size_ + lin_layer_id * hidden_size_,
params + bias_offsets_[param_index].first / sizeof(T), b_accum);
}
}
}
}
template <typename T>
void LSTMCudaCudnn<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
// Create x descriptors and y descriptors by resizing
// Set descriptors
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
Shape_t inshape = inputs[0]->shape();
Shape_t hshape = inputs[1]->shape();
Shape_t cshape = inputs[2]->shape();
Shape_t outshape = outputs[0]->shape();
// Check input dimensions
NBLA_CHECK(inputs[0]->ndim() == 3, error_code::value,
"Input x must be a 3 dimensional array with a shape of (steps, "
"batch_size, input_size).");
// Get input dimensions
cudnnDataType_t dt_ = cudnn_data_type<T>::type();
seq_len_ = inshape[0];
int batch_size = inshape[1];
input_dim_ = inshape[2];
// Assuming this function takes h as (numLayer, numD, B, M)
hidden_size_ = inputs[1]->shape()[3];
inputMode = CUDNN_LINEAR_INPUT;
num_directions_ = this->bidirectional_ ? 2 : 1;
direction = this->bidirectional_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL;
RNNMode = CUDNN_LSTM;
num_lin_layers_ = 8;
// Check shape of h & c
const char *error_msg_h = "Input h must be a 4 dimensional array with a "
"shape of (num_layers, num_directions, batch_size, "
"hidden_size).";
NBLA_CHECK(inputs[1]->ndim() == 4, error_code::value, error_msg_h);
NBLA_CHECK(hshape[0] == this->num_layers_, error_code::value, error_msg_h);
NBLA_CHECK(hshape[1] == num_directions_, error_code::value, error_msg_h);
NBLA_CHECK(hshape[2] == batch_size, error_code::value, error_msg_h);
NBLA_CHECK(hshape == cshape, error_code::value,
"Input c must has the same shape as input h.");
// Check weight shape at 0th layer
Shape_t w0_shape = inputs[3]->shape();
const char *error_msg_w0 = "Input w0 must be a 4 dimensional array with a "
"shape of (num_directions, 4, hidden_size, "
"input_size + hidden_size).";
  NBLA_CHECK(inputs[3]->ndim() == 4, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[0] == num_directions_, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[1] == 4, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[2] == hidden_size_, error_code::value, error_msg_w0);
NBLA_CHECK(w0_shape[3] == hidden_size_ + input_dim_, error_code::value,
error_msg_w0);
weight_exists_ = true;
bias_exists_ = true;
if (inputs.size() == 4) {
weight_exists_ = false;
bias_exists_ = false;
} else if (inputs.size() == 5) {
Shape_t opt_shape = inputs[4]->shape();
if (this->num_layers_ > 1 && opt_shape.size() == 5) {
bias_exists_ = false;
} else if (this->num_layers_ > 1 && opt_shape.size() != 5) {
NBLA_ERROR(error_code::value,
"Weight argument must be passed when num_layers > 1");
} else if (this->num_layers_ == 1 && opt_shape.size() != 4) {
NBLA_ERROR(error_code::value,
"Weight argument cannot be passed when num_layers == 1");
} else if (this->num_layers_ == 1 && opt_shape.size() == 4) {
weight_exists_ = false;
}
} else if ((inputs.size() > 5) && (this->num_layers_ == 1)) {
NBLA_ERROR(error_code::value,
"Weight argument cannot be passed when num_layers == 1");
}
// Check weight shape
if (weight_exists_) {
Shape_t w_shape = inputs[4]->shape();
const char *error_msg_w = "Input w must be a 5 dimensional array with a "
"shape of (num_layers - 1, num_directions, 4, "
"hidden_size, num_directions * hidden_size + "
"hidden_size).";
NBLA_CHECK(inputs[4]->ndim() == 5, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[0] == this->num_layers_ - 1, error_code::value,
error_msg_w);
NBLA_CHECK(w_shape[1] == num_directions_, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[2] == 4, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[3] == hidden_size_, error_code::value, error_msg_w);
NBLA_CHECK(w_shape[4] == num_directions_ * hidden_size_ + hidden_size_,
error_code::value, error_msg_w);
}
// Check bias shape
if (bias_exists_) {
const int b_index = weight_exists_ ? 5 : 4;
Shape_t b_shape = inputs[b_index]->shape();
const char *error_msg_b = "Input b must be a 4 dimensional array with a "
"shape of (num_layers, 4, num_directions, "
"hidden_size).";
NBLA_CHECK(inputs[b_index]->ndim() == 4, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[0] == this->num_layers_, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[1] == num_directions_, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[2] == 4, error_code::value, error_msg_b);
NBLA_CHECK(b_shape[3] == hidden_size_, error_code::value, error_msg_b);
}
// Set X desc
// xdesc : T * (B, N, 1)
// x : (T, B, N) row-major
x_desc_.reset(new WCudnnTensorDescArray(seq_len_));
for (auto &x : x_desc_->desc_array()) {
std::array<int, 3> dimA{batch_size, input_dim_, 1};
std::array<int, 3> strideA{input_dim_, 1, 1};
NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(x, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(),
strideA.data()));
}
// Set hx and hy desc
// hxDesc : (numLayer * numD, B, M)
// hx : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M)
// row-major
// hyDesc : (numLayer * numD, B, M)
// hy : (numLayer, numD, B, M) row-major >>> or (numD, numLayer, B, M)
// row-major
{
std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size,
hidden_size_};
std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1};
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(h_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(h_n_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
}
// cx and cy
{
std::array<int, 3> dimA{this->num_layers_ * num_directions_, batch_size,
hidden_size_};
std::array<int, 3> strideA{batch_size * hidden_size_, hidden_size_, 1};
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(c_x_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
NBLA_CUDNN_CHECK(
cudnnSetTensorNdDescriptor(c_y_desc_.desc, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(), strideA.data()));
}
// Set Y desc
// yDesc : T * (B, M * numD, 1)
// y : (T, B, M, numD) row-major, >>> or (T, B, numD, M)
y_desc_.reset(new WCudnnTensorDescArray(seq_len_));
for (auto &y : y_desc_->desc_array()) {
std::array<int, 3> dimA{batch_size, hidden_size_ * num_directions_, 1};
std::array<int, 3> strideA{hidden_size_ * num_directions_, 1, 1};
NBLA_CUDNN_CHECK(cudnnSetTensorNdDescriptor(y, cudnn_data_type<T>::type(),
dimA.size(), dimA.data(),
strideA.data()));
}
// Get an RNN algorithm using cudnnGetRNNAlgorithm or cudnnFindRNNAlgorithm.
  // NOTE: the find algorithm runs many algorithms exhaustively and picks the
  // best one.
// Set dropout descriptor
size_t dropout_stateSize;
NBLA_CUDNN_CHECK(cudnnDropoutGetStatesSize(cudnn_handle, &dropout_stateSize));
state_array_.reshape(Shape_t{static_cast<Size_t>(dropout_stateSize)}, true);
void *state_ptr =
state_array_.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
std::mt19937 &rgen =
SingletonManager::get<RandomManager>()->get_rand_generator();
std::uniform_int_distribution<> dist(0, 999);
NBLA_CUDNN_CHECK(cudnnSetDropoutDescriptor(dropout_desc_.desc, cudnn_handle,
this->dropout_, state_ptr,
dropout_stateSize, dist(rgen)));
// Set RNN descriptor.
#if CUDNN_VERSION >= 7000
NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
cudnn_handle, rnn_desc_.desc, hidden_size_, this->num_layers_,
dropout_desc_.desc, inputMode, direction, RNNMode,
CUDNN_RNN_ALGO_STANDARD, dt_));
#else
NBLA_CUDNN_CHECK(cudnnSetRNNDescriptor(rnn_desc_.desc, hidden_size_,
this->num_layers_, dropout_desc_.desc,
inputMode, direction, RNNMode, dt_));
#endif
// Get workspace size and reserve size
NBLA_CUDNN_CHECK(cudnnGetRNNWorkspaceSize(cudnn_handle, rnn_desc_.desc,
seq_len_, x_desc_->data(),
&workspace_size_));
if (this->training_) {
NBLA_CUDNN_CHECK(
cudnnGetRNNTrainingReserveSize(cudnn_handle, rnn_desc_.desc, seq_len_,
x_desc_->data(), &reserve_size_));
}
  // Get number of parameters both in bytes and in elements.
NBLA_CUDNN_CHECK(cudnnGetRNNParamsSize(cudnn_handle, rnn_desc_.desc,
x_desc_->data()[0],
¶ms_size_in_bytes_, dt_));
total_params_ = params_size_in_bytes_ / sizeof(T);
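  // total_params_ is the element count of the flattened parameter buffer that
  // cuDNN expects; cudnnGetRNNParamsSize reports its size in bytes.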
// Set params descriptor
{
std::array<int, 3> filter_dims{(int)total_params_, 1, 1};
NBLA_CUDNN_CHECK(cudnnSetFilterNdDescriptor(
params_desc_.desc, cudnn_data_type<T>::type(), CUDNN_TENSOR_NCHW, 3,
filter_dims.data()));
}
  // Calculate address correspondences between input parameters (weights and
  // biases) and the flattened parameters buffer.
  // weight : [H, I+H]
  // bias : [H]
  // Temporary buffer. This is used only for getting address offsets of
  // matrices and biases from the head of the params pointer.
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
Tcu *params =
params_array.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
weight_offsets_.clear();
bias_offsets_.clear();
WCudnnFilterDesc lin_layer_mat_desc;
for (int64_t layer_id = 0; layer_id < this->num_layers_ * num_directions_;
layer_id++) {
for (int64_t lin_layer_id = 0; lin_layer_id < num_lin_layers_;
lin_layer_id++) {
void *matrix_pointer;
int nb_dims;
cudnnDataType_t data_type;
cudnnTensorFormat_t format;
std::array<int, 3> dim;
      // Get an address pointing to the weight matrix corresponding to layer_id
      // and lin_layer_id, and its shape.
NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerMatrixParams(
cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0],
params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc,
&matrix_pointer));
NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3,
&data_type, &format, &nb_dims,
dim.data()));
// Size of the weight matrix can be obtained by a product of dim
// elements.
int weight_size = array_product(dim);
weight_offsets_.push_back(
{intptr_t(matrix_pointer) - intptr_t(params), weight_size});
      // Get the address of the bias vector corresponding to layer_id and
      // lin_layer_id, and its size.
NBLA_CUDNN_CHECK(cudnnGetRNNLinLayerBiasParams(
cudnn_handle, rnn_desc_.desc, layer_id, x_desc_->data()[0],
params_desc_.desc, params, lin_layer_id, lin_layer_mat_desc.desc,
&matrix_pointer));
NBLA_CUDNN_CHECK(cudnnGetFilterNdDescriptor(lin_layer_mat_desc.desc, 3,
&data_type, &format, &nb_dims,
dim.data()));
// Size of the bias vector can be obtained by a product of dim elements.
int bias_size = array_product(dim);
bias_offsets_.push_back(
{intptr_t(matrix_pointer) - intptr_t(params), bias_size});
}
}
// Set output shapes
outputs[0]->reshape({seq_len_, batch_size, num_directions_ * hidden_size_},
true);
outputs[1]->reshape(inputs[1]->shape(), true);
outputs[2]->reshape(inputs[2]->shape(), true);
}
template <typename T>
void LSTMCudaCudnn<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->training_) { // Training mode.
forward_impl_training(inputs, outputs);
} else { // Testing mode.
forward_impl_inference(inputs, outputs);
}
}
template <typename T>
void LSTMCudaCudnn<T>::forward_impl_training(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
// Inputs and outputs
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *weight{nullptr};
const Tcu *bias{nullptr};
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_);
if (inputs.size() == 5) {
if (weight_exists_) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
} else if (bias_exists_) {
bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
}
}
if (inputs.size() > 5) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_);
}
// Create flattened weight buffer.
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
params_array.zero(); // Initialize params with 0
Tcu *params = params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_,
bias_exists_);
void *mem_buff = nullptr;
NdArray mem_workspace;
if (workspace_size_) {
mem_workspace.reshape({static_cast<Size_t>(workspace_size_)}, true);
mem_buff =
mem_workspace.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
}
if (mem_reservespace_.array()->get_num_arrays() > 0) {
NBLA_CHECK(mem_reservespace_.size() == reserve_size_, error_code::value,
"reserve_size_ is inconsistent with the previously set "
"reservespace size.");
} else {
mem_reservespace_.reshape({static_cast<Size_t>(reserve_size_)}, true);
}
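  // The reserve space is kept as a member so that backward_impl can consume
  // the activations written here by cudnnRNNForwardTraining.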
void *mem_reserve_buff =
mem_reservespace_.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
auto alpha = get_cudnn_scalar_arg<T>(1);
auto beta = get_cudnn_scalar_arg<T>(0);
NBLA_CUDNN_CHECK(cudnnRNNForwardTraining(
cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc,
h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y,
h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_buff, workspace_size_,
mem_reserve_buff, reserve_size_));
}
template <typename T>
void LSTMCudaCudnn<T>::forward_impl_inference(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *weight{nullptr};
const Tcu *bias{nullptr};
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *h_n = outputs[1]->cast_data_and_get_pointer<Tcu>(this->ctx_);
Tcu *c_n = outputs[2]->cast_data_and_get_pointer<Tcu>(this->ctx_);
if (inputs.size() == 5) {
if (weight_exists_) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
} else if (bias_exists_) {
bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
}
}
if (inputs.size() > 5) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_);
}
// Create flattened weight buffer.
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
params_array.zero(); // Initialize params with 0
Tcu *params = params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_,
bias_exists_);
void *mem_buff = nullptr;
NdArray mem_workspace;
if (workspace_size_) {
mem_workspace.reshape({static_cast<Size_t>(workspace_size_)}, true);
mem_buff =
mem_workspace.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
}
NBLA_CUDNN_CHECK(cudnnRNNForwardInference(
cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x, h_desc_.desc,
h, c_x_desc_.desc, c, params_desc_.desc, params, y_desc_->data(), y,
h_n_desc_.desc, h_n, c_y_desc_.desc, c_n, mem_buff, workspace_size_));
}
template <typename T>
void LSTMCudaCudnn<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0] || propagate_down[1] || propagate_down[2] ||
propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) ||
(inputs.size() > 5 && propagate_down[5]))) {
return;
}
NBLA_CHECK(this->training_, error_code::value,
"Backward is called for training only.");
NBLA_CHECK(mem_reservespace_.array()->get_num_arrays() > 0, error_code::value,
"Reserve space should be allocated memory space.");
NBLA_CHECK(mem_reservespace_.size() == reserve_size_, error_code::value,
"reserve_size_ is inconsistent with the previously set "
"reservespace size.");
if (inputs.size() > 5 && propagate_down[5]) {
NBLA_CHECK(propagate_down[3] == propagate_down[4], error_code::value,
"If bias is backpropagated, so should weights.");
}
cuda_set_device(this->device_);
auto cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *w_init = inputs[3]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *weight{nullptr};
const Tcu *bias{nullptr};
const Tcu *g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
const Tcu *g_h_n = outputs[1]->get_grad_pointer<Tcu>(this->ctx_);
const Tcu *g_c_n = outputs[2]->get_grad_pointer<Tcu>(this->ctx_);
if (inputs.size() == 5) {
if (weight_exists_) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
} else if (bias_exists_) {
bias = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
}
}
if (inputs.size() > 5) {
weight = inputs[4]->get_data_pointer<Tcu>(this->ctx_);
bias = inputs[5]->get_data_pointer<Tcu>(this->ctx_);
}
const Tcu *y = outputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *h_n = outputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *c_n = outputs[2]->get_data_pointer<Tcu>(this->ctx_);
Tcu *g_x{nullptr};
Tcu *g_h{nullptr};
Tcu *g_c{nullptr};
Tcu *g_w_init{nullptr};
Tcu *g_weight{nullptr};
Tcu *g_bias{nullptr};
NdArray params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
NdArray g_params_array(Shape_t{static_cast<Size_t>(params_size_in_bytes_)});
params_array.zero(); // Initialize params with 0
g_params_array.zero();
Tcu *params = params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
Tcu *g_params = g_params_array.cast(dtypes::BYTE, this->ctx_)->pointer<Tcu>();
this->copy_weight_bias_to_params(params, w_init, weight, bias, weight_exists_,
bias_exists_);
if (propagate_down[0]) {
g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
}
if (propagate_down[1]) {
g_h = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
}
if (propagate_down[2]) {
g_c = inputs[2]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[2]);
}
if (propagate_down[3]) {
g_w_init = inputs[3]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[3]);
}
if (inputs.size() == 5 && propagate_down[4]) {
if (weight_exists_) {
g_weight =
inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]);
} else if (bias_exists_) {
g_bias = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]);
}
}
if (inputs.size() == 6 && propagate_down[4]) {
g_weight = inputs[4]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[4]);
}
if (inputs.size() == 6 && propagate_down[5]) {
g_bias = inputs[5]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[5]);
}
void *mem_buff = nullptr;
NdArray mem_workspace;
if (workspace_size_) {
mem_workspace.reshape({static_cast<Size_t>(workspace_size_)}, true);
mem_buff =
mem_workspace.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
}
void *mem_reserve_buff =
mem_reservespace_.cast(dtypes::BYTE, this->ctx_, true)->pointer<void>();
NdArray mem_x_accum;
NdArray mem_h_accum;
NdArray mem_c_accum;
Tcu *dx_tmp = g_x;
Tcu *dh_tmp = g_h;
Tcu *dc_tmp = g_c;
if (!propagate_down[0] || accum[0]) {
mem_x_accum.reshape({static_cast<Size_t>(inputs[0]->size() * sizeof(Tcu))},
true);
dx_tmp = mem_x_accum.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
}
if (!propagate_down[1] || accum[1]) {
mem_h_accum.reshape({static_cast<Size_t>(inputs[1]->size() * sizeof(Tcu))},
true);
dh_tmp = mem_h_accum.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
}
if (!propagate_down[2] || accum[2]) {
mem_c_accum.reshape({static_cast<Size_t>(inputs[2]->size() * sizeof(Tcu))},
true);
dc_tmp = mem_c_accum.cast(dtypes::BYTE, this->ctx_, true)->pointer<Tcu>();
}
NBLA_CUDNN_CHECK(cudnnRNNBackwardData(
cudnn_handle, rnn_desc_.desc, seq_len_, y_desc_->data(), y,
y_desc_->data(), g_y, h_n_desc_.desc, g_h_n, c_y_desc_.desc, g_c_n,
params_desc_.desc, params, h_desc_.desc, h, c_x_desc_.desc, c,
x_desc_->data(), dx_tmp, h_desc_.desc, dh_tmp, c_x_desc_.desc, dc_tmp,
mem_buff, workspace_size_, mem_reserve_buff, reserve_size_));
if (propagate_down[0] && accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>),
inputs[0]->size(), dx_tmp, g_x);
}
if (propagate_down[1] && accum[1]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>),
inputs[1]->size(), dh_tmp, g_h);
}
if (propagate_down[2] && accum[2]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_accumulate_x_and_h<Tcu>),
                                   inputs[2]->size(), dc_tmp, g_c);
}
if (propagate_down[3] || (inputs.size() > 4 && propagate_down[4]) ||
(inputs.size() == 6 && propagate_down[5])) {
NBLA_CUDNN_CHECK(cudnnRNNBackwardWeights(
cudnn_handle, rnn_desc_.desc, seq_len_, x_desc_->data(), x,
h_desc_.desc, h, y_desc_->data(), y, mem_buff, workspace_size_,
params_desc_.desc, g_params, mem_reserve_buff, reserve_size_));
}
bool w_init_accum = false;
bool w_accum = false;
bool b_accum = false;
bool w_prop = false;
bool b_prop = false;
if (propagate_down[3] && accum[3]) {
w_init_accum = true;
}
if (inputs.size() > 4 && propagate_down[4]) {
if (inputs.size() == 5 && weight_exists_) {
w_prop = true;
if (accum[4]) {
w_accum = true;
}
} else if (inputs.size() == 5 && bias_exists_) {
b_prop = true;
if (accum[4]) {
b_accum = true;
}
} else {
w_prop = true;
if (accum[4]) {
w_accum = true;
}
}
}
if (inputs.size() == 6 && propagate_down[5]) {
b_prop = true;
if (accum[5]) {
b_accum = true;
}
}
this->copy_params_to_gradients(g_params, g_w_init, g_weight, g_bias,
w_init_accum, w_accum, b_accum,
propagate_down[3], w_prop, b_prop);
}
} // namespace nbla
|
46503901bd3349799bc350c2adb7eb5e57d40582.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_iface.h"
#include "cuda_mparticles.h"
#include "cuda_mfields.h"
#include "cuda_bits.h"
#include "psc_bits.h"
#include "heating_spot_foil.hxx"
#include "heating_cuda_impl.hxx"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <hiprand/hiprand_kernel.h>
#include <cstdio>
#define THREADS_PER_BLOCK 256
// ----------------------------------------------------------------------
// cuda_heating_params
struct cuda_heating_params {
float_3 *d_xb_by_patch;
};
// ----------------------------------------------------------------------
// cuda_heating_params_set
template<typename BS>
static void cuda_heating_params_set(cuda_heating_params& h_prm, cuda_mparticles<BS>* cmprts)
{
hipError_t ierr;
ierr = hipMalloc(&h_prm.d_xb_by_patch, cmprts->n_patches * sizeof(float_3));
cudaCheck(ierr);
ierr = hipMemcpy(h_prm.d_xb_by_patch, cmprts->xb_by_patch.data(),
cmprts->n_patches * sizeof(float_3), hipMemcpyHostToDevice);
cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// cuda_heating_params_free
static void cuda_heating_params_free(cuda_heating_params& h_prm)
{
hipError_t ierr;
  ierr = hipFree(h_prm.d_xb_by_patch);
cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// bm_normal2
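// Box-Muller transform on the host: converts two uniform deviates into two
// independent standard normal deviates (used by the gold/reference kick path).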
static inline float2
bm_normal2(void)
{
float u1, u2;
do {
u1 = random() * (1.f / RAND_MAX);
u2 = random() * (1.f / RAND_MAX);
} while (u1 <= 0.f);
float2 rv;
rv.x = sqrtf(-2.f * logf(u1)) * cosf(2.f * M_PI * u2);
rv.y = sqrtf(-2.f * logf(u1)) * sinf(2.f * M_PI * u2);
return rv;
}
// ----------------------------------------------------------------------
// k_curand_setup
__global__ static void
k_curand_setup(hiprandState_t *d_curand_states, int b_my)
{
int bid = blockIdx.y * b_my + blockIdx.x;
int id = threadIdx.x + bid * THREADS_PER_BLOCK;
hiprand_init(1234, id % 1024, 0, &d_curand_states[id]); // FIXME, % 1024 hack
}
struct cuda_heating_foil;
template<typename BS>
__global__ static void
k_heating_run_foil(cuda_heating_foil d_foil, DMparticlesCuda<BS> dmprts, struct cuda_heating_params prm,
hiprandState_t *d_curand_states);
// ======================================================================
// cuda_heating_foil
struct cuda_heating_foil : HeatingSpotFoilParams
{
cuda_heating_foil(const HeatingSpotFoilParams& params, int kind, double heating_dt)
: HeatingSpotFoilParams(params), kind(kind), heating_dt(heating_dt),
h_prm_{}
{
float width = zh - zl;
fac = (8.f * pow(T, 1.5)) / (sqrt(Mi) * width);
}
~cuda_heating_foil()
{
#if 0 // FIXME
cuda_heating_params_free(h_prm_);
hipError_t ierr;
ierr = hipFree(d_curand_states_);
cudaCheck(ierr);
#endif
}
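  // Heating profile: a Gaussian in the x-y plane centered at (xc, yc) with
  // radius rH, nonzero only for zl < z < zh.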
__host__ __device__ float get_H(float *xx)
{
if (xx[2] <= zl || xx[2] >= zh) {
return 0;
}
return fac * exp(-(sqr(xx[0] - xc) +
sqr(xx[1] - yc)) / sqr(rH));
}
// ----------------------------------------------------------------------
// particle_kick
__host__ void particle_kick(float4 *pxi4, float H)
{
float2 r01 = bm_normal2();
float2 r23 = bm_normal2();
float Dp = sqrtf(H * heating_dt);
pxi4->x += Dp * r01.x;
pxi4->y += Dp * r01.y;
pxi4->z += Dp * r23.x;
}
// ----------------------------------------------------------------------
// d_particle_kick
__device__ void d_particle_kick(float4 *pxi4, float H, hiprandState_t *state)
{
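    // Diffusive momentum kick: each momentum component receives an independent
    // normal increment with standard deviation sqrt(H * heating_dt).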
float2 r01 = hiprand_normal2(state);
float r2 = hiprand_normal(state);
float Dp = sqrtf(H * heating_dt);
pxi4->x += Dp * r01.x;
pxi4->y += Dp * r01.y;
pxi4->z += Dp * r2;
}
// ----------------------------------------------------------------------
// run_foil
template<typename BS>
void run_foil(cuda_mparticles<BS>* cmprts, hiprandState_t *d_curand_states)
{
dim3 dimGrid = BlockSimple<BS, dim_yz>::dimGrid(*cmprts);
hipLaunchKernelGGL(( k_heating_run_foil<BS>)
, dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, *this, *cmprts, h_prm_, d_curand_states);
cuda_sync_if_enabled();
}
// ----------------------------------------------------------------------
// operator()
template<typename BS>
void operator()(cuda_mparticles<BS>* cmprts)
{
//return cuda_heating_run_foil_gold(cmprts);
static bool first_time = true;
if (first_time) {
cuda_heating_params_set(h_prm_, cmprts);
dim3 dimGrid = BlockSimple<BS, dim_yz>::dimGrid(*cmprts);
int n_threads = dimGrid.x * dimGrid.y * THREADS_PER_BLOCK;
hipError_t ierr;
ierr = hipMalloc(&d_curand_states_, n_threads * sizeof(*d_curand_states_));
cudaCheck(ierr);
hipLaunchKernelGGL(( k_curand_setup), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_curand_states_, cmprts->b_mx()[1]);
cuda_sync_if_enabled();
first_time = false;
}
if (cmprts->need_reorder) {
cmprts->reorder();
}
run_foil<BS>(cmprts, d_curand_states_);
}
// params
int kind;
// state (FIXME, shouldn't be part of the interface)
float fac;
float heating_dt;
cuda_heating_params h_prm_;
hiprandState_t* d_curand_states_;
};
// ----------------------------------------------------------------------
// cuda_heating_run_foil_gold
template<typename BS>
void cuda_heating_run_foil_gold(cuda_heating_foil& foil, cuda_mparticles<BS>* cmprts)
{
for (int b = 0; b < cmprts->n_blocks; b++) {
int p = b / cmprts->n_blocks_per_patch;
for (int n = cmprts->d_off[b]; n < cmprts->d_off[b+1]; n++) {
float4 xi4 = cmprts->d_xi4[n];
int prt_kind = cuda_float_as_int(xi4.w);
if (prt_kind != foil.kind) {
continue;
}
float *xb = &cmprts->xb_by_patch[p][0];
float xx[3] = {
xi4.x + xb[0],
xi4.y + xb[1],
xi4.z + xb[2],
};
float H = foil.get_H(xx);
// float4 pxi4 = d_pxi4[n];
// printf("%s xx = %g %g %g H = %g px = %g %g %g\n", (H > 0) ? "H" : " ",
// xx[0], xx[1], xx[2], H,
// pxi4.x, pxi4.y, pxi4.z);
// pxi4.w = H;
// d_pxi4[n] = pxi4;
if (H > 0) {
float4 pxi4 = cmprts->d_pxi4[n];
foil.particle_kick(&pxi4, H);
cmprts->d_pxi4[n] = pxi4;
// printf("H xx = %g %g %g H = %g px = %g %g %g\n", xx[0], xx[1], xx[2], H,
// pxi4.x, pxi4.y, pxi4.z);
}
}
}
}
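// Host-side reference ("gold") implementation; the production path is the
// k_heating_run_foil kernel below (see the commented-out call in operator()).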
// ----------------------------------------------------------------------
// k_heating_run_foil
template<typename BS>
__global__ static void
__launch_bounds__(THREADS_PER_BLOCK, 3)
k_heating_run_foil(cuda_heating_foil d_foil, DMparticlesCuda<BS> dmprts, struct cuda_heating_params prm,
hiprandState_t *d_curand_states)
{
BlockSimple<BS, dim_yz> current_block;
if (!current_block.init(dmprts)) {
return;
}
float_3 xb; // __shared__
xb[0] = prm.d_xb_by_patch[current_block.p][0];
xb[1] = prm.d_xb_by_patch[current_block.p][1];
xb[2] = prm.d_xb_by_patch[current_block.p][2];
int id = threadIdx.x + current_block.bid * THREADS_PER_BLOCK;
/* Copy state to local memory for efficiency */
hiprandState_t local_state = d_curand_states[id];
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
float4 xi4 = dmprts.xi4_[n];
int prt_kind = __float_as_int(xi4.w);
if (prt_kind != d_foil.kind) {
continue;
}
float xx[3] = {
xi4.x + xb[0],
xi4.y + xb[1],
xi4.z + xb[2],
};
float H = d_foil.get_H(xx);
//d_pxi4[n].w = H;
if (H > 0) {
float4 pxi4 = dmprts.pxi4_[n];
d_foil.d_particle_kick(&pxi4, H, &local_state);
dmprts.pxi4_[n] = pxi4;
}
}
d_curand_states[id] = local_state;
}
// ======================================================================
template<typename BS>
template<typename FUNC>
HeatingCuda<BS>::HeatingCuda(const Grid_t& grid, int interval, int kind, FUNC get_H)
{
foil_ = new cuda_heating_foil{get_H, kind, interval * grid.dt};
}
template<typename BS>
HeatingCuda<BS>::~HeatingCuda()
{
delete foil_;
}
template<typename BS>
void HeatingCuda<BS>::operator()(MparticlesCuda<BS>& mprts)
{
(*foil_)(mprts.cmprts());
}
// ======================================================================
template struct HeatingCuda<BS144>;
template HeatingCuda<BS144>::HeatingCuda(const Grid_t& grid, int interval, int kind, HeatingSpotFoil get_H);
template struct HeatingCuda<BS444>;
template HeatingCuda<BS444>::HeatingCuda(const Grid_t& grid, int interval, int kind, HeatingSpotFoil get_H);
|
46503901bd3349799bc350c2adb7eb5e57d40582.cu
|
#include "cuda_iface.h"
#include "cuda_mparticles.h"
#include "cuda_mfields.h"
#include "cuda_bits.h"
#include "psc_bits.h"
#include "heating_spot_foil.hxx"
#include "heating_cuda_impl.hxx"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <curand_kernel.h>
#include <cstdio>
#define THREADS_PER_BLOCK 256
// ----------------------------------------------------------------------
// cuda_heating_params
struct cuda_heating_params {
float_3 *d_xb_by_patch;
};
// ----------------------------------------------------------------------
// cuda_heating_params_set
template<typename BS>
static void cuda_heating_params_set(cuda_heating_params& h_prm, cuda_mparticles<BS>* cmprts)
{
cudaError_t ierr;
ierr = cudaMalloc(&h_prm.d_xb_by_patch, cmprts->n_patches * sizeof(float_3));
cudaCheck(ierr);
ierr = cudaMemcpy(h_prm.d_xb_by_patch, cmprts->xb_by_patch.data(),
cmprts->n_patches * sizeof(float_3), cudaMemcpyHostToDevice);
cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// cuda_heating_params_free
static void cuda_heating_params_free(cuda_heating_params& h_prm)
{
cudaError_t ierr;
ierr = cudaFree(h_prm.d_xb_by_patch);
cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// bm_normal2
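// Box-Muller transform: turns two uniform (0, 1] samples from random() into a
// pair of independent standard normal variates (host-side counterpart of curand_normal2)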
static inline float2
bm_normal2(void)
{
float u1, u2;
do {
u1 = random() * (1.f / RAND_MAX);
u2 = random() * (1.f / RAND_MAX);
} while (u1 <= 0.f);
float2 rv;
rv.x = sqrtf(-2.f * logf(u1)) * cosf(2.f * M_PI * u2);
rv.y = sqrtf(-2.f * logf(u1)) * sinf(2.f * M_PI * u2);
return rv;
}
// ----------------------------------------------------------------------
// k_curand_setup
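// Initializes one curandState per global thread id (seed 1234) so that
// k_heating_run_foil can draw independent Gaussian kicks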
__global__ static void
k_curand_setup(curandState *d_curand_states, int b_my)
{
int bid = blockIdx.y * b_my + blockIdx.x;
int id = threadIdx.x + bid * THREADS_PER_BLOCK;
curand_init(1234, id % 1024, 0, &d_curand_states[id]); // FIXME, % 1024 hack
}
struct cuda_heating_foil;
template<typename BS>
__global__ static void
k_heating_run_foil(cuda_heating_foil d_foil, DMparticlesCuda<BS> dmprts, struct cuda_heating_params prm,
curandState *d_curand_states);
// ======================================================================
// cuda_heating_foil
struct cuda_heating_foil : HeatingSpotFoilParams
{
cuda_heating_foil(const HeatingSpotFoilParams& params, int kind, double heating_dt)
: HeatingSpotFoilParams(params), kind(kind), heating_dt(heating_dt),
h_prm_{}
{
float width = zh - zl;
fac = (8.f * pow(T, 1.5)) / (sqrt(Mi) * width);
}
~cuda_heating_foil()
{
#if 0 // FIXME
cuda_heating_params_free(h_prm_);
cudaError_t ierr;
ierr = cudaFree(d_curand_states_);
cudaCheck(ierr);
#endif
}
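// get_H: heating rate profile -- a Gaussian in (x, y) centered at (xc, yc) with
// radius rH, applied only inside the foil region zl < z < zh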
__host__ __device__ float get_H(float *xx)
{
if (xx[2] <= zl || xx[2] >= zh) {
return 0;
}
return fac * exp(-(sqr(xx[0] - xc) +
sqr(xx[1] - yc)) / sqr(rH));
}
// ----------------------------------------------------------------------
// particle_kick
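// Host-side kick: adds a Gaussian random momentum increment with amplitude
// sqrt(H * heating_dt) to each momentum component, using bm_normal2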
__host__ void particle_kick(float4 *pxi4, float H)
{
float2 r01 = bm_normal2();
float2 r23 = bm_normal2();
float Dp = sqrtf(H * heating_dt);
pxi4->x += Dp * r01.x;
pxi4->y += Dp * r01.y;
pxi4->z += Dp * r23.x;
}
// ----------------------------------------------------------------------
// d_particle_kick
__device__ void d_particle_kick(float4 *pxi4, float H, curandState *state)
{
float2 r01 = curand_normal2(state);
float r2 = curand_normal(state);
float Dp = sqrtf(H * heating_dt);
pxi4->x += Dp * r01.x;
pxi4->y += Dp * r01.y;
pxi4->z += Dp * r2;
}
// ----------------------------------------------------------------------
// run_foil
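// Launches k_heating_run_foil over all particle blocks, THREADS_PER_BLOCK threads per block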
template<typename BS>
void run_foil(cuda_mparticles<BS>* cmprts, curandState *d_curand_states)
{
dim3 dimGrid = BlockSimple<BS, dim_yz>::dimGrid(*cmprts);
k_heating_run_foil<BS>
<<<dimGrid, THREADS_PER_BLOCK>>>(*this, *cmprts, h_prm_, d_curand_states);
cuda_sync_if_enabled();
}
// ----------------------------------------------------------------------
// operator()
template<typename BS>
void operator()(cuda_mparticles<BS>* cmprts)
{
//return cuda_heating_run_foil_gold(cmprts);
static bool first_time = true;
if (first_time) {
cuda_heating_params_set(h_prm_, cmprts);
dim3 dimGrid = BlockSimple<BS, dim_yz>::dimGrid(*cmprts);
int n_threads = dimGrid.x * dimGrid.y * THREADS_PER_BLOCK;
cudaError_t ierr;
ierr = cudaMalloc(&d_curand_states_, n_threads * sizeof(*d_curand_states_));
cudaCheck(ierr);
k_curand_setup<<<dimGrid, THREADS_PER_BLOCK>>>(d_curand_states_, cmprts->b_mx()[1]);
cuda_sync_if_enabled();
first_time = false;
}
if (cmprts->need_reorder) {
cmprts->reorder();
}
run_foil<BS>(cmprts, d_curand_states_);
}
// params
int kind;
// state (FIXME, shouldn't be part of the interface)
float fac;
float heating_dt;
cuda_heating_params h_prm_;
curandState* d_curand_states_;
};
// ----------------------------------------------------------------------
// cuda_heating_run_foil_gold
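// Host-side reference ("gold") implementation of the foil heating: applies the
// same particle selection and kick as the GPU kernel, one particle at a time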
template<typename BS>
void cuda_heating_run_foil_gold(cuda_heating_foil& foil, cuda_mparticles<BS>* cmprts)
{
for (int b = 0; b < cmprts->n_blocks; b++) {
int p = b / cmprts->n_blocks_per_patch;
for (int n = cmprts->d_off[b]; n < cmprts->d_off[b+1]; n++) {
float4 xi4 = cmprts->d_xi4[n];
int prt_kind = cuda_float_as_int(xi4.w);
if (prt_kind != foil.kind) {
continue;
}
float *xb = &cmprts->xb_by_patch[p][0];
float xx[3] = {
xi4.x + xb[0],
xi4.y + xb[1],
xi4.z + xb[2],
};
float H = foil.get_H(xx);
// float4 pxi4 = d_pxi4[n];
// printf("%s xx = %g %g %g H = %g px = %g %g %g\n", (H > 0) ? "H" : " ",
// xx[0], xx[1], xx[2], H,
// pxi4.x, pxi4.y, pxi4.z);
// pxi4.w = H;
// d_pxi4[n] = pxi4;
if (H > 0) {
float4 pxi4 = cmprts->d_pxi4[n];
foil.particle_kick(&pxi4, H);
cmprts->d_pxi4[n] = pxi4;
// printf("H xx = %g %g %g H = %g px = %g %g %g\n", xx[0], xx[1], xx[2], H,
// pxi4.x, pxi4.y, pxi4.z);
}
}
}
}
// ----------------------------------------------------------------------
// k_heating_run_foil
template<typename BS>
__global__ static void
__launch_bounds__(THREADS_PER_BLOCK, 3)
k_heating_run_foil(cuda_heating_foil d_foil, DMparticlesCuda<BS> dmprts, struct cuda_heating_params prm,
curandState *d_curand_states)
{
BlockSimple<BS, dim_yz> current_block;
if (!current_block.init(dmprts)) {
return;
}
float_3 xb; // __shared__
xb[0] = prm.d_xb_by_patch[current_block.p][0];
xb[1] = prm.d_xb_by_patch[current_block.p][1];
xb[2] = prm.d_xb_by_patch[current_block.p][2];
int id = threadIdx.x + current_block.bid * THREADS_PER_BLOCK;
/* Copy state to local memory for efficiency */
curandState local_state = d_curand_states[id];
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
float4 xi4 = dmprts.xi4_[n];
int prt_kind = __float_as_int(xi4.w);
if (prt_kind != d_foil.kind) {
continue;
}
float xx[3] = {
xi4.x + xb[0],
xi4.y + xb[1],
xi4.z + xb[2],
};
float H = d_foil.get_H(xx);
//d_pxi4[n].w = H;
if (H > 0) {
float4 pxi4 = dmprts.pxi4_[n];
d_foil.d_particle_kick(&pxi4, H, &local_state);
dmprts.pxi4_[n] = pxi4;
}
}
d_curand_states[id] = local_state;
}
// ======================================================================
template<typename BS>
template<typename FUNC>
HeatingCuda<BS>::HeatingCuda(const Grid_t& grid, int interval, int kind, FUNC get_H)
{
foil_ = new cuda_heating_foil{get_H, kind, interval * grid.dt};
}
template<typename BS>
HeatingCuda<BS>::~HeatingCuda()
{
delete foil_;
}
template<typename BS>
void HeatingCuda<BS>::operator()(MparticlesCuda<BS>& mprts)
{
(*foil_)(mprts.cmprts());
}
// ======================================================================
template struct HeatingCuda<BS144>;
template HeatingCuda<BS144>::HeatingCuda(const Grid_t& grid, int interval, int kind, HeatingSpotFoil get_H);
template struct HeatingCuda<BS444>;
template HeatingCuda<BS444>::HeatingCuda(const Grid_t& grid, int interval, int kind, HeatingSpotFoil get_H);
|
c7ec6cc22355d10634a9f24238805572af6987a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <hip/hip_runtime_api.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
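// Note: a single block with a single thread is launched, so the loop in add() runs serially on the device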
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
hipProfilerStop();
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
|
c7ec6cc22355d10634a9f24238805572af6987a4.cu
|
#include <iostream>
#include <math.h>
#include <cuda_profiler_api.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<1, 1>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
cudaProfilerStop();
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
DistributionGeometricKernel.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::geometric_kernel(iter, p_, generator);
}
REGISTER_DISPATCH(geometric_stub, &geometric_kernel);
}} // namespace at::native
|
DistributionGeometricKernel.cu
|
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::geometric_kernel(iter, p_, generator);
}
REGISTER_DISPATCH(geometric_stub, &geometric_kernel);
}} // namespace at::native
|
e425655f9e5e59128990b5698eb9e1dcb0e2128e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
__global__ void
magma_zmconjugate_kernel(
int num_rows,
magma_index_t *rowptr,
magmaDoubleComplex *values )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i = rowptr[row]; i < rowptr[row+1]; i++){
values[i] = MAGMA_Z_CONJ( values[i] );
}
}
}
/**
Purpose
-------
This function conjugates a matrix. For a real matrix, no value is changed.
Arguments
---------
@param[in,out]
A magma_z_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zmconjugate(
magma_z_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE ));
hipLaunchKernelGGL(( magma_zmconjugate_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
A->num_rows, A->drow, A->dval );
return info;
}
|
e425655f9e5e59128990b5698eb9e1dcb0e2128e.cu
|
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
__global__ void
magma_zmconjugate_kernel(
int num_rows,
magma_index_t *rowptr,
magmaDoubleComplex *values )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i = rowptr[row]; i < rowptr[row+1]; i++){
values[i] = MAGMA_Z_CONJ( values[i] );
}
}
}
/**
Purpose
-------
This function conjugates a matrix. For a real matrix, no value is changed.
Arguments
---------
@param[in,out]
A magma_z_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zmconjugate(
magma_z_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE ));
magma_zmconjugate_kernel<<< grid, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( A->num_rows, A->drow, A->dval );
return info;
}
|
58c892e847034450d8699064344f51f984434370.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Includes, cuda */
//#include <rocblas.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
/* Number of columns & rows in dictionary */
// TODO: get as input
#define M 300 // num of Dictionary columns
#define N 50 // num of Dictionary rows
#define X 25// number of signals
/* Number of non-zero elements in signal */
int K = 4;
/* Residual error */
double epsilon = 1.0e-7;
/* Max num of iterations - assume as same as num of elements in signal */
int T = N;
/* Sign function */
double sign(double x){return (x>=0) - (x<0);}
/* Matrix indexing convention */
#define id(m, n, ld) (((n) * (ld) + (m)))
int main(int argc, char** argv)
{
hipError_t cudaStat;
hipblasStatus_t status;
hipblasHandle_t handle;
hipStream_t stream[4];
double *h_D, *h_X, *h_C, *c; //host memory pointers
double *d_D = 0, *d_S = 0, *d_R = 0; //device memory pointers
int i;
int MX = M*X;
int NX = N*X;
int MN = M*N, m, n, k, q, t;
const double alpha = 1.0;
const double beta = 0.0;
double norm = sqrt(N), normi, normf, a, dtime;
printf("\nDictionary dimensions: N x M = %d x %d, K = %d, Number of Signals = %d", N, M, K, X);
/* Initialize srand and clock */
srand(time(NULL));
clock_t start = clock();
/* Initialize cublas */
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
for (i=0 ; i<4 ; i++){
cudaStat = hipStreamCreate(&stream[i]);
if (cudaStat != hipSuccess) {
fprintf (stderr,"! stream create error\n");
return EXIT_FAILURE;
}
hipblasSetStream(handle, stream[i] );
}
/* Initialize dictionary on host */
cudaStat = hipHostMalloc((void**)&h_D, MN * sizeof(h_D[0]));
if (cudaStat != hipSuccess) {
fprintf (stderr,"! stream D error\n");
return EXIT_FAILURE;
}
for(n = 0; n < N; n++){
for(m = 0; m < M; m++){
a = sign(2.0*rand()/(double)RAND_MAX-1.0)/norm;
h_D[id(m, n, M)] = a;
}
}
/* Create X random K-sparse signals */
cudaStat = hipHostMalloc((void**)&h_X, M*X * sizeof(h_X[0]));
if (cudaStat != hipSuccess) {
fprintf (stderr,"! stream X error\n");
return EXIT_FAILURE;
}
memset(h_X, 0, M*X * sizeof(h_X[0])); // start from all-zero signals before inserting the K nonzero entries
for (i = 0;i < X;i++){
for(k = 0; k < K; k++){
a = 2.0*rand()/(double)RAND_MAX - 1.0;
h_X[(rand()%M)+i*M] = a;}
}
/* Allocate solution memory on host */
cudaStat = hipHostMalloc((void**)&h_C, M*X * sizeof(h_C[0]));
if (cudaStat != hipSuccess) {
fprintf (stderr,"! stream X error\n");
return EXIT_FAILURE;
}
memset(h_C, 0, M*X * sizeof(h_C[0])); // the solution is accumulated into h_C, so it must start at zero
c = (double*)calloc(1, sizeof(*c));
if(c == 0){
fprintf(stderr, " host memory allocation error (c)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: dictionary */
cudaStat = hipMalloc ((void**)&d_D, (MN)*sizeof(d_D[0]));
if (cudaStat != hipSuccess) {
fprintf (stderr,"! device memory allocation error (dictionary)\n");
return EXIT_FAILURE;
}
//transfer the host dictionary to the device dictionary
status = hipblasSetVector(MN, sizeof(h_D[0]),h_D, 1, d_D, 1);
if(status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! device access error (write dictionary)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: signal */
cudaStat = hipMalloc ((void**)&d_R, MX*sizeof(d_R[0]));
if (cudaStat != hipSuccess) {
fprintf (stderr, "! device memory allocation error (signal)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(MX, sizeof(h_X[0]),h_X, 1, d_R, 1);
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device access error (write signal)\n");
return EXIT_FAILURE;
}
/*Allocate device memory for Signal Solution */
cudaStat = hipMalloc ((void**)&d_S, NX*sizeof(d_S[0]));
if (cudaStat != hipSuccess) {
fprintf (stderr, "! device memory allocation error (projected vector)\n");
return EXIT_FAILURE;
}
/* Encoding the signal on device*/
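/* One transposed Dgemv per signal projects the length-M input onto the dictionary, giving an N-length code in d_S */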
for (i = 0;i<X;i++) {
status = hipblasDgemv(handle,HIPBLAS_OP_T, M, N, &alpha, d_D, M,d_R+i*M, 1, &beta, d_S+i*N, 1);
if(status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! kernel execution error (encoding)\n");
return EXIT_FAILURE;
}
}
//dtime = ((double)clock()-start)/CLOCKS_PER_SEC; // TODO : need to remove
//printf("\nTime for encoding: %f(s)",dtime);
/* Decoding the signal on device*/
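/* Matching-pursuit loop: pick the atom with the largest correlation (Idamax), accumulate its
coefficient into h_C, subtract its contribution from the code (Daxpy), and stop once the
residual norm falls below epsilon or after T iterations */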
start = clock();
for (i = 0;i<X;i++) {
//hipStreamSynchronize(stream[i]);
hipblasDnrm2(handle,N, d_S+i*N, 1,&normi);
epsilon = sqrt(epsilon*normi);
normf = normi;
t = 0;
while(normf > epsilon && t < T){
//printf("\n %f",normf);
hipblasDgemv(handle,HIPBLAS_OP_N, M, N, &alpha, d_D, M,d_S+i*N, 1, &beta, d_R+i*M, 1);
hipblasIdamax(handle,M, d_R+i*M, 1,&q);
q = q - 1;
hipblasGetVector(1, sizeof(*c), &d_R[q+i*M], 1, c, 1); // synchronous read: the value is used immediately below
h_C[q+i*M] = *c + h_C[q+i*M];
*c = -(*c);
hipblasDaxpy (handle,N,c, &d_D[q], M, d_S+i*N, 1);
hipblasDnrm2(handle,N, d_S+i*N, 1,&normf);
t++;
}
/*
status = hipblasGetError();
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "! kernel execution error (decoding)\n");
return EXIT_FAILURE;
*/
a = 100.0*(normf*normf)/(normi*normi);
// printf("\nComputation residual error: %f",a);
a=0; q=0; *c=0;
epsilon=1.0e-7;
}
dtime = (((double)clock()-start))/CLOCKS_PER_SEC;
printf("\n Total time : %f(s) ",dtime);
/* Check the solution */
/*
printf("\nSolution (first column),Reference (second column):");
getchar(); // Wait for key ...
for(m=0; m<M; m++)
{
printf("\n%f\t%f\t%f\t%f", h_C[m], h_X[m],h_C[m+M],h_X[m+M]);
}
normi = 0; normf = 0;
for(m=0; m<M; m++)
{
normi = normi + h_X[m]*h_X[m];
normf = normf +
(h_C[m] - h_X[m])*(h_C[m] - h_X[m]);
}
printf("\nSolution residual error:%f", 100.0*normf/normi);
*/
/* Memory clean up */
for (i=0 ; i<4 ; i++){
hipStreamDestroy(stream[i]);
}
hipHostFree(h_D); hipHostFree(h_X); hipHostFree(h_C);
cudaStat = hipFree(d_D);
if (cudaStat != hipSuccess) {
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
cudaStat = hipFree(d_S);
if (cudaStat != hipSuccess) {
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
cudaStat = hipFree(d_R);
if (cudaStat != hipSuccess) {
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasDestroy(handle);
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr,"! cublas shutdown error\n");
return EXIT_FAILURE;
}
if(argc<=1 || strcmp(argv[1],"-noprompt")){
printf("\nPress ENTER to exit...\n");
getchar();
}
return EXIT_SUCCESS;
}
|
58c892e847034450d8699064344f51f984434370.cu
|
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Includes, cuda */
//#include <cublas.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
/* Number of columns & rows in dictionary */
// TODO: get as input
#define M 300 // num of Dictionary columns
#define N 50 // num of Dictionary rows
#define X 25// number of signals
/* Number of non-zero elements in signal */
int K = 4;
/* Residual error */
double epsilon = 1.0e-7;
/* Max num of iterations - assume as same as num of elements in signal */
int T = N;
/* Sign function */
double sign(double x){return (x>=0) - (x<0);}
/* Matrix indexing convention */
#define id(m, n, ld) (((n) * (ld) + (m)))
int main(int argc, char** argv)
{
cudaError_t cudaStat;
cublasStatus_t status;
cublasHandle_t handle;
cudaStream_t stream[4];
double *h_D, *h_X, *h_C, *c; //host memory pointers
double *d_D = 0, *d_S = 0, *d_R = 0; //device memory pointers
int i;
int MX = M*X;
int NX = N*X;
int MN = M*N, m, n, k, q, t;
const double alpha = 1.0;
const double beta = 0.0;
double norm = sqrt(N), normi, normf, a, dtime;
printf("\nDictionary dimensions: N x M = %d x %d, K = %d, Number of Signals = %d", N, M, K, X);
/* Initialize srand and clock */
srand(time(NULL));
clock_t start = clock();
/* Initialize cublas */
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
for (i=0 ; i<4 ; i++){
cudaStat = cudaStreamCreate(&stream[i]);
if (cudaStat != cudaSuccess) {
fprintf (stderr,"! stream create error\n");
return EXIT_FAILURE;
}
cublasSetStream(handle, stream[i] );
}
/* Initialize dictionary on host */
cudaStat = cudaMallocHost((void**)&h_D, MN * sizeof(h_D[0]));
if (cudaStat != cudaSuccess) {
fprintf (stderr,"! stream D error\n");
return EXIT_FAILURE;
}
for(n = 0; n < N; n++){
for(m = 0; m < M; m++){
a = sign(2.0*rand()/(double)RAND_MAX-1.0)/norm;
h_D[id(m, n, M)] = a;
}
}
/* Create X random K-sparse signals */
cudaStat = cudaMallocHost((void**)&h_X, M*X * sizeof(h_X[0]));
if (cudaStat != cudaSuccess) {
fprintf (stderr,"! stream X error\n");
return EXIT_FAILURE;
}
memset(h_X, 0, M*X * sizeof(h_X[0])); // start from all-zero signals before inserting the K nonzero entries
for (i = 0;i < X;i++){
for(k = 0; k < K; k++){
a = 2.0*rand()/(double)RAND_MAX - 1.0;
h_X[(rand()%M)+i*M] = a;}
}
/* Allocate solution memory on host */
cudaStat = cudaMallocHost((void**)&h_C, M*X * sizeof(h_C[0]));
if (cudaStat != cudaSuccess) {
fprintf (stderr,"! stream X error\n");
return EXIT_FAILURE;
}
memset(h_C, 0, M*X * sizeof(h_C[0])); // the solution is accumulated into h_C, so it must start at zero
c = (double*)calloc(1, sizeof(*c));
if(c == 0){
fprintf(stderr, " host memory allocation error (c)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: dictionary */
cudaStat = cudaMalloc ((void**)&d_D, (MN)*sizeof(d_D[0]));
if (cudaStat != cudaSuccess) {
fprintf (stderr,"! device memory allocation error (dictionary)\n");
return EXIT_FAILURE;
}
//transfer the host dictionary to the device dictionary
status = cublasSetVector(MN, sizeof(h_D[0]),h_D, 1, d_D, 1);
if(status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! device access error (write dictionary)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: signal */
cudaStat = cudaMalloc ((void**)&d_R, MX*sizeof(d_R[0]));
if (cudaStat != cudaSuccess) {
fprintf (stderr, "! device memory allocation error (signal)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(MX, sizeof(h_X[0]),h_X, 1, d_R, 1);
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device access error (write signal)\n");
return EXIT_FAILURE;
}
/*Allocate device memory for Signal Solution */
cudaStat = cudaMalloc ((void**)&d_S, NX*sizeof(d_S[0]));
if (cudaStat != cudaSuccess) {
fprintf (stderr, "! device memory allocation error (projected vector)\n");
return EXIT_FAILURE;
}
/* Encoding the signal on device*/
for (i = 0;i<X;i++) {
status = cublasDgemv(handle,CUBLAS_OP_T, M, N, &alpha, d_D, M,d_R+i*M, 1, &beta, d_S+i*N, 1);
if(status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! kernel execution error (encoding)\n");
return EXIT_FAILURE;
}
}
//dtime = ((double)clock()-start)/CLOCKS_PER_SEC; // TODO : need to remove
//printf("\nTime for encoding: %f(s)",dtime);
/* Decoding the signal on device*/
start = clock();
for (i = 0;i<X;i++) {
//cudaStreamSynchronize(stream[i]);
cublasDnrm2(handle,N, d_S+i*N, 1,&normi);
epsilon = sqrt(epsilon*normi);
normf = normi;
t = 0;
while(normf > epsilon && t < T){
//printf("\n %f",normf);
cublasDgemv(handle,CUBLAS_OP_N, M, N, &alpha, d_D, M,d_S+i*N, 1, &beta, d_R+i*M, 1);
cublasIdamax(handle,M, d_R+i*M, 1,&q);
q = q - 1;
cublasGetVector(1, sizeof(*c), &d_R[q+i*M], 1, c, 1); // synchronous read: the value is used immediately below
h_C[q+i*M] = *c + h_C[q+i*M];
*c = -(*c);
cublasDaxpy (handle,N,c, &d_D[q], M, d_S+i*N, 1);
cublasDnrm2(handle,N, d_S+i*N, 1,&normf);
t++;
}
/*
status = cublasGetError();
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "! kernel execution error (decoding)\n");
return EXIT_FAILURE;
*/
a = 100.0*(normf*normf)/(normi*normi);
// printf("\nComputation residual error: %f",a);
a=0; q=0; *c=0;
epsilon=1.0e-7;
}
dtime = (((double)clock()-start))/CLOCKS_PER_SEC;
printf("\n Total time : %f(s) ",dtime);
/* Check the solution */
/*
printf("\nSolution (first column),Reference (second column):");
getchar(); // Wait for key ...
for(m=0; m<M; m++)
{
printf("\n%f\t%f\t%f\t%f", h_C[m], h_X[m],h_C[m+M],h_X[m+M]);
}
normi = 0; normf = 0;
for(m=0; m<M; m++)
{
normi = normi + h_X[m]*h_X[m];
normf = normf +
(h_C[m] - h_X[m])*(h_C[m] - h_X[m]);
}
printf("\nSolution residual error:%f", 100.0*normf/normi);
*/
/* Memory clean up */
for (i=0 ; i<4 ; i++){
cudaStreamDestroy(stream[i]);
}
cudaFreeHost(h_D); cudaFreeHost(h_X); cudaFreeHost(h_C);
cudaStat = cudaFree(d_D);
if (cudaStat != cudaSuccess) {
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
cudaStat = cudaFree(d_S);
if (cudaStat != cudaSuccess) {
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
cudaStat = cudaFree(d_R);
if (cudaStat != cudaSuccess) {
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasDestroy(handle);
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr,"! cublas shutdown error\n");
return EXIT_FAILURE;
}
if(argc<=1 || strcmp(argv[1],"-noprompt")){
printf("\nPress ENTER to exit...\n");
getchar();
}
return EXIT_SUCCESS;
}
|
02b0f97533104205d01d761d93431c5e43be3d24.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaUtils.cuh"
#include <stdio.h>
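// Erosion kernel: a pixel keeps its alpha only if every neighbour within
// erosionPixel pixels also has nonzero alpha; otherwise its alpha is cleared
// in the output surface.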
__global__ void maskErosion_kernel(
hipSurfaceObject_t input, hipSurfaceObject_t output,
unsigned int w, unsigned int h, int erosionPixel)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 pixelCenter = { 0,0,0,0 };
surf2Dread(&pixelCenter, input, x * sizeof(uchar4), y);
if (pixelCenter.w > 0) {
for (int shiftX = -erosionPixel; shiftX <= erosionPixel; shiftX++) {
for (int shiftY = -erosionPixel; shiftY <= erosionPixel; shiftY++) {
if (
(y + shiftY) > 0 &&
(y + shiftY) < h &&
(shiftX + x) > 0 &&
(shiftX + x) < w
)
{
uchar4 pixelNeighbor = { 0,0,0,0 };
surf2Dread(&pixelNeighbor, input, (shiftX + x) * sizeof(uchar4), (y + shiftY));
if (pixelNeighbor.w == 0) {
pixelCenter.w = 0;
surf2Dwrite(pixelCenter, output, x * sizeof(uchar4), y);
return;
}
}
}
}
}
}
void launch_kernel(hipSurfaceObject_t input,hipSurfaceObject_t output, unsigned int w, unsigned int h, int erosionPixel)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(w / block.x, h / block.y, 1);
maskErosion_kernel << < grid, block >> > (input,output, w, h, erosionPixel);
}
void CudaAlogrithm::maskErosion(cudaGraphicsResource_t* cudaTexture, unsigned int w, unsigned int h, int erosionPixel)
{
if (erosionPixel == 0)return;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
hipArray* cuInputArray;
hipMallocArray(&cuInputArray, &channelDesc, w, h, hipArraySurfaceLoadStore);
hipArray* texture_ptr;
hipGraphicsMapResources(1, cudaTexture, 0);
hipGraphicsSubResourceGetMappedArray(&texture_ptr, *cudaTexture, 0, 0);
hipMemcpyArrayToArray(cuInputArray, 0, 0, texture_ptr, 0,0, w * h * sizeof(uchar4));
hipResourceDesc origion;
memset(&origion, 0, sizeof(origion));
origion.resType = hipResourceTypeArray;
origion.res.array.array = texture_ptr;
hipSurfaceObject_t surfObject;
hipCreateSurfaceObject(&surfObject, &origion);
hipResourceDesc copyinput;
memset(&copyinput, 0, sizeof(copyinput));
copyinput.resType = hipResourceTypeArray;
copyinput.res.array.array = cuInputArray;
hipSurfaceObject_t surfObjectCopy;
hipCreateSurfaceObject(&surfObjectCopy, &copyinput);
// You now have a CUDA Surface object that refers to the GL texture.
// Write to the Surface using CUDA.
launch_kernel(surfObjectCopy,surfObject, w, h, erosionPixel);
// We're not going to use this Surface object again. We'll make a new one next frame.
hipDestroySurfaceObject(surfObject);
hipDestroySurfaceObject(surfObjectCopy);
hipFreeArray(cuInputArray);
hipGraphicsUnmapResources(1, cudaTexture, 0);
}
|
02b0f97533104205d01d761d93431c5e43be3d24.cu
|
#include "cudaUtils.cuh"
#include <stdio.h>
__global__ void maskErosion_kernel(
cudaSurfaceObject_t input, cudaSurfaceObject_t output,
unsigned int w, unsigned int h, int erosionPixel)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 pixelCenter = { 0,0,0,0 };
surf2Dread(&pixelCenter, input, x * sizeof(uchar4), y);
if (pixelCenter.w > 0) {
for (int shiftX = -erosionPixel; shiftX <= erosionPixel; shiftX++) {
for (int shiftY = -erosionPixel; shiftY <= erosionPixel; shiftY++) {
if (
(y + shiftY) > 0 &&
(y + shiftY) < h &&
(shiftX + x) > 0 &&
(shiftX + x) < w
)
{
uchar4 pixelNeighbor = { 0,0,0,0 };
surf2Dread(&pixelNeighbor, input, (shiftX + x) * sizeof(uchar4), (y + shiftY));
if (pixelNeighbor.w == 0) {
pixelCenter.w = 0;
surf2Dwrite(pixelCenter, output, x * sizeof(uchar4), y);
return;
}
}
}
}
}
}
void launch_kernel(cudaSurfaceObject_t input,cudaSurfaceObject_t output, unsigned int w, unsigned int h, int erosionPixel)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(w / block.x, h / block.y, 1);
maskErosion_kernel << < grid, block >> > (input,output, w, h, erosionPixel);
}
void CudaAlogrithm::maskErosion(cudaGraphicsResource_t* cudaTexture, unsigned int w, unsigned int h, int erosionPixel)
{
if (erosionPixel == 0)return;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
cudaArray* cuInputArray;
cudaMallocArray(&cuInputArray, &channelDesc, w, h, cudaArraySurfaceLoadStore);
cudaArray* texture_ptr;
cudaGraphicsMapResources(1, cudaTexture, 0);
cudaGraphicsSubResourceGetMappedArray(&texture_ptr, *cudaTexture, 0, 0);
cudaMemcpyArrayToArray(cuInputArray, 0, 0, texture_ptr, 0,0, w * h * sizeof(uchar4));
cudaResourceDesc origion;
memset(&origion, 0, sizeof(origion));
origion.resType = cudaResourceTypeArray;
origion.res.array.array = texture_ptr;
cudaSurfaceObject_t surfObject;
cudaCreateSurfaceObject(&surfObject, &origion);
cudaResourceDesc copyinput;
memset(&copyinput, 0, sizeof(copyinput));
copyinput.resType = cudaResourceTypeArray;
copyinput.res.array.array = cuInputArray;
cudaSurfaceObject_t surfObjectCopy;
cudaCreateSurfaceObject(&surfObjectCopy, &copyinput);
// You now have a CUDA Surface object that refers to the GL texture.
// Write to the Surface using CUDA.
launch_kernel(surfObjectCopy,surfObject, w, h, erosionPixel);
// We're not going to use this Surface object again. We'll make a new one next frame.
cudaDestroySurfaceObject(surfObject);
cudaDestroySurfaceObject(surfObjectCopy);
cudaFreeArray(cuInputArray);
cudaGraphicsUnmapResources(1, cudaTexture, 0);
}
|
e4894ee77d1113126a100767c50517d21af38d81.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "export.h"
#include <hip/hip_runtime.h>
#include <cutil.h>
/*
DLM for calculating NDVI on CUDA, called from IDL as
CUDA_NDVI, NIR, RED, NDVI
where NIR and RED are input spectral bands (byte) and ndvi is output (can be undefined)
*/
// the kernel
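// NDVI = (NIR - RED) / (NIR + RED), with a guard against a zero denominator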
__global__ void cu_ndvi(unsigned char *a, unsigned char *b, float *out, int width, int height)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < 0 || col < 0 || row > height -1 || col > width -1 )
return;
long idx = row * width + col;
if (((float)a[idx] + (float)b[idx]) == 0)
out[idx] = 0.0;
else
out[idx] = ((float)a[idx] - (float)b[idx])/((float)a[idx] + (float)b[idx]);
}
// the host (DLM) routine
void IDL_CDECL cuda_ndvi(int argc, IDL_VPTR argv[])
{
// grab the input image byte pointers
unsigned char * img0Ptr = (unsigned char * ) argv[0]->value.arr->data;
unsigned char * img1Ptr = (unsigned char * ) argv[1]->value.arr->data;
// get the dimensions (same for all three arrays)
IDL_LONG ndim = argv[0]->value.arr->n_dim;
IDL_LONG * dim = argv[0]->value.arr->dim;
IDL_LONG cols = dim[0];
IDL_LONG rows = dim[1];
// create the output array
IDL_VPTR ivOutArray;
float * imgOutPtr = (float * ) IDL_MakeTempArray( (int) IDL_TYP_FLOAT, ndim,
dim, IDL_ARR_INI_ZERO, &ivOutArray);
//Setting up the device variables to hold the data from the host
unsigned char * a0_d; // Pointer to device array for image 0
unsigned char * a1_d; // Pointer to device array for image 1
float * a2_d; // Pointer to device array for image output
const long N = cols * rows; // Number of elements in arrays
size_t size = N * sizeof(unsigned char);
hipMalloc((void **) &a0_d, size); // Allocate array on device
hipMalloc((void **) &a1_d, size); // Allocate array on device
hipMalloc((void **) &a2_d, N * sizeof(float)); // Allocate array on device, can be left blank
hipMemcpy(a0_d, img0Ptr, size, hipMemcpyHostToDevice);
hipMemcpy(a1_d, img1Ptr, size, hipMemcpyHostToDevice);
// Setting up device configurations
dim3 block(16,16); //16 X 16 blocks for a total of 256 threads
dim3 grid (cols/16 +(cols%16 == 0 ? 0:1), rows/16 + (rows%16 == 0 ? 0:1));
//Actual call to the device for processing.
hipLaunchKernelGGL(( cu_ndvi) , dim3(grid), dim3(block) , 0, 0, a0_d, a1_d, a2_d, cols, rows);
//Synchronize the threads and stop the timer
hipDeviceSynchronize();
hipMemcpy(imgOutPtr, a2_d, sizeof(float)*N, hipMemcpyDeviceToHost);
// copy the temporary array to the IDL output parameter
IDL_VarCopy(ivOutArray,argv[2]);
hipFree(a0_d);
hipFree(a1_d);
hipFree(a2_d);
}
// the entry point, which loads the routine into IDL
int IDL_Load(void)
{
static IDL_SYSFUN_DEF2 procedure_addr[] = {
{ (IDL_SYSRTN_GENERIC) cuda_ndvi, "CUDA_NDVI", 0, 3, 0, 0 }
};
return IDL_SysRtnAdd(procedure_addr, IDL_FALSE, 1);
}
|
e4894ee77d1113126a100767c50517d21af38d81.cu
|
#include <stdio.h>
#include "export.h"
#include <cuda.h>
#include <cutil.h>
/*
DLM for calculating NDVI on CUDA, called from IDL as
CUDA_NDVI, NIR, RED, NDVI
where NIR and RED are input spectral bands (byte) and ndvi is output (can be undefined)
*/
// the kernel
__global__ void cu_ndvi(unsigned char *a, unsigned char *b, float *out, int width, int height)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < 0 || col < 0 || row > height -1 || col > width -1 )
return;
long idx = row * width + col;
if (((float)a[idx] + (float)b[idx]) == 0)
out[idx] = 0.0;
else
out[idx] = ((float)a[idx] - (float)b[idx])/((float)a[idx] + (float)b[idx]);
}
// the host (DLM) routine
void IDL_CDECL cuda_ndvi(int argc, IDL_VPTR argv[])
{
// grab the input image byte pointers
unsigned char * img0Ptr = (unsigned char * ) argv[0]->value.arr->data;
unsigned char * img1Ptr = (unsigned char * ) argv[1]->value.arr->data;
// get the dimensions (same for all three arrays)
IDL_LONG ndim = argv[0]->value.arr->n_dim;
IDL_LONG * dim = argv[0]->value.arr->dim;
IDL_LONG cols = dim[0];
IDL_LONG rows = dim[1];
// create the output array
IDL_VPTR ivOutArray;
float * imgOutPtr = (float * ) IDL_MakeTempArray( (int) IDL_TYP_FLOAT, ndim,
dim, IDL_ARR_INI_ZERO, &ivOutArray);
//Setting up the device variables to hold the data from the host
unsigned char * a0_d; // Pointer to device array for image 0
unsigned char * a1_d; // Pointer to device array for image 1
float * a2_d; // Pointer to device array for image output
const long N = cols * rows; // Number of elements in arrays
size_t size = N * sizeof(unsigned char);
cudaMalloc((void **) &a0_d, size); // Allocate array on device
cudaMalloc((void **) &a1_d, size); // Allocate array on device
cudaMalloc((void **) &a2_d, N * sizeof(float)); // Allocate array on device, can be left blank
cudaMemcpy(a0_d, img0Ptr, size, cudaMemcpyHostToDevice);
cudaMemcpy(a1_d, img1Ptr, size, cudaMemcpyHostToDevice);
// Setting up device configurations
dim3 block(16,16); //16 X 16 blocks for a total of 256 threads
dim3 grid (cols/16 +(cols%16 == 0 ? 0:1), rows/16 + (rows%16 == 0 ? 0:1));
//Actual call to the device for processing.
cu_ndvi <<< grid, block >>> (a0_d, a1_d, a2_d, cols, rows);
//Synchronize the threads and stop the timer
cudaThreadSynchronize();
cudaMemcpy(imgOutPtr, a2_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// copy the temporary array to the IDL output parameter
IDL_VarCopy(ivOutArray,argv[2]);
cudaFree(a0_d);
cudaFree(a1_d);
cudaFree(a2_d);
}
// the entry point, which loads the routine into IDL
int IDL_Load(void)
{
static IDL_SYSFUN_DEF2 procedure_addr[] = {
{ (IDL_SYSRTN_GENERIC) cuda_ndvi, "CUDA_NDVI", 0, 3, 0, 0 }
};
return IDL_SysRtnAdd(procedure_addr, IDL_FALSE, 1);
}
|
a624628ba9d170321bbd8e709a9faaf09cfe5f6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
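// Device-side wrapper: runs thrust::set_intersection_by_key sequentially
// (thrust::seq) inside a single GPU thread and stores the returned iterator
// pair in *result.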
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6>
__global__
void set_intersection_by_key_kernel(Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 keys_result,
Iterator5 values_result,
Iterator6 result)
{
*result = thrust::set_intersection_by_key(thrust::seq, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, keys_result, values_result);
}
void TestSetIntersectionByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), b_key(4);
Vector a_val(3);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
Vector ref_key(2), ref_val(2);
ref_key[0] = 0; ref_key[1] = 4;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
typedef thrust::pair<Iterator,Iterator> iter_pair;
thrust::device_vector<iter_pair> end_vec(1);
hipLaunchKernelGGL(( set_intersection_by_key_kernel), dim3(1),dim3(1), 0, 0, a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
result_key.begin(),
result_val.begin(),
end_vec.begin());
thrust::pair<Iterator,Iterator> end = end_vec.front();
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceSeq);
|
a624628ba9d170321bbd8e709a9faaf09cfe5f6d.cu
|
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6>
__global__
void set_intersection_by_key_kernel(Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 keys_result,
Iterator5 values_result,
Iterator6 result)
{
*result = thrust::set_intersection_by_key(thrust::seq, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, keys_result, values_result);
}
void TestSetIntersectionByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), b_key(4);
Vector a_val(3);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
Vector ref_key(2), ref_val(2);
ref_key[0] = 0; ref_key[1] = 4;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
typedef thrust::pair<Iterator,Iterator> iter_pair;
thrust::device_vector<iter_pair> end_vec(1);
set_intersection_by_key_kernel<<<1,1>>>(a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
result_key.begin(),
result_val.begin(),
end_vec.begin());
thrust::pair<Iterator,Iterator> end = end_vec.front();
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceSeq);
|
fc22ba42a950d730b54eeb839423e2ff892699ea.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "math.hpp"
#include "array.hpp"
#include "limits.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/fill_copy.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
#include <vector>
#include <type_traits>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, std::size_t Order,
typename std::enable_if<Order == 2 || Order == 3, bool>::type = true> /* Order has been hardcoded; see code */
__global__ void max_pooling_with_indices(
Span<T> output, Span<T> indices, View<T> input, size_type channels,
array<size_type, Order> out_spatial_dims, array<size_type, Order> in_spatial_dims,
array<size_type, Order> window_size, array<size_type, Order> strides, array<size_type, Order> padding_left)
{
/* every element in the output is mapped to a window in the input and each thread processes several windows */
for (auto idx : grid_stride_range(output.size())) {
size_type out_spatial_size = 1;
array<index_type, Order> window_idx;
for (int i = Order - 1; i >= 0; i--) {
window_idx[i] = (idx / out_spatial_size) % out_spatial_dims[i];
out_spatial_size *= out_spatial_dims[i];
}
const index_type n = idx / (out_spatial_size * channels);
const index_type c = (idx / out_spatial_size) % channels;
array<index_type, Order> start;
for(int i = 0; i < Order; i++)
start[i] = window_idx[i] * strides[i] - padding_left[i];
array<index_type, Order> end;
for (int i = 0; i < Order; i++) {
using device::min;
end[i] = min<index_type>(start[i] + window_size[i], in_spatial_dims[i]);
}
for (int i = 0; i < Order; i++) {
using device::max;
start[i] = max(start[i], 0);
}
T max_value = numeric_limits<T>::lowest();
index_type max_idx = -1;
size_type in_spatial_size = 1;
for (int i = 0; i < Order; i++)
in_spatial_size *= in_spatial_dims[i];
const auto outer_offset = (n * channels + c) * in_spatial_size;
if (Order == 2) {
array<index_type, Order> idx;
for (idx[0] = start[0]; idx[0] != end[0]; idx[0]++) {
for (idx[1] = start[1]; idx[1] != end[1]; idx[1]++) {
index_type offset = 0;
index_type stride = 1;
for (int i = Order - 1; i >= 0; i--) {
offset += stride * idx[i];
stride *= in_spatial_dims[i];
}
if (input[outer_offset + offset] > max_value) {
max_idx = offset;
max_value = input[outer_offset + offset];
}
}
}
} else if(Order == 3) {
array<index_type, Order> idx;
for (idx[0] = start[0]; idx[0] != end[0]; idx[0]++) {
for (idx[1] = start[1]; idx[1] != end[1]; idx[1]++) {
for (idx[2] = start[2]; idx[2] != end[2]; idx[2]++) {
index_type offset = 0;
index_type stride = 1;
for (int i = Order - 1; i >= 0; i--) {
offset += stride * idx[i];
stride *= in_spatial_dims[i];
}
if (input[outer_offset + offset] > max_value) {
max_idx = offset;
max_value = input[outer_offset + offset];
}
}
}
}
}
output[idx] = max_value;
indices[idx] = max_idx;
}
}
template <class T, std::size_t Order>
__global__ void max_unpooling(
Span<T> output, View<T> input, View<T> indices, size_type channels,
array<size_type, Order> out_spatial_dims, array<size_type, Order> in_spatial_dims,
array<size_type, Order> window_size, array<size_type, Order> strides, array<size_type, Order> padding_left)
{
/* the output has already been zero filled */
/* Every input value represents a window in the output. The max unpooling operation
* copies the input value to exactly one location in the output window which is given
* by the indices tensor.
*/
for (auto idx : grid_stride_range(input.size())) {
size_type in_spatial_size = 1;
array<index_type, Order> window_idx;
for (int i = Order - 1; i >= 0; i--) {
window_idx[i] = (idx / in_spatial_size) % in_spatial_dims[i];
in_spatial_size *= in_spatial_dims[i];
}
const index_type n = idx / (in_spatial_size * channels);
const index_type c = (idx / in_spatial_size) % channels;
array<index_type, Order> start;
for (int i = 0; i < Order; i++) {
using device::min;
using device::max;
start[i] = max(0, min(window_idx[i] * strides[i] - padding_left[i], out_spatial_dims[i] - 1));
}
size_type out_spatial_size = 1;
for (int i = 0; i < Order; i++)
out_spatial_size *= out_spatial_dims[i];
index_type outer_offset = (n * channels + c) * out_spatial_size;
output[outer_offset + static_cast<index_type>(indices[idx])] = input[idx];
}
}
}
template <class T, std::size_t Order> static
void launch_max_pooling_kernel(
const Stream& stream,
Span<T> output, Span<T> indices, View<T> input, std::size_t channels,
const std::vector<std::size_t>& out_spatial_dims, const std::vector<std::size_t>& in_spatial_dims,
const std::vector<std::size_t>& window_size,
const std::vector<std::size_t>& strides, const std::vector<std::size_t>& padding_left)
{
CV_Assert(indices.size() == output.size());
CV_Assert(out_spatial_dims.size() == Order);
CV_Assert(in_spatial_dims.size() == Order);
CV_Assert(window_size.size() == Order);
CV_Assert(strides.size() == Order);
CV_Assert(padding_left.size() == Order);
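/* copy the runtime-sized std::vectors into fixed-size arrays that can be passed by value to the kernel */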
array<size_type, Order> out_spatial_dims_k, in_spatial_dims_k;
out_spatial_dims_k.assign(std::begin(out_spatial_dims), std::end(out_spatial_dims));
in_spatial_dims_k.assign(std::begin(in_spatial_dims), std::end(in_spatial_dims));
array<size_type, Order> window_size_k, strides_k, padding_left_k;
window_size_k.assign(std::begin(window_size), std::end(window_size));
strides_k.assign(std::begin(strides), std::end(strides));
padding_left_k.assign(std::begin(padding_left), std::end(padding_left));
auto kernel = raw::max_pooling_with_indices<T, Order>;
auto policy = make_policy(kernel, output.size(), 0, stream);
launch_kernel(kernel, policy, output, indices, input, channels,
out_spatial_dims_k, in_spatial_dims_k, window_size_k, strides_k, padding_left_k);
}
template <class T>
void max_pooling_with_indices(
const Stream& stream,
TensorSpan<T> output, TensorSpan<T> indices, TensorView<T> input,
const std::vector<std::size_t>& window_size, const std::vector<std::size_t>& strides,
const std::vector<std::size_t>& padding_left)
{
CV_Assert(is_shape_same(output, indices));
CV_Assert(input.get_axis_size(1) == output.get_axis_size(1));
auto order = window_size.size();
CV_Assert(strides.size() == order);
CV_Assert(padding_left.size() == order);
CV_Assert(output.rank() == order + 2);
CV_Assert(input.rank() == order + 2);
std::vector<std::size_t> out_spatial_dims(order), in_spatial_dims(order);
for (int i = 0; i < order; i++) {
in_spatial_dims[i] = input.get_axis_size(2 + i);
out_spatial_dims[i] = output.get_axis_size(2 + i);
}
/* only max_pooling2d and max_pooling3d are supported */
CV_Assert(2 <= order && order <= 3);
std::size_t channels = input.get_axis_size(1);
if (order == 3) {
launch_max_pooling_kernel<T, 3>(stream, output, indices, input, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
} else if (order == 2) {
launch_max_pooling_kernel<T, 2>(stream, output, indices, input, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void max_pooling_with_indices(const Stream&,
TensorSpan<__half>, TensorSpan<__half>, TensorView<__half>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
#endif
template void max_pooling_with_indices(const Stream&,
TensorSpan<float>, TensorSpan<float>, TensorView<float>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
template <class T, std::size_t Order> static
void launch_max_unpooling_kernel(
const Stream& stream,
Span<T> output, View<T> input, View<T> indices, std::size_t channels,
const std::vector<std::size_t>& out_spatial_dims, const std::vector<std::size_t>& in_spatial_dims,
const std::vector<std::size_t>& window_size,
const std::vector<std::size_t>& strides, const std::vector<std::size_t>& padding_left)
{
CV_Assert(out_spatial_dims.size() == Order);
CV_Assert(in_spatial_dims.size() == Order);
CV_Assert(window_size.size() == Order);
CV_Assert(strides.size() == Order);
CV_Assert(padding_left.size() == Order);
CV_Assert(indices.size() == input.size());
array<size_type, Order> out_spatial_dims_k, in_spatial_dims_k;
out_spatial_dims_k.assign(std::begin(out_spatial_dims), std::end(out_spatial_dims));
in_spatial_dims_k.assign(std::begin(in_spatial_dims), std::end(in_spatial_dims));
array<size_type, Order> window_size_k, strides_k, padding_left_k;
window_size_k.assign(std::begin(window_size), std::end(window_size));
strides_k.assign(std::begin(strides), std::end(strides));
padding_left_k.assign(std::begin(padding_left), std::end(padding_left));
auto kernel = raw::max_unpooling<T, Order>;
auto policy = make_policy(kernel, input.size(), 0, stream);
launch_kernel(kernel, policy, output, input, indices, channels,
out_spatial_dims_k, in_spatial_dims_k, window_size_k, strides_k, padding_left_k);
}
template <class T>
void max_unpooling(
const Stream& stream,
TensorSpan<T> output, TensorView<T> input, TensorView<T> indices,
const std::vector<std::size_t>& window_size, const std::vector<std::size_t>& strides,
const std::vector<std::size_t>& padding_left)
{
CV_Assert(is_shape_same(input, indices));
CV_Assert(input.get_axis_size(1) == output.get_axis_size(1));
auto order = window_size.size();
CV_Assert(strides.size() == order);
CV_Assert(padding_left.size() == order);
CV_Assert(output.rank() == order + 2);
CV_Assert(input.rank() == order + 2);
std::vector<std::size_t> out_spatial_dims(order), in_spatial_dims(order);
for (int i = 0; i < order; i++) {
in_spatial_dims[i] = input.get_axis_size(2 + i);
out_spatial_dims[i] = output.get_axis_size(2 + i);
}
kernels::fill<T>(stream, output, 0.0);
/* only max_unpooling2d and max_unpooling3d are supported */
CV_Assert(2 <= order && order <= 3);
std::size_t channels = input.get_axis_size(1);
if (order == 3) {
launch_max_unpooling_kernel<T, 3>(stream, output, input, indices, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
} else if (order == 2) {
launch_max_unpooling_kernel<T, 2>(stream, output, input, indices, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void max_unpooling(const Stream&,
TensorSpan<__half>, TensorView<__half>, TensorView<__half>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
#endif
template void max_unpooling(const Stream&,
TensorSpan<float>, TensorView<float>, TensorView<float>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
fc22ba42a950d730b54eeb839423e2ff892699ea.cu
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "math.hpp"
#include "array.hpp"
#include "limits.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/fill_copy.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
#include <vector>
#include <type_traits>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, std::size_t Order,
typename std::enable_if<Order == 2 || Order == 3, bool>::type = true> /* Order has been hardcoded; see code */
__global__ void max_pooling_with_indices(
Span<T> output, Span<T> indices, View<T> input, size_type channels,
array<size_type, Order> out_spatial_dims, array<size_type, Order> in_spatial_dims,
array<size_type, Order> window_size, array<size_type, Order> strides, array<size_type, Order> padding_left)
{
/* every element in the output is mapped to a window in the input and each thread processes several windows */
for (auto idx : grid_stride_range(output.size())) {
size_type out_spatial_size = 1;
array<index_type, Order> window_idx;
for (int i = Order - 1; i >= 0; i--) {
window_idx[i] = (idx / out_spatial_size) % out_spatial_dims[i];
out_spatial_size *= out_spatial_dims[i];
}
const index_type n = idx / (out_spatial_size * channels);
const index_type c = (idx / out_spatial_size) % channels;
array<index_type, Order> start;
for(int i = 0; i < Order; i++)
start[i] = window_idx[i] * strides[i] - padding_left[i];
array<index_type, Order> end;
for (int i = 0; i < Order; i++) {
using device::min;
end[i] = min<index_type>(start[i] + window_size[i], in_spatial_dims[i]);
}
for (int i = 0; i < Order; i++) {
using device::max;
start[i] = max(start[i], 0);
}
T max_value = numeric_limits<T>::lowest();
index_type max_idx = -1;
size_type in_spatial_size = 1;
for (int i = 0; i < Order; i++)
in_spatial_size *= in_spatial_dims[i];
const auto outer_offset = (n * channels + c) * in_spatial_size;
if (Order == 2) {
array<index_type, Order> idx;
for (idx[0] = start[0]; idx[0] != end[0]; idx[0]++) {
for (idx[1] = start[1]; idx[1] != end[1]; idx[1]++) {
index_type offset = 0;
index_type stride = 1;
for (int i = Order - 1; i >= 0; i--) {
offset += stride * idx[i];
stride *= in_spatial_dims[i];
}
if (input[outer_offset + offset] > max_value) {
max_idx = offset;
max_value = input[outer_offset + offset];
}
}
}
} else if(Order == 3) {
array<index_type, Order> idx;
for (idx[0] = start[0]; idx[0] != end[0]; idx[0]++) {
for (idx[1] = start[1]; idx[1] != end[1]; idx[1]++) {
for (idx[2] = start[2]; idx[2] != end[2]; idx[2]++) {
index_type offset = 0;
index_type stride = 1;
for (int i = Order - 1; i >= 0; i--) {
offset += stride * idx[i];
stride *= in_spatial_dims[i];
}
if (input[outer_offset + offset] > max_value) {
max_idx = offset;
max_value = input[outer_offset + offset];
}
}
}
}
}
output[idx] = max_value;
indices[idx] = max_idx;
}
}
template <class T, std::size_t Order>
__global__ void max_unpooling(
Span<T> output, View<T> input, View<T> indices, size_type channels,
array<size_type, Order> out_spatial_dims, array<size_type, Order> in_spatial_dims,
array<size_type, Order> window_size, array<size_type, Order> strides, array<size_type, Order> padding_left)
{
/* the output has already been zero filled */
/* Every input value represents a window in the output. The max unpooling operation
* copies the input value to exactly one location in the output window which is given
* by the indices tensor.
*/
for (auto idx : grid_stride_range(input.size())) {
size_type in_spatial_size = 1;
array<index_type, Order> window_idx;
for (int i = Order - 1; i >= 0; i--) {
window_idx[i] = (idx / in_spatial_size) % in_spatial_dims[i];
in_spatial_size *= in_spatial_dims[i];
}
const index_type n = idx / (in_spatial_size * channels);
const index_type c = (idx / in_spatial_size) % channels;
array<index_type, Order> start;
for (int i = 0; i < Order; i++) {
using device::min;
using device::max;
start[i] = max(0, min(window_idx[i] * strides[i] - padding_left[i], out_spatial_dims[i] - 1));
}
size_type out_spatial_size = 1;
for (int i = 0; i < Order; i++)
out_spatial_size *= out_spatial_dims[i];
index_type outer_offset = (n * channels + c) * out_spatial_size;
output[outer_offset + static_cast<index_type>(indices[idx])] = input[idx];
}
}
}
template <class T, std::size_t Order> static
void launch_max_pooling_kernel(
const Stream& stream,
Span<T> output, Span<T> indices, View<T> input, std::size_t channels,
const std::vector<std::size_t>& out_spatial_dims, const std::vector<std::size_t>& in_spatial_dims,
const std::vector<std::size_t>& window_size,
const std::vector<std::size_t>& strides, const std::vector<std::size_t>& padding_left)
{
CV_Assert(indices.size() == output.size());
CV_Assert(out_spatial_dims.size() == Order);
CV_Assert(in_spatial_dims.size() == Order);
CV_Assert(window_size.size() == Order);
CV_Assert(strides.size() == Order);
CV_Assert(padding_left.size() == Order);
array<size_type, Order> out_spatial_dims_k, in_spatial_dims_k;
out_spatial_dims_k.assign(std::begin(out_spatial_dims), std::end(out_spatial_dims));
in_spatial_dims_k.assign(std::begin(in_spatial_dims), std::end(in_spatial_dims));
array<size_type, Order> window_size_k, strides_k, padding_left_k;
window_size_k.assign(std::begin(window_size), std::end(window_size));
strides_k.assign(std::begin(strides), std::end(strides));
padding_left_k.assign(std::begin(padding_left), std::end(padding_left));
auto kernel = raw::max_pooling_with_indices<T, Order>;
auto policy = make_policy(kernel, output.size(), 0, stream);
launch_kernel(kernel, policy, output, indices, input, channels,
out_spatial_dims_k, in_spatial_dims_k, window_size_k, strides_k, padding_left_k);
}
template <class T>
void max_pooling_with_indices(
const Stream& stream,
TensorSpan<T> output, TensorSpan<T> indices, TensorView<T> input,
const std::vector<std::size_t>& window_size, const std::vector<std::size_t>& strides,
const std::vector<std::size_t>& padding_left)
{
CV_Assert(is_shape_same(output, indices));
CV_Assert(input.get_axis_size(1) == output.get_axis_size(1));
auto order = window_size.size();
CV_Assert(strides.size() == order);
CV_Assert(padding_left.size() == order);
CV_Assert(output.rank() == order + 2);
CV_Assert(input.rank() == order + 2);
std::vector<std::size_t> out_spatial_dims(order), in_spatial_dims(order);
for (int i = 0; i < order; i++) {
in_spatial_dims[i] = input.get_axis_size(2 + i);
out_spatial_dims[i] = output.get_axis_size(2 + i);
}
/* only max_pooling2d and max_pooling3d are supported */
CV_Assert(2 <= order && order <= 3);
std::size_t channels = input.get_axis_size(1);
if (order == 3) {
launch_max_pooling_kernel<T, 3>(stream, output, indices, input, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
} else if (order == 2) {
launch_max_pooling_kernel<T, 2>(stream, output, indices, input, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void max_pooling_with_indices(const Stream&,
TensorSpan<__half>, TensorSpan<__half>, TensorView<__half>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
#endif
template void max_pooling_with_indices(const Stream&,
TensorSpan<float>, TensorSpan<float>, TensorView<float>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
template <class T, std::size_t Order> static
void launch_max_unpooling_kernel(
const Stream& stream,
Span<T> output, View<T> input, View<T> indices, std::size_t channels,
const std::vector<std::size_t>& out_spatial_dims, const std::vector<std::size_t>& in_spatial_dims,
const std::vector<std::size_t>& window_size,
const std::vector<std::size_t>& strides, const std::vector<std::size_t>& padding_left)
{
CV_Assert(out_spatial_dims.size() == Order);
CV_Assert(in_spatial_dims.size() == Order);
CV_Assert(window_size.size() == Order);
CV_Assert(strides.size() == Order);
CV_Assert(padding_left.size() == Order);
CV_Assert(indices.size() == input.size());
array<size_type, Order> out_spatial_dims_k, in_spatial_dims_k;
out_spatial_dims_k.assign(std::begin(out_spatial_dims), std::end(out_spatial_dims));
in_spatial_dims_k.assign(std::begin(in_spatial_dims), std::end(in_spatial_dims));
array<size_type, Order> window_size_k, strides_k, padding_left_k;
window_size_k.assign(std::begin(window_size), std::end(window_size));
strides_k.assign(std::begin(strides), std::end(strides));
padding_left_k.assign(std::begin(padding_left), std::end(padding_left));
auto kernel = raw::max_unpooling<T, Order>;
auto policy = make_policy(kernel, input.size(), 0, stream);
launch_kernel(kernel, policy, output, input, indices, channels,
out_spatial_dims_k, in_spatial_dims_k, window_size_k, strides_k, padding_left_k);
}
template <class T>
void max_unpooling(
const Stream& stream,
TensorSpan<T> output, TensorView<T> input, TensorView<T> indices,
const std::vector<std::size_t>& window_size, const std::vector<std::size_t>& strides,
const std::vector<std::size_t>& padding_left)
{
CV_Assert(is_shape_same(input, indices));
CV_Assert(input.get_axis_size(1) == output.get_axis_size(1));
auto order = window_size.size();
CV_Assert(strides.size() == order);
CV_Assert(padding_left.size() == order);
CV_Assert(output.rank() == order + 2);
CV_Assert(input.rank() == order + 2);
std::vector<std::size_t> out_spatial_dims(order), in_spatial_dims(order);
for (int i = 0; i < order; i++) {
in_spatial_dims[i] = input.get_axis_size(2 + i);
out_spatial_dims[i] = output.get_axis_size(2 + i);
}
kernels::fill<T>(stream, output, 0.0);
/* only max_unpooling2d and max_unpooling3d are supported */
CV_Assert(2 <= order && order <= 3);
std::size_t channels = input.get_axis_size(1);
if (order == 3) {
launch_max_unpooling_kernel<T, 3>(stream, output, input, indices, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
} else if (order == 2) {
launch_max_unpooling_kernel<T, 2>(stream, output, input, indices, channels,
out_spatial_dims, in_spatial_dims, window_size, strides, padding_left);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void max_unpooling(const Stream&,
TensorSpan<__half>, TensorView<__half>, TensorView<__half>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
#endif
template void max_unpooling(const Stream&,
TensorSpan<float>, TensorView<float>, TensorView<float>,
const std::vector<std::size_t>&, const std::vector<std::size_t>&,
const std::vector<std::size_t>&);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
ca21a9911c14fae57e17cb2b964cc17c0f45c029.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/version.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <ctime>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
using namespace std;
__global__ void vectoradd(float* a,float* b, float* c, int n){
unsigned int thread_id=threadIdx.x + blockIdx.x*blockDim.x;
if(thread_id<n) //guard: 4*256 threads are launched but only n elements exist
c[thread_id]=a[thread_id]+b[thread_id];
}
int main(){
clock_t timestamp=clock();
hipFree(0);
int N=1000;
size_t size=N*sizeof(float);
float* h_a=(float*)malloc(size);
float* h_b=(float*)malloc(size);
float* h_c=(float*)malloc(size);
for(int i=0;i<N;i++)
{
h_a[i]=1.0f;
h_b[i]=3.0f;
h_c[i]=0.0f;
}
float* d_a;
float* d_b;
float* d_c;
hipMalloc(&d_a,size);
hipMalloc(&d_b,size);
hipMalloc(&d_c,size);
hipMemcpy(d_a,h_a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vectoradd), dim3(4),dim3(256), 0, 0, d_a,d_b,d_c,N);
hipMemcpy(h_c,d_c,size,hipMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout << h_c[i] << " ";
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
cout << "hekll"<<endl << timestamp <<endl;
return 0;
}
|
ca21a9911c14fae57e17cb2b964cc17c0f45c029.cu
|
#include <thrust/version.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <ctime>
#include <iostream>
#include <cuda.h>
#include <curand.h>
using namespace std;
__global__ void vectoradd(float* a,float* b, float* c, int n){
unsigned int thread_id=threadIdx.x + blockIdx.x*blockDim.x;
if(thread_id<n) //guard: 4*256 threads are launched but only n elements exist
c[thread_id]=a[thread_id]+b[thread_id];
}
int main(){
clock_t timestamp=clock();
cudaFree(0);
int N=1000;
size_t size=N*sizeof(float);
float* h_a=(float*)malloc(size);
float* h_b=(float*)malloc(size);
float* h_c=(float*)malloc(size);
for(int i=0;i<N;i++)
{
h_a[i]=1.0f;
h_b[i]=3.0f;
h_c[i]=0.0f;
}
float* d_a;
float* d_b;
float* d_c;
cudaMalloc(&d_a,size);
cudaMalloc(&d_b,size);
cudaMalloc(&d_c,size);
cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,size,cudaMemcpyHostToDevice);
vectoradd<<<4,256>>>(d_a,d_b,d_c,N);
cudaMemcpy(h_c,d_c,size,cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout << h_c[i] << " ";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
cout << "hekll"<<endl << timestamp <<endl;
return 0;
}
|
f2f46a281550cc29fa6201358dee0ce65123b153.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise3.cu
* @author Alessandro Capotondi
* @date 5 May 2020
* @brief Exercise 3 - Image Luminance Histogram
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define NBINS 256
void hist(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
#pragma omp parallel for
for (int i = 0; i < width * height; i++)
{
int val = im[i];
#pragma omp atomic
hist[val]++;
}
}
//TODO Ex3-a) Implement Histogram Calculation. Using Global Accesses
__global__ void hist_v1(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
}
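/* A minimal sketch of one possible hist_v1 implementation (not part of the original
 * exercise handout; the kernel name is an assumption): each thread handles one pixel
 * and updates the global histogram with an atomic add.
 */
__global__ void hist_v1_sketch(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < width * height)
        atomicAdd(&hist[im[i]], 1);
}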
//TODO Ex3-b) Implement Histogram Calculation. Exploiting Shared Memory
__global__ void hist_v2(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
}
int main(int argc, char *argv[])
{
int iret = 0;
struct timespec rt[2];
double wt; // walltime
int hist_host[NBINS], hist_gpu[NBINS];
string filename("data/buzz.jpg");
if (argc > 1)
filename = argv[1];
// Load Image
Mat image = imread(filename, IMREAD_GRAYSCALE);
if (!image.data)
{
cout << "Could not open or find the image" << std::endl;
return -1;
}
int width = image.size().width;
int height = image.size().height;
// Set Output Memory
memset(hist_host, 0, NBINS * sizeof(int));
memset(hist_gpu, 0, NBINS * sizeof(int));
// Compute CPU Version - Golden Model
clock_gettime(CLOCK_REALTIME, rt + 0);
hist(image.ptr(), hist_host, width, height);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("Hist (Host) : %9.6f sec\n", wt);
//CUDA Buffer Allocation
int *d_hist_gpu;
unsigned char *d_image;
gpuErrchk(hipMalloc((void **)&d_hist_gpu, sizeof(int) * NBINS));
gpuErrchk(hipMalloc((void **)&d_image, sizeof(unsigned char) * width * height));
clock_gettime(CLOCK_REALTIME, rt + 0);
//TODO Copy Image to the device
//TODO Define Grid and Block
//TODO Launch Kernel hist_v1
gpuErrchk(hipPeekAtLastError());
//TODO Copy histogram from the device
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("Hist (GPU) : %9.6f sec\n", wt);
for (int i = 0; i < NBINS; i++)
{
iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
assert(iret == 0);
}
// Reset Output
gpuErrchk(hipMemset(d_hist_gpu, 0, NBINS * sizeof(unsigned int)));
clock_gettime(CLOCK_REALTIME, rt + 0);
//TODO Copy Image to the device
//Use the same dimBlock, dimGrid of previous version
//TODO Launch Kernel hist_v2
gpuErrchk(hipPeekAtLastError());
//TODO Copy histogram from the device
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("Hist-2 (GPU) : %9.6f sec\n", wt);
for (int i = 0; i < NBINS; i++)
{
iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
assert(iret == 0);
}
gpuErrchk(hipFree(d_hist_gpu));
gpuErrchk(hipFree(d_image));
return iret;
}
|
f2f46a281550cc29fa6201358dee0ce65123b153.cu
|
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise3.cu
* @author Alessandro Capotondi
* @date 5 May 2020
* @brief Exercise 3 - Image Luminance Histogram
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define NBINS 256
void hist(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
#pragma omp parallel for
for (int i = 0; i < width * height; i++)
{
int val = im[i];
#pragma omp atomic
hist[val]++;
}
}
//TODO Ex3-a) Implement Histogram Calculation. Using Global Accesses
__global__ void hist_v1(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
}
//TODO Ex3-b) Implement Histogram Calculation. Exploiting Shared Memory
__global__ void hist_v2(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
}
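/* A minimal sketch of one possible hist_v2 implementation (not part of the original
 * exercise handout; the kernel name is an assumption): each block accumulates a private
 * histogram in shared memory and merges it into the global histogram once at the end,
 * reducing contention on global atomics.
 */
__global__ void hist_v2_sketch(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
    __shared__ int hist_local[NBINS];
    for (int b = threadIdx.x; b < NBINS; b += blockDim.x)
        hist_local[b] = 0;
    __syncthreads();
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < width * height; i += blockDim.x * gridDim.x)
        atomicAdd(&hist_local[im[i]], 1);
    __syncthreads();
    for (int b = threadIdx.x; b < NBINS; b += blockDim.x)
        atomicAdd(&hist[b], hist_local[b]);
}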
int main(int argc, char *argv[])
{
int iret = 0;
struct timespec rt[2];
double wt; // walltime
int hist_host[NBINS], hist_gpu[NBINS];
string filename("data/buzz.jpg");
if (argc > 1)
filename = argv[1];
// Load Image
Mat image = imread(filename, IMREAD_GRAYSCALE);
if (!image.data)
{
cout << "Could not open or find the image" << std::endl;
return -1;
}
int width = image.size().width;
int height = image.size().height;
// Set Output Memory
memset(hist_host, 0, NBINS * sizeof(int));
memset(hist_gpu, 0, NBINS * sizeof(int));
// Compute CPU Version - Golden Model
clock_gettime(CLOCK_REALTIME, rt + 0);
hist(image.ptr(), hist_host, width, height);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("Hist (Host) : %9.6f sec\n", wt);
//CUDA Buffer Allocation
int *d_hist_gpu;
unsigned char *d_image;
gpuErrchk(cudaMalloc((void **)&d_hist_gpu, sizeof(int) * NBINS));
gpuErrchk(cudaMalloc((void **)&d_image, sizeof(unsigned char) * width * height));
clock_gettime(CLOCK_REALTIME, rt + 0);
//TODO Copy Image to the device
//TODO Define Grid and Block
//TODO Launch Kernel hist_v1
gpuErrchk(cudaPeekAtLastError());
//TODO Copy histogram from the device
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("Hist (GPU) : %9.6f sec\n", wt);
for (int i = 0; i < NBINS; i++)
{
iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
assert(iret == 0);
}
// Reset Output
gpuErrchk(cudaMemset(d_hist_gpu, 0, NBINS * sizeof(unsigned int)));
clock_gettime(CLOCK_REALTIME, rt + 0);
//TODO Copy Image to the device
//Use the same dimBlock, dimGrid of previous version
//TODO Launch Kernel hist_v2
gpuErrchk(cudaPeekAtLastError());
//TODO Copy histogram from the device
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("Hist-2 (GPU) : %9.6f sec\n", wt);
for (int i = 0; i < NBINS; i++)
{
iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
assert(iret == 0);
}
gpuErrchk(cudaFree(d_hist_gpu));
gpuErrchk(cudaFree(d_image));
return iret;
}
|
a007bc3d8487afa0862b8fbac1242e01785cc01a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
int main() {
int device_count;
hipGetDeviceCount(&device_count);
for (int i = 0; i < device_count; i++) {
//cuda struct that holds the device properties
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, i);
std::cout << "=============================================================================" << std::endl;
std::cout << "GPU device " << i << ": " << device_prop.name << std::endl;
std::cout << "Total global memory: " << device_prop.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
std::cout << "Number of SMs (one thread block maps to one physical SM): " << device_prop.multiProcessorCount << std::endl;
std::cout << "Shared memory per thread block: " << device_prop.sharedMemPerBlock / 1024.0 << "KB" << std::endl;
std::cout << "32-bit registers available per thread block: " << device_prop.regsPerBlock << std::endl;
std::cout << "Max threads per SM: " << device_prop.maxThreadsPerMultiProcessor << std::endl;
std::cout << "Max warps per SM: " << device_prop.maxThreadsPerMultiProcessor / 32 << std::endl;
std::cout << "Number of multiprocessors on the device: " << device_prop.multiProcessorCount << std::endl;
std::cout << "=============================================================================" << std::endl;
}
return 0;
}
|
a007bc3d8487afa0862b8fbac1242e01785cc01a.cu
|
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
int main() {
int device_count;
cudaGetDeviceCount(&device_count);
for (int i = 0; i < device_count; i++) {
//cuda struct that holds the device properties
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, i);
std::cout << "=============================================================================" << std::endl;
std::cout << "Using GPU device " << i << ": " << device_prop.name << std::endl;
std::cout << "Total global memory: " << device_prop.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
std::cout << "Number of SMs (one thread block maps to one physical SM): " << device_prop.multiProcessorCount << std::endl;
std::cout << "Shared memory per thread block: " << device_prop.sharedMemPerBlock / 1024.0 << "KB" << std::endl;
std::cout << "32-bit registers available per thread block: " << device_prop.regsPerBlock << std::endl;
std::cout << "Max threads per SM: " << device_prop.maxThreadsPerMultiProcessor << std::endl;
std::cout << "Max warps per SM: " << device_prop.maxThreadsPerMultiProcessor / 32 << std::endl;
std::cout << "Number of multiprocessors on the device: " << device_prop.multiProcessorCount << std::endl;
std::cout << "=============================================================================" << std::endl;
}
return 0;
}
|
40e1d888e950de8985f7247e55dcf7a3bcf42720.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#define N 67000
#define MIN(a,b) (a < b?a:b )
__global__ void add(int *a, int *b, int *c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; //The thread id is the id of this thread within its block
c[tid]=a[tid]+b[tid]; //The block id is the id of this block within the grid
} //The block dimension is the number of threads in each block
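//Illustrative sketch (not used by the code below; the kernel name and extra parameter are
//assumptions): a guarded variant that lets the launch round the thread count up to a
//multiple of the block size and simply skips the excess threads.
__global__ void add_guarded(int *a, int *b, int *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n)
c[tid] = a[tid] + b[tid];
}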
int main()
{
int a[N], b[N], c[N];//host
int *dev_a, *dev_b, *dev_c;//device
hipMalloc((void**)&dev_a, N*sizeof(int) );
hipMalloc((void**)&dev_b, N*sizeof(int) );
hipMalloc((void**)&dev_c, N*sizeof(int) );
for (int i = 0; i < N; i++){
a[i] = i,
b[i] = 1;
}
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice); //host to device
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
//Compute the largest divisor of N that is less than or equal to 512
//Alternatively, threads whose tid is >= N could simply skip the write (see the guarded kernel sketch above)
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
int blocks = N / threads_block;
hipLaunchKernelGGL(( add), dim3(blocks),dim3(threads_block), 0, 0, dev_a,dev_b,dev_c);
//Call CUDA kernel
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);//Copy memory from device to host
//copy array to host
for (int i = 0; i < N; i++)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
hipFree(dev_a);//free device mem
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
40e1d888e950de8985f7247e55dcf7a3bcf42720.cu
|
#include "stdio.h"
#define N 67000
#define MIN(a,b) (a < b?a:b )
__global__ void add(int *a, int *b, int *c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; //The thread id is the id of this thread within its block
c[tid]=a[tid]+b[tid]; //The block id is the id of this block within the grid
} //The block dimension is the number of threads in each block
int main()
{
int a[N], b[N], c[N];//host
int *dev_a, *dev_b, *dev_c;//device
cudaMalloc((void**)&dev_a, N*sizeof(int) );
cudaMalloc((void**)&dev_b, N*sizeof(int) );
cudaMalloc((void**)&dev_c, N*sizeof(int) );
for (int i = 0; i < N; i++){
a[i] = i,
b[i] = 1;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice); //host to device
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
//Compute the largest divisor of N that is less than or equal to 512
//Alternatively, threads whose tid is >= N could simply skip modifying the vector
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
int blocks = N / threads_block;
add<<<blocks,threads_block>>>(dev_a,dev_b,dev_c);
//Call CUDA kernel
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);//Copy memory from device to host
//copy array to host
for (int i = 0; i < N; i++)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
cudaFree(dev_a);//free device mem
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
8fce9a4cc64f6e6dfc70a918c3eab44a4d989966.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <iostream>
#define N 1024
using namespace std;
__global__ void transpose(int A[][N])//,int B[][N],int C[][N])
{
int id = threadIdx.x;
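// Thread `id` performs the in-place transpose of its row/column pair: it swaps A[id][j]
// with A[j][id] for every j < id (an XOR-based swap), and since each unordered pair
// (id, j) is handled by exactly one thread, no two threads ever touch the same element.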
for(int j=0;j<id;j++)
{
int t = A[id][j] ^ A[j][id];
A[id][j] = t ^ A[id][j];
A[j][id] = t ^ A[j][id];
}
}
int A[N][N];
int main(int argc,char *argv[])
{
for(int i=0;i<N;i++)
{
for(int j=0;j<N;j++)
{
A[i][j]=2*i+j;
// cout<<A[i][j]<<" ";
}
//cout<<endl;
}
int (*A_D)[N];
hipMalloc((void**)&A_D,(N*N)*sizeof(int));
hipMemcpy(A_D,A,(N*N)*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( transpose), dim3(1),dim3(N), 0, 0, A_D);//,B_D,C_D);
hipMemcpy(A,A_D,(N*N)*sizeof(int),hipMemcpyDeviceToHost);
// for(int i=0;i<N;i++)
// {
// for(int j=0;j<N;j++)
// {
// cout<<A[i][j]<<" ";
// }
// cout<<endl;
// }
hipFree(A_D);
return 0;
}
|
8fce9a4cc64f6e6dfc70a918c3eab44a4d989966.cu
|
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <iostream>
#define N 1024
using namespace std;
__global__ void transpose(int A[][N])//,int B[][N],int C[][N])
{
int id = threadIdx.x;
for(int j=0;j<id;j++)
{
int t = A[id][j] ^ A[j][id];
A[id][j] = t ^ A[id][j];
A[j][id] = t ^ A[j][id];
}
}
int A[N][N];
int main(int argc,char *argv[])
{
for(int i=0;i<N;i++)
{
for(int j=0;j<N;j++)
{
A[i][j]=2*i+j;
// cout<<A[i][j]<<" ";
}
//cout<<endl;
}
int (*A_D)[N];
cudaMalloc((void**)&A_D,(N*N)*sizeof(int));
cudaMemcpy(A_D,A,(N*N)*sizeof(int),cudaMemcpyHostToDevice);
transpose<<<1,N>>>(A_D);//,B_D,C_D);
cudaMemcpy(A,A_D,(N*N)*sizeof(int),cudaMemcpyDeviceToHost);
// for(int i=0;i<N;i++)
// {
// for(int j=0;j<N;j++)
// {
// cout<<A[i][j]<<" ";
// }
// cout<<endl;
// }
cudaFree(A_D);
return 0;
}
|
d702e9043803abe5121235c5c8ef25f94e90e75c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "nms_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
Tensor NMSCUDAKernelLauncher(Tensor boxes, Tensor scores, float iou_threshold,
int offset) {
at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes.device());
if (boxes.numel() == 0) {
return at::empty({0}, boxes.options().dtype(at::kLong));
}
auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = (boxes_num + threadsPerBlock - 1) / threadsPerBlock;
const int col_blocks_alloc = GET_BLOCKS(boxes_num, threadsPerBlock);
Tensor mask =
at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
dim3 blocks(col_blocks_alloc, col_blocks_alloc);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( nms_cuda), dim3(blocks), dim3(threads), 0, stream,
boxes_num, iou_threshold, offset, boxes_sorted.data_ptr<float>(),
(unsigned long long*)mask.data_ptr<int64_t>());
// Filter the boxes which should be kept.
at::Tensor keep_t = at::zeros(
{boxes_num}, boxes.options().dtype(at::kBool).device(at::kCUDA));
hipLaunchKernelGGL(( gather_keep_from_mask), dim3(1), dim3(::min(col_blocks, THREADS_PER_BLOCK)),
col_blocks * sizeof(unsigned long long), stream,
keep_t.data_ptr<bool>(), (unsigned long long*)mask.data_ptr<int64_t>(),
boxes_num);
AT_CUDA_CHECK(hipGetLastError());
return order_t.masked_select(keep_t);
}
|
d702e9043803abe5121235c5c8ef25f94e90e75c.cu
|
// Copyright (c) OpenMMLab. All rights reserved
#include "nms_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
Tensor NMSCUDAKernelLauncher(Tensor boxes, Tensor scores, float iou_threshold,
int offset) {
at::cuda::CUDAGuard device_guard(boxes.device());
if (boxes.numel() == 0) {
return at::empty({0}, boxes.options().dtype(at::kLong));
}
auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = (boxes_num + threadsPerBlock - 1) / threadsPerBlock;
const int col_blocks_alloc = GET_BLOCKS(boxes_num, threadsPerBlock);
Tensor mask =
at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
dim3 blocks(col_blocks_alloc, col_blocks_alloc);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
nms_cuda<<<blocks, threads, 0, stream>>>(
boxes_num, iou_threshold, offset, boxes_sorted.data_ptr<float>(),
(unsigned long long*)mask.data_ptr<int64_t>());
// Filter the boxes which should be kept.
at::Tensor keep_t = at::zeros(
{boxes_num}, boxes.options().dtype(at::kBool).device(at::kCUDA));
gather_keep_from_mask<<<1, std::min(col_blocks, THREADS_PER_BLOCK),
col_blocks * sizeof(unsigned long long), stream>>>(
keep_t.data_ptr<bool>(), (unsigned long long*)mask.data_ptr<int64_t>(),
boxes_num);
AT_CUDA_CHECK(cudaGetLastError());
return order_t.masked_select(keep_t);
}
|
00ea209d4c6e9aff6d4f9a6605fd6ce58d74cc03.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#define RADIUS 3
#define BLOCKSIZE 32
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %dn", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**********/
/* KERNEL */
/**********/
__global__ void moving_average(int *in, int *out, int N) {
__shared__ int temp[BLOCKSIZE + 2 * RADIUS];
int gindexx = threadIdx.x + blockIdx.x * blockDim.x;
int lindexx = threadIdx.x + RADIUS;
// --- Read input elements into shared memory
temp[lindexx] = (gindexx < N) ? in[gindexx] : 0;
if (threadIdx.x < RADIUS) {
temp[threadIdx.x] = (((gindexx - RADIUS) >= 0) && (gindexx <= N)) ? in[gindexx - RADIUS] : 0;
temp[threadIdx.x + (RADIUS + BLOCKSIZE)] = ((gindexx + BLOCKSIZE) < N) ? in[gindexx + BLOCKSIZE] : 0;
}
__syncthreads();
// --- Apply the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lindexx + offset];
}
// --- Store the result (only threads that map to a valid output element may write)
if (gindexx < N) out[gindexx] = result;
}
/********/
/* MAIN */
/********/
int main() {
const int N = 55 + 2 * RADIUS;
const int constant = 4;
thrust::device_vector<int> d_in(N, constant);
thrust::device_vector<int> d_out(N);
moving_average << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(thrust::raw_pointer_cast(d_in.data()), thrust::raw_pointer_cast(d_out.data()), N);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
thrust::host_vector<int> h_out = d_out;
for (int i = 0; i<N; i++)
printf("Element i = %i; h_out = %i\n", i, h_out[i]);
return 0;
}
|
00ea209d4c6e9aff6d4f9a6605fd6ce58d74cc03.cu
|
#include <thrust/device_vector.h>
#define RADIUS 3
#define BLOCKSIZE 32
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %dn", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**********/
/* KERNEL */
/**********/
__global__ void moving_average(int *in, int *out, int N) {
__shared__ int temp[BLOCKSIZE + 2 * RADIUS];
int gindexx = threadIdx.x + blockIdx.x * blockDim.x;
int lindexx = threadIdx.x + RADIUS;
// --- Read input elements into shared memory
temp[lindexx] = (gindexx < N) ? in[gindexx] : 0;
if (threadIdx.x < RADIUS) {
temp[threadIdx.x] = (((gindexx - RADIUS) >= 0) && (gindexx <= N)) ? in[gindexx - RADIUS] : 0;
temp[threadIdx.x + (RADIUS + BLOCKSIZE)] = ((gindexx + BLOCKSIZE) < N) ? in[gindexx + BLOCKSIZE] : 0;
}
__syncthreads();
// --- Apply the stencil
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lindexx + offset];
}
// --- Store the result (only threads that map to a valid output element may write)
if (gindexx < N) out[gindexx] = result;
}
/********/
/* MAIN */
/********/
int main() {
const int N = 55 + 2 * RADIUS;
const int constant = 4;
thrust::device_vector<int> d_in(N, constant);
thrust::device_vector<int> d_out(N);
moving_average << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(thrust::raw_pointer_cast(d_in.data()), thrust::raw_pointer_cast(d_out.data()), N);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
thrust::host_vector<int> h_out = d_out;
for (int i = 0; i<N; i++)
printf("Element i = %i; h_out = %i\n", i, h_out[i]);
return 0;
}
|
f75b0d6e27cb8750d128b21a19ddaa1e05751802.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
// input: points(b, n, c) idx(b, m)
// output: out(b, m, c)
__global__ void gather_points_kernel(int b, int n, int c, int m,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = blockIdx.y * blockDim.x + threadIdx.x; j < m;
j += blockDim.x * gridDim.y) {
int a = idx[i * m + j];
memcpy(out + (i * m + j) * c, points + (i * n + a) * c,
sizeof(float) * c);
}
}
}
void gather_points_kernel_wrapper(int b, int n, int c, int npoints,
const float *points, const int *idx,
float *out, hipStream_t stream) {
hipError_t err;
hipLaunchKernelGGL(( gather_points_kernel), dim3(dim3(2, 8, 1)), dim3(opt_n_threads(npoints) / 4), 0,
stream, b, n, c, npoints, points, idx, out);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Output idxs (b, m)
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0)
return;
const int BlockSize = 512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
int stride = blockDim.x;
int n_threads = stride;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) +
(z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
if (d2 > best) {
best = d2;
besti = k;
}
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
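        // Parallel tree reduction over the per-thread candidates: at each step the first s
        // threads fold the upper half of dists/dists_i into the lower half, so after
        // log2(n_threads) steps dists_i[0] holds the index of the farthest point
        // (this assumes n_threads is a power of two).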
for (int s = n_threads / 2; s > 0; s >>= 1) {
if (tid < s) {
int idx1 = tid;
int idx2 = idx1 + s;
if (dists[idx2] > dists[idx1]) {
dists[idx1] = dists[idx2];
dists_i[idx1] = dists_i[idx2];
}
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
/* if (threadIdx.x == 0) {
for (int i = 0; i < n_threads; ++i) {
if (dists[i] > best) {
best = dists[i];
besti = dists_i[i];
}
}
idxs[j] = besti;
}
__syncthreads();
old = idxs[j]; */
}
}
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, hipStream_t stream) {
hipError_t err;
hipLaunchKernelGGL(( furthest_point_sampling_kernel), dim3(b), dim3(max(opt_n_threads(n), 512)), 0,
stream, b, n, m, dataset, temp, idxs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
|
f75b0d6e27cb8750d128b21a19ddaa1e05751802.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
// input: points(b, n, c) idx(b, m)
// output: out(b, m, c)
__global__ void gather_points_kernel(int b, int n, int c, int m,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = blockIdx.y * blockDim.x + threadIdx.x; j < m;
j += blockDim.x * gridDim.y) {
int a = idx[i * m + j];
memcpy(out + (i * m + j) * c, points + (i * n + a) * c,
sizeof(float) * c);
}
}
}
void gather_points_kernel_wrapper(int b, int n, int c, int npoints,
const float *points, const int *idx,
float *out, cudaStream_t stream) {
cudaError_t err;
gather_points_kernel<<<dim3(2, 8, 1), opt_n_threads(npoints) / 4, 0,
stream>>>(b, n, c, npoints, points, idx, out);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Output idxs (b, m)
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0)
return;
const int BlockSize = 512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
int stride = blockDim.x;
int n_threads = stride;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) +
(z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
if (d2 > best) {
best = d2;
besti = k;
}
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
for (int s = n_threads / 2; s > 0; s >>= 1) {
if (tid < s) {
int idx1 = tid;
int idx2 = idx1 + s;
if (dists[idx2] > dists[idx1]) {
dists[idx1] = dists[idx2];
dists_i[idx1] = dists_i[idx2];
}
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
/* if (threadIdx.x == 0) {
for (int i = 0; i < n_threads; ++i) {
if (dists[i] > best) {
best = dists[i];
besti = dists_i[i];
}
}
idxs[j] = besti;
}
__syncthreads();
old = idxs[j]; */
}
}
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, cudaStream_t stream) {
cudaError_t err;
furthest_point_sampling_kernel<<<b, max(opt_n_threads(n), 512), 0,
stream>>>(b, n, m, dataset, temp, idxs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
a326a5a58ebed6b3899ef5bdf87f6eb96e30e510.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Test Arakawa bracket operators
* Compute {f,g} = -f_y g_x + g_y f_x
*
* Input:
* f(x, y) = -sin(2 pi x)^2 sin(2 pi y)^2
* f_x = -4 pi (cos 2 pi x)sin(2 pi x) sin(2 pi y)^2
* f_y = -4 pi(cos 2 pi y) sin(2 pi y) sin(2 pi x)^2
* -> initializes arr1
*
* g(x, y) = sin(pi x) sin(pi y)
* g_x = pi cos(pi x) sin(pi y)
* g_y = pi sin(pi x) cos(pi y)
* -> initializes arr2
*
* Output
 * {f,g} = 16 pi^2 cos(pi x) cos(pi y) (cos(2 pi x) - cos(2 pi y)) sin(pi x)^2 sin(pi y)^2
* -> stored in arr3
*
*
*/
#include <iostream>
#include <sstream>
#include "slab_bc.h"
using namespace std;
int main(void){
constexpr twodads::real_t x_l{-1.0};
constexpr twodads::real_t Lx{2.0};
constexpr twodads::real_t y_l{-1.0};
constexpr twodads::real_t Ly{2.0};
constexpr size_t tlevs{1};
constexpr size_t t_src{0};
constexpr twodads::real_t diff{0.1};
constexpr twodads::real_t hv{0.0};
size_t Nx{128};
size_t My{128};
cout << "Enter Nx: ";
cin >> Nx;
cout << "Enter My: ";
cin >> My;
stringstream fname;
ofstream of;
twodads::slab_layout_t my_geom(x_l, Lx / twodads::real_t(Nx), y_l, Ly / twodads::real_t(My), Nx, 0, My, 2, twodads::grid_t::cell_centered);
twodads::bvals_t<double> my_bvals{twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_periodic, twodads::bc_t::bc_periodic,
0.0, 0.0, 0.0, 0.0};
twodads::stiff_params_t stiff_params(0.1, Lx, Ly, diff, hv, Nx, My / 2 + 1, tlevs);
{
slab_bc my_slab(my_geom, my_bvals, stiff_params);
cuda_array_bc_nogp<twodads::real_t, allocator_device> sol_an(my_geom, my_bvals, tlevs);
sol_an.apply([] __device__ (twodads::real_t dummy, size_t n, size_t m, twodads::slab_layout_t geom) -> twodads::real_t
{
twodads::real_t x{geom.get_x(n)};
twodads::real_t y{geom.get_y(m)};
return(16.0 * twodads::PI * twodads::PI * cos(twodads::PI * x) * cos(twodads::PI * y) * (cos(twodads::TWOPI * x) - cos(twodads::TWOPI * y)) * sin(twodads::PI * x) * sin(twodads::PI * x) * sin(twodads::PI * y) * sin(twodads::PI * y));
},
t_src);
fname.str(string(""));
fname << "test_arakawa_solan_" << Nx << "_out.dat";
utility :: print(sol_an, t_src, fname.str());
my_slab.initialize_arakawa(test_ns::field_t::arr1, test_ns::field_t::arr2, t_src);
// Print input to inv_laplace routine into array arr1_nx.dat
fname.str(string(""));
fname << "test_arakawa_f_" << Nx << "_in.dat";
utility :: print((*my_slab.get_array_ptr(test_ns::field_t::arr1)), t_src, fname.str());
fname.str(string(""));
fname << "test_arakawa_g_" << Nx << "_in.dat";
utility :: print((*my_slab.get_array_ptr(test_ns::field_t::arr2)), t_src, fname.str());
std::cout << "computing poisson bracket" << std::endl;
my_slab.arakawa(test_ns::field_t::arr1, test_ns::field_t::arr2, test_ns::field_t::arr3, t_src, t_src);
fname.str(string(""));
fname << "test_arakawa_solnum_" << Nx << "_out.dat";
utility :: print((*my_slab.get_array_ptr(test_ns::field_t::arr3)), t_src, fname.str());
cuda_array_bc_nogp<twodads::real_t, allocator_device> sol_num(my_slab.get_array_ptr(test_ns::field_t::arr3));
sol_num -= sol_an;
cout << "sol_num - sol_an: Nx = " << Nx << ", My = " << My << ", L2 = " << utility :: L2(sol_num, t_src) << endl;
fname.str(string(""));
fname << "test_arakawa_diff_" << Nx << "_out.dat";
utility :: print(sol_num, t_src, fname.str());
}
hipDeviceReset();
}
// End of file test_arakawa.cu
|
a326a5a58ebed6b3899ef5bdf87f6eb96e30e510.cu
|
/*
* Test Arakawa bracket operators
* Compute {f,g} = -f_y g_x + g_y f_x
*
* Input:
* f(x, y) = -sin(2 pi x)^2 sin(2 pi y)^2
* f_x = -4 pi (cos 2 pi x)sin(2 pi x) sin(2 pi y)^2
* f_y = -4 pi(cos 2 pi y) sin(2 pi y) sin(2 pi x)^2
* -> initializes arr1
*
* g(x, y) = sin(pi x) sin(pi y)
* g_x = pi cos(pi x) sin(pi y)
* g_y = pi sin(pi x) cos(pi y)
* -> initializes arr2
*
* Output
 * {f,g} = 16 pi^2 cos(pi x) cos(pi y) (cos(2 pi x) - cos(2 pi y)) sin(pi x)^2 sin(pi y)^2
* -> stored in arr3
*
*
*/
#include <iostream>
#include <sstream>
#include "slab_bc.h"
using namespace std;
int main(void){
constexpr twodads::real_t x_l{-1.0};
constexpr twodads::real_t Lx{2.0};
constexpr twodads::real_t y_l{-1.0};
constexpr twodads::real_t Ly{2.0};
constexpr size_t tlevs{1};
constexpr size_t t_src{0};
constexpr twodads::real_t diff{0.1};
constexpr twodads::real_t hv{0.0};
size_t Nx{128};
size_t My{128};
cout << "Enter Nx: ";
cin >> Nx;
cout << "Enter My: ";
cin >> My;
stringstream fname;
ofstream of;
twodads::slab_layout_t my_geom(x_l, Lx / twodads::real_t(Nx), y_l, Ly / twodads::real_t(My), Nx, 0, My, 2, twodads::grid_t::cell_centered);
twodads::bvals_t<double> my_bvals{twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_periodic, twodads::bc_t::bc_periodic,
0.0, 0.0, 0.0, 0.0};
twodads::stiff_params_t stiff_params(0.1, Lx, Ly, diff, hv, Nx, My / 2 + 1, tlevs);
{
slab_bc my_slab(my_geom, my_bvals, stiff_params);
cuda_array_bc_nogp<twodads::real_t, allocator_device> sol_an(my_geom, my_bvals, tlevs);
sol_an.apply([] __device__ (twodads::real_t dummy, size_t n, size_t m, twodads::slab_layout_t geom) -> twodads::real_t
{
twodads::real_t x{geom.get_x(n)};
twodads::real_t y{geom.get_y(m)};
return(16.0 * twodads::PI * twodads::PI * cos(twodads::PI * x) * cos(twodads::PI * y) * (cos(twodads::TWOPI * x) - cos(twodads::TWOPI * y)) * sin(twodads::PI * x) * sin(twodads::PI * x) * sin(twodads::PI * y) * sin(twodads::PI * y));
},
t_src);
fname.str(string(""));
fname << "test_arakawa_solan_" << Nx << "_out.dat";
utility :: print(sol_an, t_src, fname.str());
my_slab.initialize_arakawa(test_ns::field_t::arr1, test_ns::field_t::arr2, t_src);
// Print input to inv_laplace routine into array arr1_nx.dat
fname.str(string(""));
fname << "test_arakawa_f_" << Nx << "_in.dat";
utility :: print((*my_slab.get_array_ptr(test_ns::field_t::arr1)), t_src, fname.str());
fname.str(string(""));
fname << "test_arakawa_g_" << Nx << "_in.dat";
utility :: print((*my_slab.get_array_ptr(test_ns::field_t::arr2)), t_src, fname.str());
std::cout << "computing poisson bracket" << std::endl;
my_slab.arakawa(test_ns::field_t::arr1, test_ns::field_t::arr2, test_ns::field_t::arr3, t_src, t_src);
fname.str(string(""));
fname << "test_arakawa_solnum_" << Nx << "_out.dat";
utility :: print((*my_slab.get_array_ptr(test_ns::field_t::arr3)), t_src, fname.str());
cuda_array_bc_nogp<twodads::real_t, allocator_device> sol_num(my_slab.get_array_ptr(test_ns::field_t::arr3));
sol_num -= sol_an;
cout << "sol_num - sol_an: Nx = " << Nx << ", My = " << My << ", L2 = " << utility :: L2(sol_num, t_src) << endl;
fname.str(string(""));
fname << "test_arakawa_diff_" << Nx << "_out.dat";
utility :: print(sol_num, t_src, fname.str());
}
cudaDeviceReset();
}
// End of file test_arakawa.cu
|
049ee17453c7b928d1e7e2956e86d9b67382d9e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/concat/concat.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/concat/concat.cuh"
#include "src/cuda/utils.cuh"
#include "megdnn/dtype.h"
namespace megdnn {
namespace cuda {
namespace concat {
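/* Indexing scheme of the forward kernel below (as read from the code, added for clarity):
 * the output is viewed as (A, B, C) with B the concatenated axis. For output position b
 * along that axis, table_outer[b] selects which source tensor the slice comes from,
 * table_inner[b] is the position inside that source, and Bv[i] is the concatenated-axis
 * length of source i.
 */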
template <typename T>
__global__ void forward_kernel(const T **srcs, T *dst,
size_t nr_srcs,
size_t A, size_t B, size_t C,
const size_t *Bv,
const size_t *table_outer,
const size_t *table_inner)
{
size_t addr = threadIdx.x + blockIdx.x * blockDim.x;
if (addr < A*B*C) {
size_t c = addr % C;
size_t b = addr / C % B;
size_t a = addr / (B*C);
size_t i = table_outer[b];
size_t B_src = Bv[i];
size_t b_src = table_inner[b];
size_t addr_src = (a*B_src + b_src)*C + c;
dst[addr] = srcs[i][addr_src];
}
}
template <typename T>
void forward_proxy(const T **srcs,
T *dst,
size_t nr_srcs,
size_t A, size_t B, size_t C,
const size_t *Bv,
const size_t *table_outer,
const size_t *table_inner,
hipStream_t stream)
{
size_t total_nr_elem = A * B * C;
size_t NR_BLOCKS = DIVUP(total_nr_elem, NR_THREADS);
hipLaunchKernelGGL(( forward_kernel), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream, srcs, dst,
nr_srcs,
A, B, C,
Bv,
table_outer,
table_inner);
after_kernel_launch();
}
#define INST(T) \
template void forward_proxy<T>(const T**, T *, size_t, size_t, size_t, size_t, \
const size_t *, const size_t *, const size_t *, hipStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace concat
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
049ee17453c7b928d1e7e2956e86d9b67382d9e8.cu
|
/**
* \file dnn/src/cuda/concat/concat.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/concat/concat.cuh"
#include "src/cuda/utils.cuh"
#include "megdnn/dtype.h"
namespace megdnn {
namespace cuda {
namespace concat {
template <typename T>
__global__ void forward_kernel(const T **srcs, T *dst,
size_t nr_srcs,
size_t A, size_t B, size_t C,
const size_t *Bv,
const size_t *table_outer,
const size_t *table_inner)
{
size_t addr = threadIdx.x + blockIdx.x * blockDim.x;
if (addr < A*B*C) {
size_t c = addr % C;
size_t b = addr / C % B;
size_t a = addr / (B*C);
size_t i = table_outer[b];
size_t B_src = Bv[i];
size_t b_src = table_inner[b];
size_t addr_src = (a*B_src + b_src)*C + c;
dst[addr] = srcs[i][addr_src];
}
}
template <typename T>
void forward_proxy(const T **srcs,
T *dst,
size_t nr_srcs,
size_t A, size_t B, size_t C,
const size_t *Bv,
const size_t *table_outer,
const size_t *table_inner,
cudaStream_t stream)
{
size_t total_nr_elem = A * B * C;
size_t NR_BLOCKS = DIVUP(total_nr_elem, NR_THREADS);
forward_kernel<<<NR_BLOCKS, NR_THREADS, 0, stream>>>(srcs, dst,
nr_srcs,
A, B, C,
Bv,
table_outer,
table_inner);
after_kernel_launch();
}
#define INST(T) \
template void forward_proxy<T>(const T**, T *, size_t, size_t, size_t, size_t, \
const size_t *, const size_t *, const size_t *, cudaStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace concat
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
acaf7d3fd8bfd46359a1b51904beca406f63277f.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @file dct_lee_cuda_kernel.cu
* @author Yibo Lin
* @date Oct 2018
*/
//#include <stdexcept>
//#include <algorithm>
#include <cassert>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "dct_lee_cuda.h"
#include "dct_lee_cuda_kernel.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
namespace lee
{
constexpr double PI = 3.14159265358979323846;
/// Return true if a number is power of 2
template <typename T>
inline bool isPowerOf2(T val)
{
return val && (val & (val - 1)) == 0;
}
template <typename T>
inline void swap(T& x, T& y)
{
T tmp = x;
x = y;
y = tmp;
}
/// Precompute cosine values needed for N-point dct
/// @param cos size N - 1 buffer on GPU, contains the result after function call
/// @param N the length of target dct, must be power of 2
template <typename TValue>
void precompute_dct_cos(TValue *cos, int N)
{
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// create the array on host
TValue* cos_host = new TValue [N];
int offset = 0;
int halfLen = N / 2;
while (halfLen)
{
TValue phaseStep = 0.5 * PI / halfLen;
TValue phase = 0.5 * phaseStep;
for (int i = 0; i < halfLen; ++i)
{
cos_host[offset + i] = 0.5 / std::cos(phase);
phase += phaseStep;
}
offset += halfLen;
halfLen /= 2;
}
// copy to GPU
hipMemcpy(cos, cos_host, N*sizeof(TValue), hipMemcpyHostToDevice);
delete [] cos_host;
}
/// Precompute cosine values needed for N-point idct
/// @param cos size N - 1 buffer on GPU, contains the result after function call
/// @param N the length of target idct, must be power of 2
template <typename TValue>
void precompute_idct_cos(TValue *cos, int N)
{
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// create the array on host
TValue* cos_host = new TValue [N];
int offset = 0;
int halfLen = 1;
while(halfLen < N)
{
TValue phaseStep = 0.5 * PI / halfLen;
TValue phase = 0.5 * phaseStep;
for (int i = 0; i < halfLen; ++i)
{
cos_host[offset + i] = 0.5 / std::cos(phase);
phase += phaseStep;
}
offset += halfLen;
halfLen *= 2;
}
// copy to GPU
hipMemcpy(cos, cos_host, N*sizeof(TValue), hipMemcpyHostToDevice);
delete [] cos_host;
}
/// The fast Discrete Cosine Transform (DCT) and its inverse (IDCT) are implemented with Lee's algorithm
/// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984
///
/// Lee's algorithm has a recursive structure in nature.
/// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
///
/// My implementation here is iterative, which is more efficient than the recursive version.
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT
/// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1
///
/// @param vec length M * N sequence to be transformed in last dimension
/// @param out length M * N helping buffer, which is also the output
/// @param buf length M * N helping buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos'
/// @param M length of dimension 0 of vec
/// @param N length of dimension 1 of vec, must be power of 2
template <typename TValue>
void dct(const TValue *vec, TValue *out, TValue* buf, const TValue *cos, int M, int N)
{
int block_count = 2048;
int thread_count = 512;
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// Pointers point to the beginning indices of two adjacent iterations
TValue *curr = buf;
TValue *next = out;
    // 'curr' and 'next' hold the data of two adjacent iterations
    // Copy 'vec' into 'curr'
    hipMemcpy(curr, vec, M*N*sizeof(TValue), hipMemcpyDeviceToDevice);
    // Current butterfly length and half length
int len = N;
int halfLen = len / 2;
// Iteratively bi-partition sequences into sub-sequences
int cosOffset = 0;
while (halfLen)
{
hipLaunchKernelGGL(( computeDctForward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, cos, M, N, len, halfLen, cosOffset);
swap(curr, next);
cosOffset += halfLen;
len = halfLen;
halfLen /= 2;
}
// Bottom-up form the final DCT solution
// Note that the case len = 2 will do nothing, so we start from len = 4
len = 4;
halfLen = 2;
while (halfLen < N)
{
hipLaunchKernelGGL(( computeDctBackward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, M, N, len, halfLen);
swap(curr, next);
halfLen = len;
len *= 2;
}
// Populate the final results into 'out'
if (curr != out)
{
hipMemcpy(out, curr, M*N*sizeof(TValue), hipMemcpyDeviceToDevice);
}
}
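/// Illustrative end-to-end sketch (not part of the original file; the helper name is an
/// assumption and error checking is omitted): allocate the scratch buffer and cosine table
/// on the GPU, precompute the cosine values, and run one forward DCT over an M x N
/// row-major device array.
template <typename TValue>
void dct_example(const TValue *vec, TValue *out, int M, int N)
{
    TValue *buf = nullptr;
    TValue *cos_table = nullptr;
    hipMalloc((void**)&buf, M * N * sizeof(TValue));
    hipMalloc((void**)&cos_table, N * sizeof(TValue));
    precompute_dct_cos<TValue>(cos_table, N);   // fills the table expected by dct()
    dct<TValue>(vec, out, buf, cos_table, M, N);
    hipFree(buf);
    hipFree(cos_table);
}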
/// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1
/// @param vec length M * N sequence to be transformed
/// @param out length M * N helping buffer, which is also the output
/// @param buf length M * N helping buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos'
/// @param M length of dimension 0 of vec
/// @param N length of dimension 1 of vec, must be power of 2
template <typename TValue>
void idct(const TValue *vec, TValue *out, TValue *buf, const TValue *cos, int M, int N)
{
int block_count = 32;
int thread_count = 1024;
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// Pointers point to the beginning indices of two adjacent iterations
TValue *curr = buf;
TValue *next = out;
    // 'curr' and 'next' hold the data of two adjacent iterations
    // Copy 'vec' into 'curr'
    hipMemcpy(curr, vec, M*N*sizeof(TValue), hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( computeIdctScale0), dim3(block_count), dim3(thread_count), 0, 0, curr, M, N);
    // Current butterfly length and half length
int len = N;
int halfLen = len / 2;
// Iteratively bi-partition sequences into sub-sequences
while (halfLen)
{
hipLaunchKernelGGL(( computeIdctForward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, M, N, len, halfLen);
swap(curr, next);
len = halfLen;
halfLen /= 2;
}
// Bottom-up form the final IDCT solution
len = 2;
halfLen = 1;
int cosOffset = 0;
while(halfLen < N)
{
hipLaunchKernelGGL(( ComputeIdctBackward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, cos, M, N, len, halfLen, cosOffset);
swap(curr, next);
cosOffset += halfLen;
halfLen = len;
len *= 2;
}
// Populate the final results into 'out'
if (curr != out)
{
hipMemcpy(out, curr, M*N*sizeof(TValue), hipMemcpyDeviceToDevice);
}
}
} // End of namespace lee
#define REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \
void instantiateDctPrecomputeCosCudaLauncher(\
type* cos, \
int N \
) \
{ \
lee::precompute_dct_cos<type>( \
cos, \
N \
); \
}
REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float);
REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \
void instantiateIdctPrecomputeCosCudaLauncher(\
type* cos, \
int N \
) \
{ \
lee::precompute_idct_cos<type>( \
cos, \
N \
); \
}
REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double);
#define REGISTER_DCT_KERNEL_LAUNCHER(type) \
void instantiateDctCudaLauncher(\
const type* vec, \
type* curr, \
type* next, \
const type* cos, \
int M, \
int N \
) \
{ \
lee::dct<type>( \
vec, \
curr, \
next, \
cos, \
M, \
N \
); \
}
REGISTER_DCT_KERNEL_LAUNCHER(float);
REGISTER_DCT_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_KERNEL_LAUNCHER(type) \
void instantiateIdctCudaLauncher(\
const type* vec, \
type* curr, \
type* next, \
const type* cos, \
int M, \
int N \
) \
{ \
lee::idct<type>( \
vec, \
curr, \
next, \
cos, \
M, \
N \
); \
}
REGISTER_IDCT_KERNEL_LAUNCHER(float);
REGISTER_IDCT_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
acaf7d3fd8bfd46359a1b51904beca406f63277f.cu
|
/**
* @file dct_lee_cuda_kernel.cu
* @author Yibo Lin
* @date Oct 2018
*/
//#include <stdexcept>
//#include <algorithm>
#include <cassert>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
#include "dct_lee_cuda.h"
#include "dct_lee_cuda_kernel.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
namespace lee
{
constexpr double PI = 3.14159265358979323846;
/// Return true if a number is power of 2
template <typename T>
inline bool isPowerOf2(T val)
{
return val && (val & (val - 1)) == 0;
}
template <typename T>
inline void swap(T& x, T& y)
{
T tmp = x;
x = y;
y = tmp;
}
/// Precompute cosine values needed for N-point dct
/// @param cos size N - 1 buffer on GPU, contains the result after function call
/// @param N the length of target dct, must be power of 2
template <typename TValue>
void precompute_dct_cos(TValue *cos, int N)
{
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// create the array on host
TValue* cos_host = new TValue [N];
int offset = 0;
int halfLen = N / 2;
while (halfLen)
{
TValue phaseStep = 0.5 * PI / halfLen;
TValue phase = 0.5 * phaseStep;
for (int i = 0; i < halfLen; ++i)
{
cos_host[offset + i] = 0.5 / std::cos(phase);
phase += phaseStep;
}
offset += halfLen;
halfLen /= 2;
}
// copy to GPU
cudaMemcpy(cos, cos_host, N*sizeof(TValue), cudaMemcpyHostToDevice);
delete [] cos_host;
}
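// The cos table is packed stage by stage, largest butterfly first. For N = 8:
//   indices 0..3 : 0.5 / cos((2i+1)*PI/16)   (halfLen = 4)
//   indices 4..5 : 0.5 / cos((2i+1)*PI/8)    (halfLen = 2)
//   index   6    : 0.5 / cos(PI/4)           (halfLen = 1)
// i.e. N - 1 = 7 meaningful entries, although the buffer is allocated with N slots.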
/// Precompute cosine values needed for N-point idct
/// @param cos size N - 1 buffer on GPU, contains the result after function call
/// @param N the length of target idct, must be power of 2
template <typename TValue>
void precompute_idct_cos(TValue *cos, int N)
{
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// create the array on host
TValue* cos_host = new TValue [N];
int offset = 0;
int halfLen = 1;
while(halfLen < N)
{
TValue phaseStep = 0.5 * PI / halfLen;
TValue phase = 0.5 * phaseStep;
for (int i = 0; i < halfLen; ++i)
{
cos_host[offset + i] = 0.5 / std::cos(phase);
phase += phaseStep;
}
offset += halfLen;
halfLen *= 2;
}
// copy to GPU
cudaMemcpy(cos, cos_host, N*sizeof(TValue), cudaMemcpyHostToDevice);
delete [] cos_host;
}
/// The implementation of fast Discrete Cosine Transform (DCT) algorithm and its inverse (IDCT) are Lee's algorithms
/// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984
///
/// Lee's algorithm has a recursive structure in nature.
/// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
///
/// My implementation here is iterative, which is more efficient than the recursive version.
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT
/// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1
///
/// @param vec length M * N sequence to be transformed in last dimension
/// @param out length M * N helping buffer, which is also the output
/// @param buf length M * N helping buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos'
/// @param M length of dimension 0 of vec
/// @param N length of dimension 1 of vec, must be power of 2
template <typename TValue>
void dct(const TValue *vec, TValue *out, TValue* buf, const TValue *cos, int M, int N)
{
int block_count = 2048;
int thread_count = 512;
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// Pointers point to the beginning indices of two adjacent iterations
TValue *curr = buf;
TValue *next = out;
// 'curr' and 'next' alternate between the two buffers that hold data of two adjacent iterations
// Copy the M*N elements of 'vec' into the working buffer
cudaMemcpy(curr, vec, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice);
// Current butterfly length and half length
int len = N;
int halfLen = len / 2;
// Iteratively bi-partition sequences into sub-sequences
int cosOffset = 0;
while (halfLen)
{
computeDctForward<<<block_count, thread_count>>>(curr, next, cos, M, N, len, halfLen, cosOffset);
swap(curr, next);
cosOffset += halfLen;
len = halfLen;
halfLen /= 2;
}
// Bottom-up form the final DCT solution
// Note that the case len = 2 will do nothing, so we start from len = 4
len = 4;
halfLen = 2;
while (halfLen < N)
{
computeDctBackward<<<block_count, thread_count>>>(curr, next, M, N, len, halfLen);
swap(curr, next);
halfLen = len;
len *= 2;
}
// Populate the final results into 'out'
if (curr != out)
{
cudaMemcpy(out, curr, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice);
}
}
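/// A minimal usage sketch, assuming d_in, d_out, d_buf and d_cos are device buffers
/// of the sizes documented above (the names are illustrative, not part of the API).
template <typename TValue>
void dct_usage_example(const TValue *d_in, TValue *d_out, TValue *d_buf, TValue *d_cos, int M, int N)
{
    precompute_dct_cos<TValue>(d_cos, N); // fill the cosine factors once per N
    dct<TValue>(d_in, d_out, d_buf, d_cos, M, N); // transformed rows land in d_out
}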
/// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1
/// @param vec length M * N sequence to be transformed
/// @param out length M * N helping buffer, which is also the output
/// @param buf length M * N helping buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos'
/// @param M length of dimension 0 of vec
/// @param N length of dimension 1 of vec, must be power of 2
template <typename TValue>
void idct(const TValue *vec, TValue *out, TValue *buf, const TValue *cos, int M, int N)
{
int block_count = 32;
int thread_count = 1024;
// The input length must be power of 2
if (! isPowerOf2<int>(N))
{
printf("Input length is not power of 2.\n");
assert(0);
}
// Pointers point to the beginning indices of two adjacent iterations
TValue *curr = buf;
TValue *next = out;
// 'curr' and 'next' alternate between the two buffers that hold data of two adjacent iterations
// Copy the M*N elements of 'vec' into the working buffer
cudaMemcpy(curr, vec, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice);
computeIdctScale0<<<block_count, thread_count>>>(curr, M, N);
// Current butterfly length and half length
int len = N;
int halfLen = len / 2;
// Iteratively bi-partition sequences into sub-sequences
while (halfLen)
{
computeIdctForward<<<block_count, thread_count>>>(curr, next, M, N, len, halfLen);
swap(curr, next);
len = halfLen;
halfLen /= 2;
}
// Bottom-up form the final IDCT solution
len = 2;
halfLen = 1;
int cosOffset = 0;
while(halfLen < N)
{
ComputeIdctBackward<<<block_count, thread_count>>>(curr, next, cos, M, N, len, halfLen, cosOffset);
swap(curr, next);
cosOffset += halfLen;
halfLen = len;
len *= 2;
}
// Populate the final results into 'out'
if (curr != out)
{
cudaMemcpy(out, curr, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice);
}
}
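// With the conventions above, dct() is an unnormalized DCT-II and idct() an
// unnormalized DCT-III, so feeding the output of dct() into idct() reproduces the
// input scaled by N/2; any normalization is assumed to be handled by the caller.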
} // End of namespace lee
#define REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \
void instantiateDctPrecomputeCosCudaLauncher(\
type* cos, \
int N \
) \
{ \
lee::precompute_dct_cos<type>( \
cos, \
N \
); \
}
REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float);
REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \
void instantiateIdctPrecomputeCosCudaLauncher(\
type* cos, \
int N \
) \
{ \
lee::precompute_idct_cos<type>( \
cos, \
N \
); \
}
REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double);
#define REGISTER_DCT_KERNEL_LAUNCHER(type) \
void instantiateDctCudaLauncher(\
const type* vec, \
type* curr, \
type* next, \
const type* cos, \
int M, \
int N \
) \
{ \
lee::dct<type>( \
vec, \
curr, \
next, \
cos, \
M, \
N \
); \
}
REGISTER_DCT_KERNEL_LAUNCHER(float);
REGISTER_DCT_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_KERNEL_LAUNCHER(type) \
void instantiateIdctCudaLauncher(\
const type* vec, \
type* curr, \
type* next, \
const type* cos, \
int M, \
int N \
) \
{ \
lee::idct<type>( \
vec, \
curr, \
next, \
cos, \
M, \
N \
); \
}
REGISTER_IDCT_KERNEL_LAUNCHER(float);
REGISTER_IDCT_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
54a2a3139787d6b1d7607b5a0f84d0ed7d690ac5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __KERNELS
#define __KERNELS
#define FILTER_SIZE 5
#define OUTPUT_TILE_SIZE 12
#define INPUT_TILE_SIZE (OUTPUT_TILE_SIZE + FILTER_SIZE - 1)
#define FILTER_SIZE_GRAD 3
#define INPUT_TILE_SIZE_GRAD (OUTPUT_TILE_SIZE + FILTER_SIZE_GRAD - 1)
#define OVERHANG ((INPUT_TILE_SIZE - OUTPUT_TILE_SIZE) / 2)
#define OVERHANG_GRAD ((INPUT_TILE_SIZE_GRAD - OUTPUT_TILE_SIZE) / 2)
__constant__ float gauss_filter[FILTER_SIZE][FILTER_SIZE] =
{{(float)2/159, (float)4/159, (float)5/159, (float)4/159, (float)2/159},
{(float)4/159, (float)9/159, (float)12/159, (float)9/159, (float)4/159},
{(float)5/159, (float)12/159, (float)15/159, (float)12/159, (float)5/159},
{(float)4/159, (float)9/159, (float)12/159, (float)9/159, (float)4/159},
{(float)2/159, (float)4/159, (float)5/159, (float)4/159, (float)2/159}};
__constant__ float Gx_filter[FILTER_SIZE_GRAD][FILTER_SIZE_GRAD] =
{{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
__constant__ float Gy_filter[FILTER_SIZE_GRAD][FILTER_SIZE_GRAD] =
{{1, 2, 1},
{0, 0, 0},
{-1, -2, -1}};
__global__ void grayscale_kernel(float * r, float * g, float * b, float * out, int width, int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < width && y < height)
{
out[y * width + x] = (r[y * width + x] + g[y * width + x] + b[y * width + x]) / 3;
}
}
__global__ void gaussian_filter_kernel(float * in, float * out, int width, int height) {
__shared__ float collab[INPUT_TILE_SIZE][INPUT_TILE_SIZE];
float *N_data = in;
float *P_data = out;
//these are the indices that reference the output array
int output_x = blockIdx.x * OUTPUT_TILE_SIZE + threadIdx.x - OVERHANG;
int output_y = blockIdx.y * OUTPUT_TILE_SIZE + threadIdx.y - OVERHANG;
collab[threadIdx.y][threadIdx.x] = (output_x >= 0 && output_x < width
&& output_y >= 0 && output_y < height) ? N_data[output_y * width + output_x] : 0;
__syncthreads();
if((int)threadIdx.x - OVERHANG >= 0 &&
(int)threadIdx.x - OVERHANG < OUTPUT_TILE_SIZE &&
(int)threadIdx.y - OVERHANG >= 0 &&
(int)threadIdx.y - OVERHANG < OUTPUT_TILE_SIZE &&
output_x < width && output_y < height)
{
float accum = 0.0f;
for(int i = 0; i < FILTER_SIZE; i++)
{
for(int j = 0; j < FILTER_SIZE; j++)
{
accum += collab[threadIdx.y + i - OVERHANG][threadIdx.x + j - OVERHANG] * gauss_filter[i][j];
}
}
P_data[output_y * width + output_x] = accum;
}
}
__global__ void gradient_calc_kernel(float * in, float * G, int width, int height) {
__shared__ float collab[INPUT_TILE_SIZE_GRAD][INPUT_TILE_SIZE_GRAD];
float *N_data = in;
float *P_data = G;
//these are the indices that reference the output array
int output_x = blockIdx.x * OUTPUT_TILE_SIZE + threadIdx.x - OVERHANG_GRAD;
int output_y = blockIdx.y * OUTPUT_TILE_SIZE + threadIdx.y - OVERHANG_GRAD;
collab[threadIdx.y][threadIdx.x] = (output_x >= 0 && output_x < width
&& output_y >= 0 && output_y < height) ? N_data[output_y * width + output_x] : 0;
__syncthreads();
if((int)threadIdx.x - OVERHANG_GRAD >= 0 &&
(int)threadIdx.x - OVERHANG_GRAD < OUTPUT_TILE_SIZE &&
(int)threadIdx.y - OVERHANG_GRAD >= 0 &&
(int)threadIdx.y - OVERHANG_GRAD < OUTPUT_TILE_SIZE &&
output_x < width && output_y < height)
{
float accumX = 0.0f;
float accumY = 0.0f;
for(int i = 0; i < FILTER_SIZE_GRAD; i++)
{
for(int j = 0; j < FILTER_SIZE_GRAD; j++)
{
accumX += collab[threadIdx.y + i - OVERHANG_GRAD][threadIdx.x + j - OVERHANG_GRAD] * Gx_filter[i][j];
accumY += collab[threadIdx.y + i - OVERHANG_GRAD][threadIdx.x + j - OVERHANG_GRAD] * Gy_filter[i][j];
}
}
P_data[output_y * width + output_x] = abs(accumX) + abs(accumY);
if(P_data[output_y * width + output_x] >= 255)
P_data[output_y * width + output_x] = 255;
}
}
__global__ void theta_calc_kernel(float * gradX, float * gradY, int * out, int width, int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < width && y < height)
{
float val = atan2(gradY[y * width + x], gradX[y * width + x]);
val /= 3.1415926535;//PI
//now we've constrained ourselves to a range of [-1, 1]
if(val >= -.125 && val < .125)
out[y * width + x] = 0;
else if(val >= .125 && val < .375)
out[y * width + x] = 1;
else if(val >= .375 && val < .625)
out[y * width + x] = 2;
else if(val >= .625 && val < .875)
out[y * width + x] = 3;
else if(val >= .875 || val < -.875)
out[y * width + x] = 4;
else if(val >= -.875 && val < -.625)
out[y * width + x] = 5;
else if(val >= -.625 && val < -.375)
out[y * width + x] = 6;
else
out[y * width + x] = 7;
}
}
__global__ void diff_kernel(float *orig, float *comp, float *out, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < width && y < height)
{
out[y * width + x] = abs(orig[y * width + x] - comp[y * width + x]/3);
}
}
__global__ void skin_detection(float * R, float * G, float * B, float * retR, float * retG, int width, int height) {
//INSERT KERNEL CODE HERE
float r, g, Red, Green, Blue, F1, F2, w, theta, H, I;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * blockDim.y + ty;
int col_o = blockIdx.x * blockDim.x + tx;
if(row_o < height && col_o < width)
{
/*get the RGB value*/
Red = R[row_o * width + col_o];
Green = G[row_o * width + col_o];
Blue = B[row_o * width + col_o];
/*get the intensity*/
I = (Red + Green + Blue)/3;
/*normalized red and green*/
r = Red/(Red + Green + Blue);
g = Green/(Red + Green + Blue);
/*function 1 and 2 and w in the doc*/
F1 = -1.376 * r * r + 1.0743 * r + 0.2;
F2 = -0.776 * r * r + 0.5601 * r + 0.18;
w = (r - 0.33) * (r-0.33) + (g - 0.33) * (g - 0.33);
theta = acos(0.5 * (Red * 2 - Green - Blue) / sqrt((Red - Green) * (Red - Green) + (Red - Blue) * (Green - Blue)));
if (Blue <= Green)
H = theta;
else
H = 3.1415926535 * 2 - theta;
/*face detected*/
if(g < F1 && g > F2 && w > 0.001 && ((H > (3.1415926535*4/3)) || (H < (3.1415926535 / 9))))
/*set R to be 255*/
retR[row_o * width + col_o] = 255;
else
retR[row_o * width + col_o] = 0;
/*hair detected*/
if( I < 80 && (Blue - Green < 15 || Blue - Red < 15))
//if ((H <= (2*3.1415926535/9)) && H > (3.1415926535 /9))
retG[row_o * width + col_o] = 255;
else
retG[row_o * width + col_o] = 0;
}
}
__global__ void clean_up(float * R, float * G, float * retR, float * retG, int width, int height) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * blockDim.y + ty;
int col_o = blockIdx.x * blockDim.x + tx;
int i,j;
int counter1 = 0;
int counter2 = 0;
if(R[row_o * width + col_o] == 255)
{
for(i = -2; i < 3; i++)
{
for(j = -2; j < 3; j++)
{
if(((row_o + j) * width + col_o+i) < 0)
{
if(R[0] == 255)
counter1++;
}
else if(R[(row_o + j) * width + col_o+i] == 255)
counter1++;
}
}
}
if(G[row_o * width + col_o] == 255)
{
for(i = -3; i < 4; i++)
{
for(j = -3; j < 4; j++)
{
if(((row_o + j) * width + col_o+i) < 0)
{
if(G[0] == 255)
counter2++;
}
else if(G[(row_o + j) * width + col_o+i] == 255)
counter2++;
}
}
}
if(counter1 >20)
retR[row_o * width + col_o] = 255;
else
retR[row_o * width + col_o] = 0;
if(counter2 >20)
retG[row_o * width + col_o] = 255;
else
retG[row_o * width + col_o] = 0;
}
__global__ void quantization(float * R, float * G, float * retR, float * retG, int width, int height) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * blockDim.y + ty;
int col_o = blockIdx.x * blockDim.x + tx;
int i,j;
if(R[row_o * width + col_o] == 255)
{
for(i = -2; i < 3; i++)
{
for(j = -2; j < 3; j++)
{
if(((row_o + j) * width + col_o+i) <= 0)
{
retR[0] = 255;
}
else
retR[(row_o + j) * width + col_o+i] = 255;
}
}
}
else if(G[row_o * width + col_o] == 255)
{
for(i = -2; i < 3; i++)
{
for(j = -2; j < 3; j++)
{
if(((row_o + j) * width + col_o+i) <= 0)
retG[0] = 0;
else
retG[(row_o + j) * width + col_o+i] = 255;
}
}
}
}
void face_detection(float * R, float * G, float * r_out, float * g_out,float * b_out,int width, int height) {
int i, j;
int min_x_r = width-1;
int max_x_r = 0;
int min_y_r = height-1;
int max_y_r = 0;
int min_x_g = width-1;
int max_x_g = 0;
int min_y_g = height-1;
int max_y_g = 0;
for(i = 0; i < width; i++)
{
for(j = 0; j < height; j++)
{
/*skin detection*/
if(R[j* width + i] == 255)
{
//printf("%d\n", j);
if(j < min_y_r)
min_y_r = j;
else if(j > max_y_r)
max_y_r = j;
if(i < min_x_r)
min_x_r = i;
else if(i > max_x_r)
max_x_r = i;
}
}
}
for(i = 0; i < width; i++)
{
for(j = 0; j < height; j++)
{
/*skin detection*/
if(G[j* width + i] == 255)
{
//printf("%d\n", j);
if(j < min_y_g)
min_y_g = j;
else if(j > max_y_g)
max_y_g = j;
if(i < min_x_g)
min_x_g = i;
else if(i > max_x_g)
max_x_g = i;
}
}
}
/*draw the box*/
/*draw the box*/
//if(min_y_r > min_y_g && min_x_r > min_x_g && max_x_r < max_x_g)
//{
//printf("%d\n", min_x_r);
// printf("%d\n", max_x_r);
// printf("%d\n", min_y_r);
// printf("%d\n", max_x_r);
if(min_x_r >= min_x_g && min_y_r >= min_y_g && max_x_r <= max_x_g)
{
for(i = min_x_r; i < max_x_r; i++)
{
r_out[max_y_r * width + i] = 255;
g_out[max_y_r * width + i] = 0;
b_out[max_y_r * width + i] = 0;
r_out[min_y_r * width + i] = 255;
g_out[min_y_r * width + i] = 0;
b_out[min_y_r * width + i] = 0;
}
for(i = min_y_r; i < max_y_r; i++)
{
r_out[i * width + min_x_r] = 255;
g_out[i * width + min_x_r] = 0;
b_out[i * width + min_x_r] = 0;
r_out[i * width + max_x_r] = 255;
g_out[i * width + max_x_r] = 0;
b_out[i * width + max_x_r] = 0;
}
}
//image->writeToFile("poop.png");
for(i = min_x_r; i < max_x_r; i++)
{
R[max_y_r * width + i] = 255;
R[min_y_r * width + i] = 255;
}
for(i = min_y_r; i < max_y_r; i++)
{
R[i * width + min_x_r] = 255;
R[i * width + max_x_r] = 255;
}
}
#endif //__KERNELS
|
54a2a3139787d6b1d7607b5a0f84d0ed7d690ac5.cu
|
#ifndef __KERNELS
#define __KERNELS
#define FILTER_SIZE 5
#define OUTPUT_TILE_SIZE 12
#define INPUT_TILE_SIZE (OUTPUT_TILE_SIZE + FILTER_SIZE - 1)
#define FILTER_SIZE_GRAD 3
#define INPUT_TILE_SIZE_GRAD (OUTPUT_TILE_SIZE + FILTER_SIZE_GRAD - 1)
#define OVERHANG ((INPUT_TILE_SIZE - OUTPUT_TILE_SIZE) / 2)
#define OVERHANG_GRAD ((INPUT_TILE_SIZE_GRAD - OUTPUT_TILE_SIZE) / 2)
__constant__ float gauss_filter[FILTER_SIZE][FILTER_SIZE] =
{{(float)2/159, (float)4/159, (float)5/159, (float)4/159, (float)2/159},
{(float)4/159, (float)9/159, (float)12/159, (float)9/159, (float)4/159},
{(float)5/159, (float)12/159, (float)15/159, (float)12/159, (float)5/159},
{(float)4/159, (float)9/159, (float)12/159, (float)9/159, (float)4/159},
{(float)2/159, (float)4/159, (float)5/159, (float)4/159, (float)2/159}};
__constant__ float Gx_filter[FILTER_SIZE_GRAD][FILTER_SIZE_GRAD] =
{{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
__constant__ float Gy_filter[FILTER_SIZE_GRAD][FILTER_SIZE_GRAD] =
{{1, 2, 1},
{0, 0, 0},
{-1, -2, -1}};
__global__ void grayscale_kernel(float * r, float * g, float * b, float * out, int width, int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < width && y < height)
{
out[y * width + x] = (r[y * width + x] + g[y * width + x] + b[y * width + x]) / 3;
}
}
__global__ void gaussian_filter_kernel(float * in, float * out, int width, int height) {
__shared__ float collab[INPUT_TILE_SIZE][INPUT_TILE_SIZE];
float *N_data = in;
float *P_data = out;
//these are the indices that reference the output array
int output_x = blockIdx.x * OUTPUT_TILE_SIZE + threadIdx.x - OVERHANG;
int output_y = blockIdx.y * OUTPUT_TILE_SIZE + threadIdx.y - OVERHANG;
collab[threadIdx.y][threadIdx.x] = (output_x >= 0 && output_x < width
&& output_y >= 0 && output_y < height) ? N_data[output_y * width + output_x] : 0;
__syncthreads();
if((int)threadIdx.x - OVERHANG >= 0 &&
(int)threadIdx.x - OVERHANG < OUTPUT_TILE_SIZE &&
(int)threadIdx.y - OVERHANG >= 0 &&
(int)threadIdx.y - OVERHANG < OUTPUT_TILE_SIZE &&
output_x < width && output_y < height)
{
float accum = 0.0f;
for(int i = 0; i < FILTER_SIZE; i++)
{
for(int j = 0; j < FILTER_SIZE; j++)
{
accum += collab[threadIdx.y + i - OVERHANG][threadIdx.x + j - OVERHANG] * gauss_filter[i][j];
}
}
P_data[output_y * width + output_x] = accum;
}
}
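// A launch-configuration sketch consistent with the tiling above: one thread per
// element of the shared-memory input tile, one OUTPUT_TILE_SIZE x OUTPUT_TILE_SIZE
// patch of output per block (d_in and d_out are hypothetical device buffers).
static inline void launch_gaussian_filter(float *d_in, float *d_out, int width, int height)
{
    dim3 block(INPUT_TILE_SIZE, INPUT_TILE_SIZE);
    dim3 grid((width + OUTPUT_TILE_SIZE - 1) / OUTPUT_TILE_SIZE,
              (height + OUTPUT_TILE_SIZE - 1) / OUTPUT_TILE_SIZE);
    gaussian_filter_kernel<<<grid, block>>>(d_in, d_out, width, height);
}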
__global__ void gradient_calc_kernel(float * in, float * G, int width, int height) {
__shared__ float collab[INPUT_TILE_SIZE_GRAD][INPUT_TILE_SIZE_GRAD];
float *N_data = in;
float *P_data = G;
//these are the indices that reference the output array
int output_x = blockIdx.x * OUTPUT_TILE_SIZE + threadIdx.x - OVERHANG_GRAD;
int output_y = blockIdx.y * OUTPUT_TILE_SIZE + threadIdx.y - OVERHANG_GRAD;
collab[threadIdx.y][threadIdx.x] = (output_x >= 0 && output_x < width
&& output_y >= 0 && output_y < height) ? N_data[output_y * width + output_x] : 0;
__syncthreads();
if((int)threadIdx.x - OVERHANG_GRAD >= 0 &&
(int)threadIdx.x - OVERHANG_GRAD < OUTPUT_TILE_SIZE &&
(int)threadIdx.y - OVERHANG_GRAD >= 0 &&
(int)threadIdx.y - OVERHANG_GRAD < OUTPUT_TILE_SIZE &&
output_x < width && output_y < height)
{
float accumX = 0.0f;
float accumY = 0.0f;
for(int i = 0; i < FILTER_SIZE_GRAD; i++)
{
for(int j = 0; j < FILTER_SIZE_GRAD; j++)
{
accumX += collab[threadIdx.y + i - OVERHANG_GRAD][threadIdx.x + j - OVERHANG_GRAD] * Gx_filter[i][j];
accumY += collab[threadIdx.y + i - OVERHANG_GRAD][threadIdx.x + j - OVERHANG_GRAD] * Gy_filter[i][j];
}
}
P_data[output_y * width + output_x] = abs(accumX) + abs(accumY);
if(P_data[output_y * width + output_x] >= 255)
P_data[output_y * width + output_x] = 255;
}
}
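// The kernel above uses the common Sobel magnitude approximation |G| ~= |Gx| + |Gy|
// (clamped to 255) instead of the exact sqrt(Gx^2 + Gy^2), trading a little accuracy
// for cheaper per-pixel arithmetic.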
__global__ void theta_calc_kernel(float * gradX, float * gradY, int * out, int width, int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < width && y < height)
{
float val = atan2(gradY[y * width + x], gradX[y * width + x]);
val /= 3.1415926535;//PI
//now we've constrained ourselves to a range of [-1, 1]
if(val >= -.125 && val < .125)
out[y * width + x] = 0;
else if(val >= .125 && val < .375)
out[y * width + x] = 1;
else if(val >= .375 && val < .625)
out[y * width + x] = 2;
else if(val >= .625 && val < .875)
out[y * width + x] = 3;
else if(val >= .875 || val < -.875)
out[y * width + x] = 4;
else if(val >= -.875 && val < -.625)
out[y * width + x] = 5;
else if(val >= -.625 && val < -.375)
out[y * width + x] = 6;
else
out[y * width + x] = 7;
}
}
__global__ void diff_kernel(float *orig, float *comp, float *out, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < width && y < height)
{
out[y * width + x] = abs(orig[y * width + x] - comp[y * width + x]/3);
}
}
__global__ void skin_detection(float * R, float * G, float * B, float * retR, float * retG, int width, int height) {
//INSERT KERNEL CODE HERE
float r, g, Red, Green, Blue, F1, F2, w, theta, H, I;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * blockDim.y + ty;
int col_o = blockIdx.x * blockDim.x + tx;
if(row_o < height && col_o < width)
{
/*get the RGB value*/
Red = R[row_o * width + col_o];
Green = G[row_o * width + col_o];
Blue = B[row_o * width + col_o];
/*get the intensity*/
I = (Red + Green + Blue)/3;
/*normalized red and green*/
r = Red/(Red + Green + Blue);
g = Green/(Red + Green + Blue);
/*function 1 and 2 and w in the doc*/
F1 = -1.376 * r * r + 1.0743 * r + 0.2;
F2 = -0.776 * r * r + 0.5601 * r + 0.18;
w = (r - 0.33) * (r-0.33) + (g - 0.33) * (g - 0.33);
theta = acos(0.5 * (Red * 2 - Green - Blue) / sqrt((Red - Green) * (Red - Green) + (Red - Blue) * (Green - Blue)));
if (Blue <= Green)
H = theta;
else
H = 3.1415926535 * 2 - theta;
/*face detected*/
if(g < F1 && g > F2 && w > 0.001 && ((H > (3.1415926535*4/3)) || (H < (3.1415926535 / 9))))
/*set R to be 255*/
retR[row_o * width + col_o] = 255;
else
retR[row_o * width + col_o] = 0;
/*hair detected*/
if( I < 80 && (Blue - Green < 15 || Blue - Red < 15))
//if ((H <= (2*3.1415926535/9)) && H > (3.1415926535 /9))
retG[row_o * width + col_o] = 255;
else
retG[row_o * width + col_o] = 0;
}
}
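// theta above is the standard RGB -> HSI hue formula,
//   theta = arccos( 0.5*((R-G)+(R-B)) / sqrt((R-G)^2 + (R-B)*(G-B)) ),
// with H = theta when B <= G and H = 2*PI - theta otherwise; the F1/F2/w and H
// thresholds then implement the skin and hair tests used below.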
__global__ void clean_up(float * R, float * G, float * retR, float * retG, int width, int height) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * blockDim.y + ty;
int col_o = blockIdx.x * blockDim.x + tx;
int i,j;
int counter1 = 0;
int counter2 = 0;
if(R[row_o * width + col_o] == 255)
{
for(i = -2; i < 3; i++)
{
for(j = -2; j < 3; j++)
{
if(((row_o + j) * width + col_o+i) < 0)
{
if(R[0] == 255)
counter1++;
}
else if(R[(row_o + j) * width + col_o+i] == 255)
counter1++;
}
}
}
if(G[row_o * width + col_o] == 255)
{
for(i = -3; i < 4; i++)
{
for(j = -3; j < 4; j++)
{
if(((row_o + j) * width + col_o+i) < 0)
{
if(G[0] == 255)
counter2++;
}
else if(G[(row_o + j) * width + col_o+i] == 255)
counter2++;
}
}
}
if(counter1 >20)
retR[row_o * width + col_o] = 255;
else
retR[row_o * width + col_o] = 0;
if(counter2 >20)
retG[row_o * width + col_o] = 255;
else
retG[row_o * width + col_o] = 0;
}
__global__ void quantization(float * R, float * G, float * retR, float * retG, int width, int height) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * blockDim.y + ty;
int col_o = blockIdx.x * blockDim.x + tx;
int i,j;
if(R[row_o * width + col_o] == 255)
{
for(i = -2; i < 3; i++)
{
for(j = -2; j < 3; j++)
{
if(((row_o + j) * width + col_o+i) <= 0)
{
retR[0] = 255;
}
else
retR[(row_o + j) * width + col_o+i] = 255;
}
}
}
else if(G[row_o * width + col_o] == 255)
{
for(i = -2; i < 3; i++)
{
for(j = -2; j < 3; j++)
{
if(((row_o + j) * width + col_o+i) <= 0)
retG[0] = 0;
else
retG[(row_o + j) * width + col_o+i] = 255;
}
}
}
}
void face_detection(float * R, float * G, float * r_out, float * g_out,float * b_out,int width, int height) {
int i, j;
int min_x_r = width-1;
int max_x_r = 0;
int min_y_r = height-1;
int max_y_r = 0;
int min_x_g = width-1;
int max_x_g = 0;
int min_y_g = height-1;
int max_y_g = 0;
for(i = 0; i < width; i++)
{
for(j = 0; j < height; j++)
{
/*skin detection*/
if(R[j* width + i] == 255)
{
//printf("%d\n", j);
if(j < min_y_r)
min_y_r = j;
else if(j > max_y_r)
max_y_r = j;
if(i < min_x_r)
min_x_r = i;
else if(i > max_x_r)
max_x_r = i;
}
}
}
for(i = 0; i < width; i++)
{
for(j = 0; j < height; j++)
{
/*skin detection*/
if(G[j* width + i] == 255)
{
//printf("%d\n", j);
if(j < min_y_g)
min_y_g = j;
else if(j > max_y_g)
max_y_g = j;
if(i < min_x_g)
min_x_g = i;
else if(i > max_x_g)
max_x_g = i;
}
}
}
/*draw the box*/
/*draw the box*/
//if(min_y_r > min_y_g && min_x_r > min_x_g && max_x_r < max_x_g)
//{
//printf("%d\n", min_x_r);
// printf("%d\n", max_x_r);
// printf("%d\n", min_y_r);
// printf("%d\n", max_x_r);
if(min_x_r >= min_x_g && min_y_r >= min_y_g && max_x_r <= max_x_g)
{
for(i = min_x_r; i < max_x_r; i++)
{
r_out[max_y_r * width + i] = 255;
g_out[max_y_r * width + i] = 0;
b_out[max_y_r * width + i] = 0;
r_out[min_y_r * width + i] = 255;
g_out[min_y_r * width + i] = 0;
b_out[min_y_r * width + i] = 0;
}
for(i = min_y_r; i < max_y_r; i++)
{
r_out[i * width + min_x_r] = 255;
g_out[i * width + min_x_r] = 0;
b_out[i * width + min_x_r] = 0;
r_out[i * width + max_x_r] = 255;
g_out[i * width + max_x_r] = 0;
b_out[i * width + max_x_r] = 0;
}
}
//image->writeToFile("poop.png");
for(i = min_x_r; i < max_x_r; i++)
{
R[max_y_r * width + i] = 255;
R[min_y_r * width + i] = 255;
}
for(i = min_y_r; i < max_y_r; i++)
{
R[i * width + min_x_r] = 255;
R[i * width + max_x_r] = 255;
}
}
#endif //__KERNELS
|
71b6b784cd95712194744f5eb8b20e2987912fd2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
/** Blocks and threads launched in parallel execute in random order.
 * The launch configuration specifies how many blocks and threads run in parallel, but their execution order is random.
 * Every block, and every thread within a block, has its own ID; the CUDA C built-in variables blockIdx.x and threadIdx.x read the block ID and the thread ID.
 */
__global__ void first_kernel(void)
{
// blockIdx.x gives the block number of current kernel
printf("Hello, I am thread in block: %d\n", blockIdx.x);
}
int main(int argc, char **argv)
{
// A kernel call with 16 blocks and 1 thread per block.
first_kernel << <16, 1>> > ();
/** CUDA API
 * A kernel launch is asynchronous: once the launch command is issued, control returns to the calling host CPU thread immediately, without waiting for the kernel to finish.
 * hipDeviceSynchronize() makes the kernel's output appear on standard output and lets the application exit only after the kernel has completed.
 */
// Function used for waiting for all kernels to finish.
hipDeviceSynchronize();
// printf("All threads are finished!\n");
std::cout << "All thread are finished" << std::endl;
return 0;
}
|
71b6b784cd95712194744f5eb8b20e2987912fd2.cu
|
#include <iostream>
#include <stdio.h>
/** Blocks and threads launched in parallel execute in random order.
 * The launch configuration specifies how many blocks and threads run in parallel, but their execution order is random.
 * Every block, and every thread within a block, has its own ID; the CUDA C built-in variables blockIdx.x and threadIdx.x read the block ID and the thread ID.
 */
__global__ void first_kernel(void)
{
// blockIdx.x gives the block number of current kernel
printf("Hello, I am thread in block: %d\n", blockIdx.x);
}
int main(int argc, char **argv)
{
// A kernel call with 16 blocks and 1 thread per block.
first_kernel << <16, 1>> > ();
/** CUDA API
 * A kernel launch is asynchronous: once the launch command is issued, control returns to the calling host CPU thread immediately, without waiting for the kernel to finish.
 * cudaDeviceSynchronize() makes the kernel's output appear on standard output and lets the application exit only after the kernel has completed.
 */
// Function used for waiting for all kernels to finish.
cudaDeviceSynchronize();
// printf("All threads are finished!\n");
std::cout << "All thread are finished" << std::endl;
return 0;
}
|
0096bef18e5963321899cea2e71f9366f9ce2e92.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_fns.cu"
#define BLOCK_SIZEY MB
#define BLOCK_SIZEX NB
#define GRID_SIZE 1
int main(){
FILE *fid = fopen("backup/news_xcif.y","rb");
FILE *fid2 = fopen("news_xcif_gpu_ppred.y","wb");
FILE *fid3 = fopen("news_xcif_gpu_res.y","wb");
uint8_t *h_Yonly = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_Yonly == NULL) fprintf(stderr, "Bad malloc on Yonly \n");
uint8_t *h_predicted = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_predicted == NULL) fprintf(stderr, "Bad malloc on predicted \n");
uint8_t *h_reconstructed = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_reconstructed == NULL) fprintf(stderr, "Bad malloc on reconstructed \n");
uint8_t *h_motion_vector = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_motion_vector == NULL) fprintf(stderr, "Bad malloc on motion_vector \n");
uint8_t *h_Res_orig = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_Res_orig == NULL) fprintf(stderr, "Bad malloc on Res_orig \n");
uint8_t *d_Yonly;
uint8_t *d_predicted;
uint8_t *d_reconstructed;
uint8_t *d_motion_vector;
uint8_t *d_Res_orig;
printf("gpu with config -> M=%d, N=%d, B=%dx%d R=%d\n", M, N, Br,Bc, R);
//Allocating memory on GPU
hipMalloc((void **)&d_Yonly, M*N*F*sizeof(uint8_t));
hipMalloc((void **)&d_predicted, M*N*F*sizeof(uint8_t));
hipMalloc((void **)&d_reconstructed, M*N*F*sizeof(uint8_t));
hipMalloc((void **)&d_motion_vector, M*N*F*sizeof(uint8_t));
hipMalloc((void **)&d_Res_orig, M*N*F*sizeof(uint8_t));
//Reading from files
get_Y(fid,h_Yonly,1);
fclose(fid);
dim3 blockDim1(1,1);
dim3 gridDim1(1,1);
hipLaunchKernelGGL(( deviceInit), dim3(gridDim1),dim3(blockDim1), 0, 0, d_Yonly, d_predicted, d_reconstructed, d_motion_vector, d_Res_orig);
hipDeviceSynchronize();
start_timer();
//Transferring data from the CPU to the GPU
hipMemcpy(d_Yonly, h_Yonly, (M*N*F*sizeof(uint8_t)), hipMemcpyHostToDevice);
dim3 blockDim(BLOCK_SIZEX, BLOCK_SIZEY);
dim3 gridDim(GRID_SIZE, GRID_SIZE);
for (int kk=0; kk<F; kk++){
hipLaunchKernelGGL(( process_pblock), dim3(gridDim),dim3(blockDim), 0, 0, d_Yonly, d_Yonly, d_predicted, d_motion_vector,kk);
hipDeviceSynchronize();
}
//Transferring data from the GPU to the CPU
hipMemcpy(h_predicted, d_predicted, (M*N*F*sizeof(uint8_t)), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
end_timer("GPU - ");
//Writing files
write_Y(fid2,h_predicted);
write_diff_Y(fid3,h_Yonly,h_predicted);
//Free memory on GPU
hipFree(d_Yonly);
hipFree(d_predicted);
hipFree(d_reconstructed);
hipFree(d_motion_vector);
hipFree(d_Res_orig);
//Free memory on CPU
free(h_Yonly);
free(h_predicted);
free(h_reconstructed);
free(h_motion_vector);
free(h_Res_orig);
}
|
0096bef18e5963321899cea2e71f9366f9ce2e92.cu
|
#include "gpu_fns.cu"
#define BLOCK_SIZEY MB
#define BLOCK_SIZEX NB
#define GRID_SIZE 1
int main(){
FILE *fid = fopen("backup/news_xcif.y","rb");
FILE *fid2 = fopen("news_xcif_gpu_ppred.y","wb");
FILE *fid3 = fopen("news_xcif_gpu_res.y","wb");
uint8_t *h_Yonly = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_Yonly == NULL) fprintf(stderr, "Bad malloc on Yonly \n");
uint8_t *h_predicted = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_predicted == NULL) fprintf(stderr, "Bad malloc on predicted \n");
uint8_t *h_reconstructed = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_reconstructed == NULL) fprintf(stderr, "Bad malloc on reconstructed \n");
uint8_t *h_motion_vector = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_motion_vector == NULL) fprintf(stderr, "Bad malloc on motion_vector \n");
uint8_t *h_Res_orig = (uint8_t *) malloc(M*N*F*sizeof(uint8_t)); if (h_Res_orig == NULL) fprintf(stderr, "Bad malloc on Res_orig \n");
uint8_t *d_Yonly;
uint8_t *d_predicted;
uint8_t *d_reconstructed;
uint8_t *d_motion_vector;
uint8_t *d_Res_orig;
printf("gpu with config -> M=%d, N=%d, B=%dx%d R=%d\n", M, N, Br,Bc, R);
//Allocating memory on GPU
cudaMalloc((void **)&d_Yonly, M*N*F*sizeof(uint8_t));
cudaMalloc((void **)&d_predicted, M*N*F*sizeof(uint8_t));
cudaMalloc((void **)&d_reconstructed, M*N*F*sizeof(uint8_t));
cudaMalloc((void **)&d_motion_vector, M*N*F*sizeof(uint8_t));
cudaMalloc((void **)&d_Res_orig, M*N*F*sizeof(uint8_t));
//Reading from files
get_Y(fid,h_Yonly,1);
fclose(fid);
dim3 blockDim1(1,1);
dim3 gridDim1(1,1);
deviceInit<<<gridDim1,blockDim1>>>(d_Yonly, d_predicted, d_reconstructed, d_motion_vector, d_Res_orig);
cudaDeviceSynchronize();
start_timer();
//Transferring data from the CPU to the GPU
cudaMemcpy(d_Yonly, h_Yonly, (M*N*F*sizeof(uint8_t)), cudaMemcpyHostToDevice);
dim3 blockDim(BLOCK_SIZEX, BLOCK_SIZEY);
dim3 gridDim(GRID_SIZE, GRID_SIZE);
for (int kk=0; kk<F; kk++){
process_pblock<<<gridDim,blockDim>>>(d_Yonly, d_Yonly, d_predicted, d_motion_vector,kk);
cudaDeviceSynchronize();
}
//Transferring data from the GPU to the CPU
cudaMemcpy(h_predicted, d_predicted, (M*N*F*sizeof(uint8_t)), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
end_timer("GPU - ");
//Writing files
write_Y(fid2,h_predicted);
write_diff_Y(fid3,h_Yonly,h_predicted);
//Free memory on GPU
cudaFree(d_Yonly);
cudaFree(d_predicted);
cudaFree(d_reconstructed);
cudaFree(d_motion_vector);
cudaFree(d_Res_orig);
//Free memory on CPU
free(h_Yonly);
free(h_predicted);
free(h_reconstructed);
free(h_motion_vector);
free(h_Res_orig);
}
|
integral.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* =====================================================================================
*
* Filename: integral.cu
*
* Description: Cuda source code for evaluating exponential integral.
*
* Version: 1.0
* Created: 04/13/2016 03:37:44 PM
* Revision: none
* Compiler: nvcc
*
* Author: Michael Tierney (MT), [email protected]
*
* =====================================================================================
*/
#include "integral.hpp"
#include "stdio.h"
#include <math_constants.h>
#define WARPSIZE 32
#define MAXEVALS 1E7
#define FEPSILON 1.19209e-07
#define DEPSILON 2.22045e-16
// Helpful templated getters for GPU EPS and MAX vals.
template <typename data>
__inline__ __device__ data getEulerC() {
return 0.5772157 ;
}
template <>
__inline__ __device__ float getEulerC<>() {
return 0.5772157 ;
}
template <>
__inline__ __device__ double getEulerC<>() {
return 0.5772156649015329 ;
}
template <typename data>
__inline__ __device__ data getMaxVal() {
return CUDART_INF_F ;
}
template <>
__inline__ __device__ float getMaxVal<>() {
return CUDART_INF_F ;
}
template <>
__inline__ __device__ double getMaxVal<>() {
return CUDART_INF ;
}
template <typename data>
__inline__ __device__ data getEPS() {
return FEPSILON ;
}
template <>
__inline__ __device__ float getEPS<>() {
return FEPSILON ;
}
template <>
__inline__ __device__ double getEPS<>() {
return DEPSILON ;
}
/*
* === FUNCTION ======================================================================
* Name: integral
* Arguments: int orderm1 - The order n minus 1.
* Datatype arg - The argument x in E(n,x)
* Returns: The value of the integral \int_1^\inf \frac{e^{-xt}}{t^n} dt
* Description: Evaluates the integral E_n using a continued fraction series for x > 1.
* =====================================================================================
*/
template <typename DataType>
__inline__ __device__ DataType evalExpIntegralGt1(int orderm1, DataType arg) {
DataType del ;
DataType a = 0 ;
DataType b = arg+orderm1+1 ;
DataType c = getMaxVal<DataType>() ;
DataType d = 1.0/b ;
DataType h = d ;
DataType eps = getEPS<DataType>() ;
for (int i = 1 ; i <= MAXEVALS ; i++) {
a = -i*(orderm1+i) ;
b += 2.0 ;
d = 1.0/(a*d+b) ;
c = b+a/c ;
del = c*d ;
h *= del ;
if (fabs(del-1.0) <= eps) {
return h*exp(-arg) ;
}
}
return 0 ;
}
/*
* === FUNCTION ======================================================================
* Name: integral
* Arguments: int orderm1 - The order n minus 1.
* Datatype arg - The argument x in E(n,x)
* Returns: The value of the integral \int_1^\inf \frac{e^{-xt}}{t^n} dt
* Description: Evaluates the integral E_n using a converging series for x < 1.
* =====================================================================================
*/
template <typename DataType>
__inline__ __device__ DataType evalExpIntegralLt1(int orderm1, DataType arg) {
DataType ans = (orderm1 !=0 ? 1.0/orderm1 : -log(arg)-getEulerC<DataType>()) ;
DataType fact = 1.0 ;
DataType del = 0.0 ;
DataType psi = 0.0 ;
DataType eps = getEPS<DataType>() ;
DataType meuler = -getEulerC<DataType>() ;
for (DataType i = 1 ; i <= MAXEVALS ; i++) {
fact *= -arg/i ;
if (i != orderm1) {
del = -fact/(i-orderm1) ;
} else {
psi = meuler ;
for (DataType ii = 1; ii <= orderm1 ; ii++) {
psi += 1.0/ii ;
}
del = fact*(-log(arg)+psi) ;
}
ans += del ;
if (fabs(del) < fabs(ans)*eps) {
return ans ;
}
}
return 0 ;
}
/*
* === FUNCTION ======================================================================
* Name: evalSamples
* Arguments: int numOrders - The maximum number of orders to evaluate.
* int numberOfSamples - The number of samples to take.
* Datatype sampleRegionStart - The start of the region (a,b)
* Datatype division - Distance between evaluations in (a,b)
* Datatype * gpuData - Location of data on GPU.
* Description: Evaluates E_(n,x) over domain n element of (1,n) and x element of
* (a,b) where there are numSamples evaluations of x.
* =====================================================================================
*/
template <typename DataType>
__global__ void evalSamples(int numOrders, int numberOfSamples, DataType sampleRegionStart, DataType division, DataType * gpuData) {
int globalIDx = threadIdx.x + blockIdx.x*blockDim.x ;
int globalIDy = threadIdx.y + blockIdx.y*blockDim.y ;
if (globalIDy < numberOfSamples && globalIDx < numOrders) {
DataType x = sampleRegionStart+(globalIDy+1)*division ;
if (x > 1) {
gpuData[globalIDx*numberOfSamples+globalIDy] = evalExpIntegralGt1(globalIDx,x) ;
} else {
gpuData[globalIDx*numberOfSamples+globalIDy] = evalExpIntegralLt1(globalIDx,x) ;
}
}
}
/*
* === FUNCTION ======================================================================
* Name: cudaRunExponentials
* Arguments: int order - The maximum order being evaluated.
* int numberOfSamples - The number of samples to take in domain (a,b)
* double & sampleRegionStart - The start of the domain (a,b)
* double & sampleRegionEnd - The end of the interval (a,b)
* float * resultsFloatGpu - The results for the GPU evaluations.
* double * resultsDoubleGpu - The results for the GPU evaluations.
* double & timeTotalGpuFloat - Time taken to evaluate floats on GPU.
* double & timeTotalGpuDouble - Time taken to evaluate doubles on GPU.
* int blockSizeOr - The block size associated with orders.
* int blockSizeSm - The block size associated with samples.
* double & transferTimeFloat - Time taken to transfer data from GPU to DRAM.
* double & transferTimeDouble - Time taken to transfer data from GPU to DRAM.
*
* Description: Evaluates the exponential integral between (a,b) for a number of
* orders and samples.
*
* =====================================================================================
*/
void cudaRunExponentials(int order, int numberOfSamples, double & sampleRegionStart, double & sampleRegionEnd,
float * resultsFloatGpu, double * resultsDoubleGpu, double & timeTotalGpuFloat, double & timeTotalGpuDouble,
int blockSizeOr, int blockSizeSm, double & transferTimeFloat, double & transferTimeDouble) {
int numResults = numberOfSamples*order ;
dim3 dim3BlockOuter(blockSizeOr,blockSizeSm) ;
dim3 dim3GridOuter((order/dim3BlockOuter.x) + (!(order%dim3BlockOuter.x)?0:1) ,
(numberOfSamples/dim3BlockOuter.y) + (!(numberOfSamples%dim3BlockOuter.y)?0:1));
float elapsedTime ;
hipEvent_t start, finish ;
hipEvent_t transStart, transFinish ;
hipEventCreate(&start) ;
hipEventCreate(&finish) ;
hipEventCreate(&transStart) ;
hipEventCreate(&transFinish) ;
double division=(sampleRegionEnd-sampleRegionStart)/((double)(numberOfSamples));
// Float. //
hipEventRecord(start, 0) ;
// Eval. //
float * gpuFloatData ;
hipMalloc((void**) &gpuFloatData, sizeof(float)*numResults) ;
hipLaunchKernelGGL(( evalSamples), dim3(dim3GridOuter),dim3(dim3BlockOuter), 0, 0, order, numberOfSamples, float(sampleRegionStart), float(division), gpuFloatData) ;
// Write Back. //
hipEventRecord(transStart,0) ;
hipMemcpy(resultsFloatGpu,gpuFloatData,sizeof(float)*numResults, hipMemcpyDeviceToHost) ;
hipEventRecord(transFinish,0) ;
hipEventSynchronize(transFinish) ;
hipEventElapsedTime(&elapsedTime, transStart, transFinish);
transferTimeFloat = elapsedTime/1E3 ;
hipFree(gpuFloatData) ;
hipEventRecord(finish, 0) ;
hipEventSynchronize(finish) ;
hipEventElapsedTime(&elapsedTime, start, finish);
timeTotalGpuFloat = elapsedTime/1E3 ;
// Double. //
hipEventRecord(start, 0) ;
// Eval. //
double * gpuDoubleData ;
hipMalloc((void**) &gpuDoubleData, sizeof(double)*numResults) ;
hipLaunchKernelGGL(( evalSamples), dim3(dim3GridOuter),dim3(dim3BlockOuter), 0, 0, order, numberOfSamples, sampleRegionStart, division, gpuDoubleData) ;
// Write Back. //
hipEventRecord(transStart,0) ;
hipMemcpy(resultsDoubleGpu,gpuDoubleData,sizeof(double)*numResults, hipMemcpyDeviceToHost) ;
hipEventRecord(transFinish,0) ;
hipEventSynchronize(transFinish) ;
hipEventElapsedTime(&elapsedTime, transStart, transFinish);
transferTimeDouble = elapsedTime/1E3 ;
hipFree(gpuDoubleData) ;
hipEventRecord(finish, 0) ;
hipEventSynchronize(finish) ;
hipEventElapsedTime(&elapsedTime, start, finish);
timeTotalGpuDouble = elapsedTime/1E3 ;
} /* ----- end of function cudaRunExponentials ----- */
|
integral.cu
|
/*
* =====================================================================================
*
* Filename: integral.cu
*
* Description: Cuda source code for evaluating exponential integral.
*
* Version: 1.0
* Created: 04/13/2016 03:37:44 PM
* Revision: none
* Compiler: nvcc
*
* Author: Michael Tierney (MT), [email protected]
*
* =====================================================================================
*/
#include "integral.hpp"
#include "stdio.h"
#include <math_constants.h>
#define WARPSIZE 32
#define MAXEVALS 1E7
#define FEPSILON 1.19209e-07
#define DEPSILON 2.22045e-16
// Helpful templated getters for GPU EPS and MAX vals.
template <typename data>
__inline__ __device__ data getEulerC() {
return 0.5772157 ;
}
template <>
__inline__ __device__ float getEulerC<>() {
return 0.5772157 ;
}
template <>
__inline__ __device__ double getEulerC<>() {
return 0.5772156649015329 ;
}
template <typename data>
__inline__ __device__ data getMaxVal() {
return CUDART_INF_F ;
}
template <>
__inline__ __device__ float getMaxVal<>() {
return CUDART_INF_F ;
}
template <>
__inline__ __device__ double getMaxVal<>() {
return CUDART_INF ;
}
template <typename data>
__inline__ __device__ data getEPS() {
return FEPSILON ;
}
template <>
__inline__ __device__ float getEPS<>() {
return FEPSILON ;
}
template <>
__inline__ __device__ double getEPS<>() {
return DEPSILON ;
}
/*
* === FUNCTION ======================================================================
* Name: integral
* Arguments: int orderm1 - The order n minus 1.
* Datatype arg - The argument x in E(n,x)
* Returns: The value of the integral \int_1^\inf \frac{e^{-xt}}{t^n} dt
* Description: Evaluates the integral E_n using a continued fraction series for x > 1.
* =====================================================================================
*/
template <typename DataType>
__inline__ __device__ DataType evalExpIntegralGt1(int orderm1, DataType arg) {
DataType del ;
DataType a = 0 ;
DataType b = arg+orderm1+1 ;
DataType c = getMaxVal<DataType>() ;
DataType d = 1.0/b ;
DataType h = d ;
DataType eps = getEPS<DataType>() ;
for (int i = 1 ; i <= MAXEVALS ; i++) {
a = -i*(orderm1+i) ;
b += 2.0 ;
d = 1.0/(a*d+b) ;
c = b+a/c ;
del = c*d ;
h *= del ;
if (fabs(del-1.0) <= eps) {
return h*exp(-arg) ;
}
}
return 0 ;
}
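// The loop above is a Lentz-style evaluation of the continued fraction commonly
// used for E_n(x) when x > 1 (with orderm1 = n - 1):
//   E_n(x) = exp(-x) / (x + n - 1*n / (x + n + 2 - 2*(n+1) / (x + n + 4 - ...)))
// The iteration stops once the correction factor 'del' differs from 1 by less
// than the type's epsilon.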
/*
* === FUNCTION ======================================================================
* Name: integral
* Arguments: int orderm1 - The order n minus 1.
* Datatype arg - The argument x in E(n,x)
* Returns: The value of the integral \int_1^\inf \frac{e^{-xt}}{t^n} dt
* Description: Evaluates the integral E_n using a converging series for x < 1.
* =====================================================================================
*/
template <typename DataType>
__inline__ __device__ DataType evalExpIntegralLt1(int orderm1, DataType arg) {
DataType ans = (orderm1 !=0 ? 1.0/orderm1 : -log(arg)-getEulerC<DataType>()) ;
DataType fact = 1.0 ;
DataType del = 0.0 ;
DataType psi = 0.0 ;
DataType eps = getEPS<DataType>() ;
DataType meuler = -getEulerC<DataType>() ;
for (DataType i = 1 ; i <= MAXEVALS ; i++) {
fact *= -arg/i ;
if (i != orderm1) {
del = -fact/(i-orderm1) ;
} else {
psi = meuler ;
for (DataType ii = 1; ii <= orderm1 ; ii++) {
psi += 1.0/ii ;
}
del = fact*(-log(arg)+psi) ;
}
ans += del ;
if (fabs(del) < fabs(ans)*eps) {
return ans ;
}
}
return 0 ;
}
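// For x <= 1 the routine above sums the classical power series (orderm1 = n - 1):
//   E_n(x) = (-x)^(n-1)/(n-1)! * (psi(n) - ln x) - sum_{m >= 0, m != n-1} (-x)^m / ((m - n + 1) * m!)
// where psi(n) = -EulerGamma + sum_{k=1}^{n-1} 1/k; summation stops once a term is
// negligible relative to the accumulated result.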
/*
* === FUNCTION ======================================================================
* Name: evalSamples
* Arguments: int numOrders - The maximum number of orders to evaluate.
* int numberOfSamples - The number of samples to take.
* Datatype sampleRegionStart - The start of the region (a,b)
* Datatype division - Distance between evaluations in (a,b)
* Datatype * gpuData - Location of data on GPU.
* Description: Evaluates E_(n,x) over domain n element of (1,n) and x element of
* (a,b) where there are numSamples evaluations of x.
* =====================================================================================
*/
template <typename DataType>
__global__ void evalSamples(int numOrders, int numberOfSamples, DataType sampleRegionStart, DataType division, DataType * gpuData) {
int globalIDx = threadIdx.x + blockIdx.x*blockDim.x ;
int globalIDy = threadIdx.y + blockIdx.y*blockDim.y ;
if (globalIDy < numberOfSamples && globalIDx < numOrders) {
DataType x = sampleRegionStart+(globalIDy+1)*division ;
if (x > 1) {
gpuData[globalIDx*numberOfSamples+globalIDy] = evalExpIntegralGt1(globalIDx,x) ;
} else {
gpuData[globalIDx*numberOfSamples+globalIDy] = evalExpIntegralLt1(globalIDx,x) ;
}
}
}
/*
* === FUNCTION ======================================================================
* Name: cudaRunExponentials
* Arguments: int order - The maximum order being evaluated.
* int numberOfSamples - The number of samples to take in domain (a,b)
* double & sampleRegionStart - The start of the domain (a,b)
* double & sampleRegionEnd - The end of the interval (a,b)
* float * resultsFloatGpu - The results for the GPU evaluations.
* double * resultsDoubleGpu - The results for the GPU evaluations.
* double & timeTotalGpuFloat - Time taken to evaluate floats on GPU.
* double & timeTotalGpuDouble - Time taken to evaluate doubles on GPU.
* int blockSizeOr - The block size associated with orders.
* int blockSizeSm - The block size associated with samples.
* double & transferTimeFloat - Time taken to transfer data from GPU to DRAM.
* double & transferTimeDouble - Time taken to transfer data from GPU to DRAM.
*
* Description: Evaluates the exponential integral between (a,b) for a number of
* orders and samples.
*
* =====================================================================================
*/
void cudaRunExponentials(int order, int numberOfSamples, double & sampleRegionStart, double & sampleRegionEnd,
float * resultsFloatGpu, double * resultsDoubleGpu, double & timeTotalGpuFloat, double & timeTotalGpuDouble,
int blockSizeOr, int blockSizeSm, double & transferTimeFloat, double & transferTimeDouble) {
int numResults = numberOfSamples*order ;
dim3 dim3BlockOuter(blockSizeOr,blockSizeSm) ;
dim3 dim3GridOuter((order/dim3BlockOuter.x) + (!(order%dim3BlockOuter.x)?0:1) ,
(numberOfSamples/dim3BlockOuter.y) + (!(numberOfSamples%dim3BlockOuter.y)?0:1));
float elapsedTime ;
cudaEvent_t start, finish ;
cudaEvent_t transStart, transFinish ;
cudaEventCreate(&start) ;
cudaEventCreate(&finish) ;
cudaEventCreate(&transStart) ;
cudaEventCreate(&transFinish) ;
double division=(sampleRegionEnd-sampleRegionStart)/((double)(numberOfSamples));
// Float. //
cudaEventRecord(start, 0) ;
// Eval. //
float * gpuFloatData ;
cudaMalloc((void**) &gpuFloatData, sizeof(float)*numResults) ;
evalSamples<<<dim3GridOuter,dim3BlockOuter>>>(order, numberOfSamples, float(sampleRegionStart), float(division), gpuFloatData) ;
// Write Back. //
cudaEventRecord(transStart,0) ;
cudaMemcpy(resultsFloatGpu,gpuFloatData,sizeof(float)*numResults, cudaMemcpyDeviceToHost) ;
cudaEventRecord(transFinish,0) ;
cudaEventSynchronize(transFinish) ;
cudaEventElapsedTime(&elapsedTime, transStart, transFinish);
transferTimeFloat = elapsedTime/1E3 ;
cudaFree(gpuFloatData) ;
cudaEventRecord(finish, 0) ;
cudaEventSynchronize(finish) ;
cudaEventElapsedTime(&elapsedTime, start, finish);
timeTotalGpuFloat = elapsedTime/1E3 ;
// Double. //
cudaEventRecord(start, 0) ;
// Eval. //
double * gpuDoubleData ;
cudaMalloc((void**) &gpuDoubleData, sizeof(double)*numResults) ;
evalSamples<<<dim3GridOuter,dim3BlockOuter>>>(order, numberOfSamples, sampleRegionStart, division, gpuDoubleData) ;
// Write Back. //
cudaEventRecord(transStart,0) ;
cudaMemcpy(resultsDoubleGpu,gpuDoubleData,sizeof(double)*numResults, cudaMemcpyDeviceToHost) ;
cudaEventRecord(transFinish,0) ;
cudaEventSynchronize(transFinish) ;
cudaEventElapsedTime(&elapsedTime, transStart, transFinish);
transferTimeDouble = elapsedTime/1E3 ;
cudaFree(gpuDoubleData) ;
cudaEventRecord(finish, 0) ;
cudaEventSynchronize(finish) ;
cudaEventElapsedTime(&elapsedTime, start, finish);
timeTotalGpuDouble = elapsedTime/1E3 ;
} /* ----- end of function cudaRunExponentials ----- */
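/* A hypothetical call site with arbitrary example sizes (buffer names are illustrative):
 *
 *   int order = 10, samples = 1000;
 *   double a = 0.0, b = 10.0, tF, tD, trF, trD;
 *   float  *resF = new float [order * samples];
 *   double *resD = new double[order * samples];
 *   cudaRunExponentials(order, samples, a, b, resF, resD, tF, tD, 32, 16, trF, trD);
 *   // resF/resD now hold E_n(x) for n = 1..order at the sampled points of (a,b]
 */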
|
db59c852a685d975bd2be2d080631c1a38689df5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void myFirstKernel( )
{
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
hipSetDevice(MYDEVICE);
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( );
// Part 2 of 5: configure and launch kernel
dim3 dimGrid( );
dim3 dimBlock( );
hipLaunchKernelGGL(myFirstKernel, dim3( /*grid*/ ), dim3( /*block*/ ), 0, 0);
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
hipMemcpy( );
// Check for any CUDA errors
checkCUDAError("hipMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
for (int i = 0; i < 8 ; ++i)
{
for (int j = 0; j < 8 ; ++j)
{
// assert(h_a[i * numThreadsPerBlock + j] == i + j);
}
}
// free device memory
hipFree(d_a);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
std::cout << "Correct!" << std::endl;
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
std::cerr << "Cuda error: " << msg << " " << hipGetErrorString(err) << std::endl;
exit(-1);
}
}
|
db59c852a685d975bd2be2d080631c1a38689df5.cu
|
#include <assert.h>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void myFirstKernel( )
{
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
cudaSetDevice(MYDEVICE);
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
cudaMalloc( );
// Part 2 of 5: configure and launch kernel
dim3 dimGrid( );
dim3 dimBlock( );
myFirstKernel<<< , >>>( );
// block until the device has completed
cudaDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
cudaMemcpy( );
// Check for any CUDA errors
checkCUDAError("cudaMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
for (int i = 0; i < 8 ; ++i)
{
for (int j = 0; j < 8 ; ++j)
{
// assert(h_a[i * numThreadsPerBlock + j] == i + j);
}
}
// free device memory
cudaFree(d_a);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
std::cout << "Correct!" << std::endl;
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(err) << std::endl;
exit(-1);
}
}
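/* One possible way to fill in the five parts above (a sketch only; the blanks are
 * deliberately left open by the exercise):
 *
 *   Part 1: cudaMalloc((void **)&d_a, memSize);
 *   Part 2: dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock);
 *           myFirstKernel<<<dimGrid, dimBlock>>>(d_a);
 *   Part 3: __global__ void myFirstKernel(int *a)
 *           { a[blockIdx.x * blockDim.x + threadIdx.x] = blockIdx.x + threadIdx.x; }
 *   Part 4: cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
 *   Part 5: re-enable the assert, which checks h_a[i * numThreadsPerBlock + j] == i + j.
 */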
|
587edb2a6b470513a1aada8177cb482b3fa763c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Code originally written by Richard O. Lee
//Modified by Christian Bienia and Christian Fensch
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
#include <math.h>
#include <stdint.h>
#include <assert.h>
#include <cutil.h>
#define CELL_PARTICLES 16
void CudaSafeCall(int lineno, hipError_t err) {
// hipError_t err = hipGetLastError();
if( hipSuccess != err) {
printf("Cuda error: line %d: %s.\n", lineno, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
static inline int isLittleEndian() {
union {
uint16_t word;
uint8_t byte;
} endian_test;
endian_test.word = 0x00FF;
return (endian_test.byte == 0xFF);
}
union __float_and_int {
uint32_t i;
float f;
};
static inline float bswap_float(float x) {
union __float_and_int __x;
__x.f = x;
__x.i = ((__x.i & 0xff000000) >> 24) | ((__x.i & 0x00ff0000) >> 8) |
((__x.i & 0x0000ff00) << 8) | ((__x.i & 0x000000ff) << 24);
return __x.f;
}
static inline int bswap_int32(int x) {
return ( (((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) |
(((x) & 0x0000ff00) << 8) | (((x) & 0x000000ff) << 24) );
}
////////////////////////////////////////////////////////////////////////////////
// note: icc-optimized version of this class gave 15% more
// performance than our hand-optimized SSE3 implementation
/*
class Vec3 {
public:
float x, y, z;
__device__ Vec3() {}
__device__ Vec3(float _x, float _y, float _z) : x(_x), y(_y), z(_z) {}
__device__ float GetLengthSq() const { return x*x + y*y + z*z; }
__device__ float GetLength() const { return sqrtf(GetLengthSq()); }
__device__ Vec3 & Normalize() { return *this /= GetLength(); }
__device__ Vec3 & operator += (Vec3 const &v) { x += v.x; y += v.y; z += v.z; return *this; }
__device__ Vec3 & operator -= (Vec3 const &v) { x -= v.x; y -= v.y; z -= v.z; return *this; }
__device__ Vec3 & operator *= (float s) { x *= s; y *= s; z *= s; return *this; }
__device__ Vec3 & operator /= (float s) { x /= s; y /= s; z /= s; return *this; }
__device__ Vec3 operator + (Vec3 const &v) const { return Vec3(x+v.x, y+v.y, z+v.z); }
__device__ Vec3 operator - () const { return Vec3(-x, -y, -z); }
__device__ Vec3 operator - (Vec3 const &v) const { return Vec3(x-v.x, y-v.y, z-v.z); }
__device__ Vec3 operator * (float s) const { return Vec3(x*s, y*s, z*s); }
__device__ Vec3 operator / (float s) const { return Vec3(x/s, y/s, z/s); }
__device__ float operator * (Vec3 const &v) const { return x*v.x + y*v.y + z*v.z; }
};
*/
typedef struct Vec3 {
float x;
float y;
float z;
} Vec3;
struct kernel_consts {
float h;
float hSq;
float densityCoeff;
float pressureCoeff;
float viscosityCoeff;
float tc_orig;
Vec3 delta;
};
struct kernel_consts host;
//device memory
struct kernel_consts *dev;
#warning we use dynamic memory here FIXME
/*
__device__ float h;
__device__ float hSq;
__device__ float densityCoeff;
__device__ float pressureCoeff;
__device__ float viscosityCoeff;
__device__ float tc_orig;
__device__ Vec3 delta;
*/
__host__ __device__
inline Vec3 *operator_add (Vec3 *n,const Vec3 *v,const Vec3 *s) { n->x=v->x+s->x; n->y=v->y+s->y; n->z=v->z+s->z; return n;}
__host__ __device__
inline Vec3 *operator_sub (Vec3 *n,const Vec3 *v,const Vec3 *s) { n->x=v->x-s->x; n->y=v->y-s->y; n->z=v->z-s->z; return n;}
__host__ __device__
inline Vec3 *operator_mult (Vec3 *n,const Vec3 *v,const float s) { n->x=v->x*s; n->y=v->y*s; n->z=v->z*s; return n;}
__host__ __device__
inline Vec3 *operator_div (Vec3 *n,const Vec3 *v,const float s) { n->x=v->x/s; n->y=v->y/s; n->z=v->z/s; return n;}
__host__ __device__
inline Vec3 *operator_minus (Vec3 *n,const Vec3 *v) { n->x=-v->x; n->y=-v->y; n->z=-v->z; return n;}
__host__ __device__
inline float operator_mult_to_float (const Vec3 *v,const Vec3 *s) { return s->x*v->x + s->y*v->y + s->z*v->z; }
__device__
inline float GetLengthSq(Vec3 *v) { return operator_mult_to_float(v,v); }
__device__
inline float GetLength(Vec3 *v) { return sqrtf(GetLengthSq(v)); }
__device__
inline Vec3 *Normalize(Vec3 *v) { return operator_div(v,v,GetLength(v)); }
////////////////////////////////////////////////////////////////////////////////
// there is a current limitation of CELL_PARTICLES particles per cell
// (this structure used to be a simple linked-list of particles but, due to
// improved cache locality, we get a huge performance increase by copying
// particles instead of referencing them)
struct Cell
{
Vec3 p[CELL_PARTICLES];
Vec3 hv[CELL_PARTICLES];
Vec3 v[CELL_PARTICLES];
Vec3 a[CELL_PARTICLES];
float density[CELL_PARTICLES];
//int debug[CELL_PARTICLES];
};
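/* Size note (added for illustration, assuming the usual 4-byte float and no
* padding): sizeof(Vec3) == 12, so sizeof(struct Cell) == 4*16*12 + 16*4 ==
* 832 bytes per cell; this is the figure behind the "total device memory"
* printout and the 512MB assertion in InitSim(). */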
////////////////////////////////////////////////////////////////////////////////
const float timeStep = 0.005f;
const float doubleRestDensity = 2000.f;
const float kernelRadiusMultiplier = 1.695f;
const float h_stiffness = 1.5f;
const float viscosity = 0.4f;
__device__ const Vec3 externalAcceleration = {0.f, -9.8f, 0.f};
__device__ const Vec3 domainMin = {-0.065f, -0.08f, -0.065f};
__device__ const Vec3 domainMax = { 0.065f, 0.1f, 0.065f };
const Vec3 h_domainMin = {-0.065f, -0.08f, -0.065f};
const Vec3 h_domainMax = { 0.065f, 0.1f, 0.065f };
float restParticlesPerMeter;
// number of grid cells in each dimension
int nx;
int ny;
int nz;
int origNumParticles = 0;
int numParticles = 0;
int numCells = 0;
//device memory
Cell *cells;
int *cnumPars;
Cell *cells2;
int *cnumPars2;
//host memory
Cell *h_cells;
int *h_cnumPars;
Cell *h_cells2;
int *h_cnumPars2;
// flags which cells lie on grid boundaries
int *h_border;
int *border;
int XDIVS = 1; // number of partitions in X
int ZDIVS = 1; // number of partitions in Z
#define NUM_GRIDS ((XDIVS) * (ZDIVS))
/**/
struct Grid
{
int sx, sy, sz;
int ex, ey, ez;
} *grids;
/**/
////////////////////////////////////////////////////////////////////////////////
/*
* hmgweight
*
* Computes the hamming weight of x
*
* x - input value
* lsb - if x!=0 position of smallest bit set, else -1
*
* return - the hamming weight
*/
unsigned int hmgweight(unsigned int x, int *lsb) {
unsigned int weight=0;
unsigned int mask= 1;
unsigned int count=0;
*lsb=-1;
while(x > 0) {
//unsigned int temp;
//temp=(x&mask);
if ((x&mask) == 1) {
weight++;
if (*lsb == -1) *lsb = count;
}
x >>= 1;
count++;
}
return weight;
}
void InitSim(char const *fileName, unsigned int threadnum) {
//Compute partitioning based on square root of number of threads
//NOTE: Other partition sizes are possible as long as XDIVS * ZDIVS == threadnum,
// but communication is minimal (and hence optimal) if XDIVS == ZDIVS
FILE *file;
int lsb;
if (hmgweight(threadnum,&lsb) != 1) {
printf("Number of threads must be a power of 2\n");
exit(1);
}
XDIVS = 1<<(lsb/2);
ZDIVS = 1<<(lsb/2);
/*
if (XDIVS*ZDIVS != threadnum) XDIVS*=2;
assert(XDIVS * ZDIVS == threadnum);
*/
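/* Worked example (added for illustration): threadnum == 16 has hamming
* weight 1 with lsb == 4, so XDIVS = ZDIVS = 1<<(4/2) = 4 and
* XDIVS*ZDIVS == 16. For an odd power of two such as threadnum == 8
* (lsb == 3) this gives XDIVS = ZDIVS = 2, i.e. only 4 partitions, which is
* what the commented-out doubling of XDIVS above would compensate for. */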
grids = (struct Grid*)malloc(NUM_GRIDS*sizeof(struct Grid));
//Load input particles
printf("Loading file \"%s\"...\n",fileName);
file = fopen(fileName,"rb");
assert(file);
fread(&restParticlesPerMeter,4,1,file);
fread(&origNumParticles,4,1,file);
if (!isLittleEndian()) {
restParticlesPerMeter = bswap_float(restParticlesPerMeter);
origNumParticles = bswap_int32(origNumParticles);
}
numParticles = origNumParticles;
printf("restParticlesPerMeter: %f\norigNumParticles: %d\n",restParticlesPerMeter,origNumParticles);
float h_h = kernelRadiusMultiplier / restParticlesPerMeter;
float h_hSq = h_h*h_h;
float h_tc_orig = h_hSq*h_hSq*h_hSq;
printf("h_h: %f\n",h_h);
const float pi = 3.14159265358979f;
float coeff1 = 315.f / (64.f*pi*pow(h_h,9.f));
float coeff2 = 15.f / (pi*pow(h_h,6.f));
float coeff3 = 45.f / (pi*pow(h_h,6.f));
float particleMass = 0.5f*doubleRestDensity / (restParticlesPerMeter*restParticlesPerMeter*restParticlesPerMeter);
float h_densityCoeff = particleMass * coeff1;
float h_pressureCoeff = 3.f*coeff2 * 0.5f*h_stiffness * particleMass;
float h_viscosityCoeff = viscosity * coeff3 * particleMass;
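/* Note (added for illustration): these match the standard SPH smoothing-kernel
* normalizations -- coeff1 = 315/(64*pi*h^9) for the poly6 density kernel,
* coeff2 = 15/(pi*h^6) and coeff3 = 45/(pi*h^6) for the pressure (spiky) and
* viscosity kernels used when computing forces below. */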
Vec3 range;
operator_sub(&range,&h_domainMax,&h_domainMin);
nx = (int)(range.x / h_h);
ny = (int)(range.y / h_h);
nz = (int)(range.z / h_h);
assert(nx >= 1 && ny >= 1 && nz >= 1);
numCells = nx*ny*nz;
printf("Number of cells: %d\n",numCells);
Vec3 h_delta;
h_delta.x = range.x / nx;
h_delta.y = range.y / ny;
h_delta.z = range.z / nz;
assert(h_delta.x >= h_h && h_delta.y >= h_h && h_delta.z >= h_h);
assert(nx >= XDIVS && nz >= ZDIVS);
/* this determines the size of the grid (in gpu world these are the blocks) */
int gi = 0;
int sx, sz, ex, ez;
ex = 0;
for (int i = 0; i < XDIVS; ++i)
{
sx = ex;
ex = int(float(nx)/float(XDIVS) * (i+1) + 0.5f);
assert(sx < ex);
//printf("dimx : %d\n",ex-sx);
ez = 0;
for (int j = 0; j < ZDIVS; ++j, ++gi)
{
sz = ez;
ez = int(float(nz)/float(ZDIVS) * (j+1) + 0.5f);
assert(sz < ez);
//printf("dimz : %d\n",ez-sz);
grids[gi].sx = sx;
grids[gi].ex = ex;
grids[gi].sy = 0;
grids[gi].ey = ny;
grids[gi].sz = sz;
grids[gi].ez = ez;
}
}
assert(gi == NUM_GRIDS);
/**/
h_border = (int*)malloc(numCells*sizeof(int));
for (int i = 0; i < NUM_GRIDS; ++i) {
//printf("limits: (%d..%d, %d..%d, %d..%d)\n",grids[i].sx,grids[i].ex,grids[i].sy,grids[i].ey,grids[i].sz,grids[i].ez);
for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
for (int ix = grids[i].sx; ix < grids[i].ex; ++ix)
{
int index = (iz*ny + iy)*nx + ix;
h_border[index] = 0;
for (int dk = -1; dk <= 1; ++dk)
for (int dj = -1; dj <= 1; ++dj)
for (int di = -1; di <= 1; ++di)
{
int ci = ix + di;
int cj = iy + dj;
int ck = iz + dk;
if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1;
if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1;
if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1;
if ( ci < grids[i].sx || ci >= grids[i].ex ||
cj < grids[i].sy || cj >= grids[i].ey ||
ck < grids[i].sz || ck >= grids[i].ez ) {
h_border[index] = 1;
}
}
}
}
/**/
//device memory
CudaSafeCall( __LINE__, hipMalloc((void**)&cells, numCells * sizeof(struct Cell)) );
CudaSafeCall( __LINE__, hipMalloc((void**)&cnumPars, numCells * sizeof(int)) );
CudaSafeCall( __LINE__, hipMalloc((void**)&cells2, numCells * sizeof(struct Cell)) );
CudaSafeCall( __LINE__, hipMalloc((void**)&cnumPars2, numCells * sizeof(int)) );
CudaSafeCall( __LINE__, hipMalloc((void**)&border, numCells * sizeof(int)) );
CudaSafeCall ( __LINE__, hipMemcpy(border, h_border, numCells*sizeof(int), hipMemcpyHostToDevice) );
assert(h_border && border);
//host memory
h_cells = (struct Cell*)malloc(numCells * sizeof(struct Cell));
h_cnumPars = (int*)calloc(numCells,sizeof(int));
h_cells2 = (struct Cell*)malloc(numCells * sizeof(struct Cell));
h_cnumPars2 = (int*)calloc(numCells,sizeof(int));
assert(cells && cnumPars);
assert(cells2 && cnumPars2);
assert(h_cells && h_cnumPars);
assert(h_cells2 && h_cnumPars2);
printf("sizeof(struct Cell) * numCells : %d * %d = %d\n",sizeof(struct Cell), numCells,sizeof(struct Cell)*numCells);
printf("sizeof(int) * numCells : %d * %d = %d\n",sizeof(int), numCells,sizeof(int)*numCells);
printf("total device memory: %d\n",2*numCells*(sizeof(struct Cell)+sizeof(int)));
assert(2*numCells*(sizeof(struct Cell)+sizeof(int))< 536543232); //my card has 512MB of global memory
//we used calloc instead
//memset(h_cnumPars2, 0, numCells*sizeof(int));
float px, py, pz, hvx, hvy, hvz, vx, vy, vz;
for (int i = 0; i < origNumParticles; ++i)
{
fread(&px, 4,1,file);
fread(&py, 4,1,file);
fread(&pz, 4,1,file);
fread(&hvx, 4,1,file);
fread(&hvy, 4,1,file);
fread(&hvz, 4,1,file);
fread(&vx, 4,1,file);
fread(&vy, 4,1,file);
fread(&vz, 4,1,file);
if (!isLittleEndian()) {
px = bswap_float(px);
py = bswap_float(py);
pz = bswap_float(pz);
hvx = bswap_float(hvx);
hvy = bswap_float(hvy);
hvz = bswap_float(hvz);
vx = bswap_float(vx);
vy = bswap_float(vy);
vz = bswap_float(vz);
}
int ci = (int)((px - h_domainMin.x) / h_delta.x);
int cj = (int)((py - h_domainMin.y) / h_delta.y);
int ck = (int)((pz - h_domainMin.z) / h_delta.z);
if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1;
if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1;
if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1;
int index = (ck*ny + cj)*nx + ci;
Cell &cell = h_cells2[index];
int np = h_cnumPars2[index];
if (np < CELL_PARTICLES)
{
cell.p[np].x = px;
cell.p[np].y = py;
cell.p[np].z = pz;
cell.hv[np].x = hvx;
cell.hv[np].y = hvy;
cell.hv[np].z = hvz;
cell.v[np].x = vx;
cell.v[np].y = vy;
cell.v[np].z = vz;
++h_cnumPars2[index];
}
else
--numParticles;
}
fclose(file);
host.h = h_h;
host.hSq = h_hSq;
host.densityCoeff = h_densityCoeff;
host.pressureCoeff = h_pressureCoeff;
host.viscosityCoeff = h_viscosityCoeff;
host.tc_orig = h_tc_orig;
host.delta.x = h_delta.x;
host.delta.y = h_delta.y;
host.delta.z = h_delta.z;
CudaSafeCall( __LINE__, hipMalloc((void**)&dev, sizeof(struct kernel_consts)) );
CudaSafeCall ( __LINE__, hipMemcpy(dev, &host, sizeof(struct kernel_consts), hipMemcpyHostToDevice) );
/*
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("h", &h_h, sizeof(float), 0, hipMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("hSq", &h_hSq, sizeof(float), 0, hipMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("densityCoeff", &h_densityCoeff, sizeof(float), 0, hipMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("pressureCoeff", &h_pressureCoeff, sizeof(float), 0, hipMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("viscosityCoeff", &h_viscosityCoeff, sizeof(float), 0, hipMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("delta", &h_delta, sizeof(struct Vec3), 0, hipMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, hipMemcpyToSymbol("tc_orig", &h_tc_orig, sizeof(float), 0, hipMemcpyHostToDevice) );
*/
printf("Number of particles: %d (%d) skipped\n",numParticles,origNumParticles-numParticles);
}
////////////////////////////////////////////////////////////////////////////////
void SaveFile(char const *fileName) {
printf("Saving file \"%s\"...\n", fileName);
FILE *file;
file = fopen(fileName,"wb+");
assert(file);
if (!isLittleEndian()) {
float restParticlesPerMeter_le;
int origNumParticles_le;
restParticlesPerMeter_le = bswap_float(restParticlesPerMeter);
origNumParticles_le = bswap_int32(origNumParticles);
fwrite(&restParticlesPerMeter_le, 4,1,file);
fwrite(&origNumParticles_le, 4,1,file);
} else {
fwrite((char *)&restParticlesPerMeter, 4,1,file);
fwrite((char *)&origNumParticles, 4,1,file);
}
int count = 0;
for (int i = 0; i < numCells; ++i) {
Cell const &cell = h_cells[i];
int np = h_cnumPars[i];
//printf("np: %d\n",np);
for (int j = 0; j < np; ++j) {
if (!isLittleEndian()) {
float px, py, pz, hvx, hvy, hvz, vx,vy, vz;
px = bswap_float(cell.p[j].x);
py = bswap_float(cell.p[j].y);
pz = bswap_float(cell.p[j].z);
hvx = bswap_float(cell.hv[j].x);
hvy = bswap_float(cell.hv[j].y);
hvz = bswap_float(cell.hv[j].z);
vx = bswap_float(cell.v[j].x);
vy = bswap_float(cell.v[j].y);
vz = bswap_float(cell.v[j].z);
fwrite((char *)&px, 4,1,file);
fwrite((char *)&py, 4,1,file);
fwrite((char *)&pz, 4,1,file);
fwrite((char *)&hvx, 4,1,file);
fwrite((char *)&hvy, 4,1,file);
fwrite((char *)&hvz, 4,1,file);
fwrite((char *)&vx, 4,1,file);
fwrite((char *)&vy, 4,1,file);
fwrite((char *)&vz, 4,1,file);
} else {
fwrite((char *)&cell.p[j].x, 4,1,file);
fwrite((char *)&cell.p[j].y, 4,1,file);
fwrite((char *)&cell.p[j].z, 4,1,file);
fwrite((char *)&cell.hv[j].x, 4,1,file);
fwrite((char *)&cell.hv[j].y, 4,1,file);
fwrite((char *)&cell.hv[j].z, 4,1,file);
fwrite((char *)&cell.v[j].x, 4,1,file);
fwrite((char *)&cell.v[j].y, 4,1,file);
fwrite((char *)&cell.v[j].z, 4,1,file);
}
++count;
}
}
assert(count == numParticles);
int numSkipped = origNumParticles - numParticles;
float zero = 0.f;
if (!isLittleEndian()) {
zero = bswap_float(zero);
}
for (int i = 0; i < numSkipped; ++i)
{
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
}
fflush(file);
fclose(file);
}
////////////////////////////////////////////////////////////////////////////////
void CleanUpSim()
{
//free host memory
free(h_cells2);
free(h_cnumPars2);
free(grids);
free(h_border);
//free device memory
CudaSafeCall( __LINE__, hipFree(border) );
CudaSafeCall( __LINE__, hipFree(dev) );
CudaSafeCall( __LINE__, hipFree(cells) );
CudaSafeCall( __LINE__, hipFree(cnumPars) );
CudaSafeCall( __LINE__, hipFree(cells2) );
CudaSafeCall( __LINE__, hipFree(cnumPars2) );
}
////////////////////////////////////////////////////////////////////////////////
// idx = (iz*ny + iy)*nx + ix
#define GET_IDX_X(idx) ((idx) % (blockDim.x * gridDim.x))
#define SKIP_DIM_X(idx) (((idx) - GET_IDX_X(idx)) / (blockDim.x * gridDim.x))
#define GET_IDX_Y(idx) (SKIP_DIM_X(idx) % (blockDim.y * gridDim.y))
#define GET_IDX_Z(idx) ((SKIP_DIM_X(idx) - GET_IDX_Y(idx)) / (blockDim.y * gridDim.y))
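// Worked example (added for illustration): with an 8x8x8 thread space
// (nx == ny == nz == 8), idx = (iz*ny + iy)*nx + ix = (3*8 + 2)*8 + 5 = 213,
// and the macros recover GET_IDX_X(213) == 5, GET_IDX_Y(213) == 2,
// GET_IDX_Z(213) == 3.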
__device__ int InitNeighCellList(int ci, int cj, int ck, int *neighCells, int *cnumPars) {
int numNeighCells = 0;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int nz = blockDim.z * gridDim.z;
for (int di = -1; di <= 1; ++di)
for (int dj = -1; dj <= 1; ++dj)
for (int dk = -1; dk <= 1; ++dk) {
int ii = ci + di;
int jj = cj + dj;
int kk = ck + dk;
if (ii >= 0 && ii < nx && jj >= 0 && jj < ny && kk >= 0 && kk < nz) {
int index = (kk*ny + jj)*nx + ii;
//consider only cell neighbors which actually have particles
if (cnumPars[index] != 0) {
neighCells[numNeighCells] = index;
++numNeighCells;
}
}
}
return numNeighCells;
}
////////////////////////////////////////////////////////////////////////////////
__global__ void big_kernel(Cell *cells, int *cnumPars,Cell *cells2, int *cnumPars2,struct kernel_consts *dev,int *border) {
int ix;
int iy;
int iz;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int nz = blockDim.z * gridDim.z;
ix = blockIdx.x * blockDim.x + threadIdx.x;
iy = blockIdx.y * blockDim.y + threadIdx.y;
iz = blockIdx.z * blockDim.z + threadIdx.z;
//printf("x: %d : %d\n",nx,blockDim.x * gridDim.x);
//printf("y: %d : %d\n",ny,blockDim.y * gridDim.y);
//printf("z: %d : %d\n",nz,blockDim.z * gridDim.z);
//move common declarations on top
int index = (iz*ny + iy)*nx + ix;
int np; //internal loop limit
//this should be moved to shared memory
Cell &cell = cells[index]; //just a reference to the correspondig cell //FIXME
int neighCells[27];
//it is safe to move the call here, neighbours do not change between the two original calls
//move this computation to cpu
//const float tc_orig = hSq*hSq*hSq;
const float parSize = 0.0002f;
const float epsilon = 1e-10f;
const float stiffness = 30000.f;
const float damping = 128.f;
/*
for (i=0;i<27;i++) {
neighCells[i] = 0xffffffff;
}
*/
int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells,cnumPars);
/*
//printf("thread %d: number of neighbors: %d\n",index,numNeighCells);
for (int i=0;i<numNeighCells;i++) {
printf("thread %d : %d-th neighbor %d\n",index,i,neighCells[i]);
}
*/
////////////////////////////////////////////////////////////////////////////////
//void ClearParticlesMT(int i) {
////////////////////////////////////////////////////////////////////////////////
/**/
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
cnumPars[index] = 0;
// } //close nested loop;
__syncthreads();
//} close ClearParticlesMT()
////////////////////////////////////////////////////////////////////////////////
//void RebuildGridMT(int i) {
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
Cell const &cell2 = cells2[index];
int np2 = cnumPars2[index];
for (int j = 0; j < np2; ++j) {
int ci = (int)((cell2.p[j].x - domainMin.x) / dev->delta.x);
int cj = (int)((cell2.p[j].y - domainMin.y) / dev->delta.y);
int ck = (int)((cell2.p[j].z - domainMin.z) / dev->delta.z);
if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1;
if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1;
if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1;
int index2 = (ck*ny + cj)*nx + ci;
// this assumes that particles cannot travel more than one grid cell per time step
//take the slot index from the same operation that bumps the counter, so
//border cells touched by several blocks do not race on np_renamed
int np_renamed;
if (border[index2]) {
//use atomic; atomicAdd returns the previous value
np_renamed = atomicAdd(&cnumPars[index2],1);
} else {
np_renamed = cnumPars[index2]++;
}
//#warning what if we exceed CELL_PARTICLES particles per cell here??
//from what I can see we recompute the same frame over and over,
//so every cell holds at most CELL_PARTICLES particles from the initialisation
Cell &cell_renamed = cells[index2];
cell_renamed.p[np_renamed].x = cell2.p[j].x;
cell_renamed.p[np_renamed].y = cell2.p[j].y;
cell_renamed.p[np_renamed].z = cell2.p[j].z;
cell_renamed.hv[np_renamed].x = cell2.hv[j].x;
cell_renamed.hv[np_renamed].y = cell2.hv[j].y;
cell_renamed.hv[np_renamed].z = cell2.hv[j].z;
cell_renamed.v[np_renamed].x = cell2.v[j].x;
cell_renamed.v[np_renamed].y = cell2.v[j].y;
cell_renamed.v[np_renamed].z = cell2.v[j].z;
//cell_renamed.debug[np_renamed] = index2;
}
// } //close nested loops
__syncthreads();
//} close RebuildGridMT()
////////////////////////////////////////////////////////////////////////////////
//void InitDensitiesAndForcesMT(int i) {
//from now on we don't change the cnumPars[index]
np = cnumPars[index]; //internal loop limit
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
cell.density[j] = 0.f;
cell.a[j].x = externalAcceleration.x;
cell.a[j].y = externalAcceleration.y;
cell.a[j].z = externalAcceleration.z;
}
// } //close nested loops
__syncthreads();
//} close InitDensitiesAndForcesMT()
////////////////////////////////////////////////////////////////////////////////
//void ComputeDensitiesMT(int i) {
// int neighCells[27];
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// int np = cnumPars[index];
// if (np == 0) continue;
//
// if np==0 we do not enter the following loop
// int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells);
// Cell &cell = cells[index];
Vec3 tmp;
for (int j = 0; j < np; ++j)
for (int inc = 0; inc < numNeighCells; ++inc) {
int indexNeigh = neighCells[inc];
Cell &neigh = cells[indexNeigh];
int numNeighPars = cnumPars[indexNeigh];
for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh)
if (&neigh.p[iparNeigh] < &cell.p[j]) {
//float distSq = (cell.p[j] - neigh.p[iparNeigh]).GetLengthSq();
float distSq;
operator_sub(&tmp,&cell.p[j],&neigh.p[iparNeigh]);
distSq = GetLengthSq(&tmp);
if (distSq < dev->hSq) {
float t = dev->hSq - distSq;
float tc = t*t*t;
if (border[index]) {
//use atomic
atomicAdd(&cell.density[j],tc);
} else {
cell.density[j] += tc;
}
if (border[indexNeigh]) {
//use atomic
atomicAdd(&neigh.density[iparNeigh],tc);
} else {
neigh.density[iparNeigh] += tc;
}
}
}
}
// } //close nested loops
__syncthreads();
//} close ComputeDensitiesMT()
////////////////////////////////////////////////////////////////////////////////
//void ComputeDensities2MT(int i) {
// const float tc = hSq*hSq*hSq;
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
cell.density[j] += dev->tc_orig;
cell.density[j] *= dev->densityCoeff;
}
// } //close nested loops
__syncthreads();
//} close ComputeDensities2MT()
////////////////////////////////////////////////////////////////////////////////
//void ComputeForcesMT(int i) {
// int neighCells[27];
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// int np = cnumPars[index];
// if (np == 0) continue;
//
// if np==0 we do not enter the following loop
// int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells);
// Cell &cell = cells[index];
for (int j = 0; j < np; ++j)
for (int inc = 0; inc < numNeighCells; ++inc) {
int indexNeigh = neighCells[inc];
Cell &neigh = cells[indexNeigh];
int numNeighPars = cnumPars[indexNeigh];
for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh)
if (&neigh.p[iparNeigh] < &cell.p[j]) {
//Vec3 disp = cell.p[j] - neigh.p[iparNeigh];
//float distSq = disp.GetLengthSq();
Vec3 disp;
operator_sub(&disp,&cell.p[j],&neigh.p[iparNeigh]);
float distSq = GetLengthSq(&disp);
if (distSq < dev->hSq) {
//float dist = sqrtf(std::max(distSq, 1e-12f));
float dist = sqrtf(fmaxf(distSq, 1e-12f));
float hmr = dev->h - dist;
//Vec3 acc = disp * pressureCoeff * (hmr*hmr/dist) *
// (cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity);
//acc += (neigh.v[iparNeigh] - cell.v[j]) * viscosityCoeff * hmr;
//acc /= cell.density[j] * neigh.density[iparNeigh];
Vec3 acc;
operator_mult(&acc,&disp, dev->pressureCoeff * (hmr*hmr/dist) *
(cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity));
operator_sub(&tmp,&neigh.v[iparNeigh],&cell.v[j]);
operator_mult(&tmp,&tmp,dev->viscosityCoeff * hmr);
operator_add(&acc,&acc,&tmp);
operator_div(&acc,&acc,cell.density[j] * neigh.density[iparNeigh]);
if (border[index]) {
//use atomics
#warning this works because no one reads these values at the moment ??
atomicAdd(&cell.a[j].x,acc.x);
atomicAdd(&cell.a[j].y,acc.y);
atomicAdd(&cell.a[j].z,acc.z);
} else {
operator_add(&cell.a[j],&cell.a[j],&acc);
}
if (border[indexNeigh]) {
//use atomics
#warning this works because no one reads these values at the moment ??
//reminder: there is no atomicSub for floats, so we add the negative value
atomicAdd(&neigh.a[iparNeigh].x,-acc.x);
atomicAdd(&neigh.a[iparNeigh].y,-acc.y);
atomicAdd(&neigh.a[iparNeigh].z,-acc.z);
} else {
operator_sub(&neigh.a[iparNeigh],&neigh.a[iparNeigh],&acc);
}
}
}
}
// } //close nested loops
__syncthreads();
//} close ComputeForcesMT()
////////////////////////////////////////////////////////////////////////////////
//void ProcessCollisionsMT(int i) {
// const float parSize = 0.0002f;
// const float epsilon = 1e-10f;
// const float stiffness = 30000.f;
// const float damping = 128.f;
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
//Vec3 pos = cell.p[j] + cell.hv[j] * timeStep;
Vec3 pos;
operator_mult(&pos,&cell.hv[j],timeStep);
operator_add(&pos,&pos,&cell.p[j]);
float diff = parSize - (pos.x - domainMin.x);
if (diff > epsilon)
cell.a[j].x += stiffness*diff - damping*cell.v[j].x;
diff = parSize - (domainMax.x - pos.x);
if (diff > epsilon)
cell.a[j].x -= stiffness*diff + damping*cell.v[j].x;
diff = parSize - (pos.y - domainMin.y);
if (diff > epsilon)
cell.a[j].y += stiffness*diff - damping*cell.v[j].y;
diff = parSize - (domainMax.y - pos.y);
if (diff > epsilon)
cell.a[j].y -= stiffness*diff + damping*cell.v[j].y;
diff = parSize - (pos.z - domainMin.z);
if (diff > epsilon)
cell.a[j].z += stiffness*diff - damping*cell.v[j].z;
diff = parSize - (domainMax.z - pos.z);
if (diff > epsilon)
cell.a[j].z -= stiffness*diff + damping*cell.v[j].z;
}
// } //close nested loops
__syncthreads();
//} close ProcessCollisionsMT()
////////////////////////////////////////////////////////////////////////////////
//void AdvanceParticlesMT(int i) {
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
//Vec3 v_half = cell.hv[j] + cell.a[j]*timeStep;
Vec3 v_half;
operator_mult(&v_half,&cell.a[j],timeStep);
operator_add(&v_half,&v_half,&cell.hv[j]);
//cell.v[j] = (cell.hv[j] + v_half) * 0.5f;
//v averages the previous half-step velocity hv with the new one, so it must
//be computed before hv is overwritten below
operator_add(&cell.v[j],&cell.hv[j],&v_half);
operator_mult(&cell.v[j],&cell.v[j],0.5f);
//cell.hv[j] = v_half;
cell.hv[j].x = v_half.x;
cell.hv[j].y = v_half.y;
cell.hv[j].z = v_half.z;
//we can change v_half now, (we want to use only one tmp variable)
//cell.p[j] += v_half * timeStep;
operator_mult(&v_half,&v_half,timeStep);
operator_add(&cell.p[j],&cell.p[j],&v_half);
}
// } //close nested loops
__syncthreads();
//} close AdvanceParticlesMT()
////////////////////////////////////////////////////////////////////////////////
/**/
} //close big_kernel()
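// Note (added for illustration): __syncthreads() inside big_kernel() is only a
// block-level barrier, so phases that read neighbour cells owned by other
// blocks depend entirely on the border[] atomics above. A sketch of an
// alternative structure (not what this file does; the kernel names below are
// hypothetical) is to give each phase its own kernel so that launch
// boundaries act as device-wide barriers:
//
// for (int frame = 0; frame < framenum; ++frame) {
// rebuild_grid_kernel<<<grid,block>>>(...);
// compute_densities_kernel<<<grid,block>>>(...);
// compute_forces_kernel<<<grid,block>>>(...);
// advance_particles_kernel<<<grid,block>>>(...);
// }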
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
int i;
int grid_x;
int grid_y;
int grid_z;
int block_x;
int block_y;
int block_z;
if (argc < 4 || argc >= 6) {
printf("Usage: %s <threadnum> <framenum> <.fluid input file> [.fluid output file]\n",argv[0]);
exit(EXIT_FAILURE);
}
int threadnum = atoi(argv[1]);
int framenum = atoi(argv[2]);
//Check arguments
if (threadnum < 1) {
printf("<threadnum> must at least be 1\n");
exit(EXIT_FAILURE);
}
if (framenum < 1) {
printf("<framenum> must at least be 1\n");
exit(EXIT_FAILURE);
}
//read input file, allocate memory, etc
InitSim(argv[3], threadnum);
//map the grid partitioning onto the CUDA launch configuration:
// block_x should be nx / XDIVS
// block_y should be ny //no partitioning here
// block_z should be nz / ZDIVS
grid_x = XDIVS;
grid_y = 1; //no partitioning here
grid_z = ZDIVS;
block_x = nx / XDIVS;
block_y = ny;
block_z = nz / ZDIVS;
//kernel stuff
dim3 grid(grid_x, grid_y, grid_z);
dim3 block(block_x, block_y, block_z);
//dim3 grid(grid_z, grid_x, grid_y);
//dim3 block(block_z, block_x, block_y);
//dim3 grid(1,1,1);
//dim3 block(8,8,8);
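//Note (added for illustration): the launch uses (nx/XDIVS)*ny*(nz/ZDIVS)
//threads per block, which must stay within the device limit (typically 1024),
//and nx, nz must be divisible by XDIVS, ZDIVS or the integer division above
//silently drops the remaining cells. A hedged sanity check could be:
// assert(block_x * block_y * block_z <= 1024);
// assert(block_x * XDIVS == nx && block_z * ZDIVS == nz);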
//move data to device
CudaSafeCall( __LINE__, hipMemcpy(cells2, h_cells2, numCells * sizeof(struct Cell), hipMemcpyHostToDevice) );
CudaSafeCall( __LINE__, hipMemcpy(cnumPars2, h_cnumPars2, numCells * sizeof(int), hipMemcpyHostToDevice) );
printf("grid (%d, %d, %d) block (%d, %d, %d)\n",
grid.x,grid.y,grid.z,block.x,block.y,block.z);
for (i=0;i<framenum;i++) {
hipLaunchKernelGGL(( big_kernel), dim3(grid),dim3(block), 0, 0, cells,cnumPars,cells2,cnumPars2,dev,border);
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
printf("Cuda error: line %d: %s.\n", __LINE__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
}
//move data to host
CudaSafeCall( __LINE__, hipMemcpy(h_cells, cells, numCells * sizeof(struct Cell), hipMemcpyDeviceToHost) );
CudaSafeCall( __LINE__, hipMemcpy(h_cnumPars, cnumPars, numCells * sizeof(int), hipMemcpyDeviceToHost) );
// /*debug
int j;
for (i=0;i<numCells;i++) {
//if (h_cnumPars[i]!=i) { printf("got %d : expected : %d\n",h_cnumPars[i],i); }
/*for (j=0;j<h_cnumPars[i];j++) {
if (h_cells[i].debug[j] >= numCells) {
printf("in cell %d: particle %d: index2 out of bounds: %d\n",
i,j,h_cells[i].debug[j]);
}
}*/
}
// */
if (argc > 4) {
SaveFile(argv[4]);
}
CleanUpSim();
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
|
587edb2a6b470513a1aada8177cb482b3fa763c1.cu
|
//Code originally written by Richard O. Lee
//Modified by Christian Bienia and Christian Fensch
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
#include <math.h>
#include <stdint.h>
#include <assert.h>
#include <cutil.h>
#define CELL_PARTICLES 16
void CudaSafeCall(int lineno, cudaError_t err) {
// cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
printf("Cuda error: line %d: %s.\n", lineno, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
static inline int isLittleEndian() {
union {
uint16_t word;
uint8_t byte;
} endian_test;
endian_test.word = 0x00FF;
return (endian_test.byte == 0xFF);
}
union __float_and_int {
uint32_t i;
float f;
};
static inline float bswap_float(float x) {
union __float_and_int __x;
__x.f = x;
__x.i = ((__x.i & 0xff000000) >> 24) | ((__x.i & 0x00ff0000) >> 8) |
((__x.i & 0x0000ff00) << 8) | ((__x.i & 0x000000ff) << 24);
return __x.f;
}
static inline int bswap_int32(int x) {
return ( (((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) |
(((x) & 0x0000ff00) << 8) | (((x) & 0x000000ff) << 24) );
}
////////////////////////////////////////////////////////////////////////////////
// note: icc-optimized version of this class gave 15% more
// performance than our hand-optimized SSE3 implementation
/*
class Vec3 {
public:
float x, y, z;
__device__ Vec3() {}
__device__ Vec3(float _x, float _y, float _z) : x(_x), y(_y), z(_z) {}
__device__ float GetLengthSq() const { return x*x + y*y + z*z; }
__device__ float GetLength() const { return sqrtf(GetLengthSq()); }
__device__ Vec3 & Normalize() { return *this /= GetLength(); }
__device__ Vec3 & operator += (Vec3 const &v) { x += v.x; y += v.y; z += v.z; return *this; }
__device__ Vec3 & operator -= (Vec3 const &v) { x -= v.x; y -= v.y; z -= v.z; return *this; }
__device__ Vec3 & operator *= (float s) { x *= s; y *= s; z *= s; return *this; }
__device__ Vec3 & operator /= (float s) { x /= s; y /= s; z /= s; return *this; }
__device__ Vec3 operator + (Vec3 const &v) const { return Vec3(x+v.x, y+v.y, z+v.z); }
__device__ Vec3 operator - () const { return Vec3(-x, -y, -z); }
__device__ Vec3 operator - (Vec3 const &v) const { return Vec3(x-v.x, y-v.y, z-v.z); }
__device__ Vec3 operator * (float s) const { return Vec3(x*s, y*s, z*s); }
__device__ Vec3 operator / (float s) const { return Vec3(x/s, y/s, z/s); }
__device__ float operator * (Vec3 const &v) const { return x*v.x + y*v.y + z*v.z; }
};
*/
typedef struct Vec3 {
float x;
float y;
float z;
} Vec3;
struct kernel_consts {
float h;
float hSq;
float densityCoeff;
float pressureCoeff;
float viscosityCoeff;
float tc_orig;
Vec3 delta;
};
struct kernel_consts host;
//device memory
struct kernel_consts *dev;
#warning we use dynamic memory here FIXME
/*
__device__ float h;
__device__ float hSq;
__device__ float densityCoeff;
__device__ float pressureCoeff;
__device__ float viscosityCoeff;
__device__ float tc_orig;
__device__ Vec3 delta;
*/
__host__ __device__
inline Vec3 *operator_add (Vec3 *n,const Vec3 *v,const Vec3 *s) { n->x=v->x+s->x; n->y=v->y+s->y; n->z=v->z+s->z; return n;}
__host__ __device__
inline Vec3 *operator_sub (Vec3 *n,const Vec3 *v,const Vec3 *s) { n->x=v->x-s->x; n->y=v->y-s->y; n->z=v->z-s->z; return n;}
__host__ __device__
inline Vec3 *operator_mult (Vec3 *n,const Vec3 *v,const float s) { n->x=v->x*s; n->y=v->y*s; n->z=v->z*s; return n;}
__host__ __device__
inline Vec3 *operator_div (Vec3 *n,const Vec3 *v,const float s) { n->x=v->x/s; n->y=v->y/s; n->z=v->z/s; return n;}
__host__ __device__
inline Vec3 *operator_minus (Vec3 *n,const Vec3 *v) { n->x=-v->x; n->y=-v->y; n->z=-v->z; return n;}
__host__ __device__
inline float operator_mult_to_float (const Vec3 *v,const Vec3 *s) { return s->x*v->x + s->y*v->y + s->z*v->z; }
__device__
inline float GetLengthSq(Vec3 *v) { return operator_mult_to_float(v,v); }
__device__
inline float GetLength(Vec3 *v) { return sqrtf(GetLengthSq(v)); }
__device__
inline Vec3 *Normalize(Vec3 *v) { return operator_div(v,v,GetLength(v)); }
////////////////////////////////////////////////////////////////////////////////
// there is a current limitation of CELL_PARTICLES particles per cell
// (this structure used to be a simple linked-list of particles but, due to
// improved cache locality, we get a huge performance increase by copying
// particles instead of referencing them)
struct Cell
{
Vec3 p[CELL_PARTICLES];
Vec3 hv[CELL_PARTICLES];
Vec3 v[CELL_PARTICLES];
Vec3 a[CELL_PARTICLES];
float density[CELL_PARTICLES];
//int debug[CELL_PARTICLES];
};
////////////////////////////////////////////////////////////////////////////////
const float timeStep = 0.005f;
const float doubleRestDensity = 2000.f;
const float kernelRadiusMultiplier = 1.695f;
const float h_stiffness = 1.5f;
const float viscosity = 0.4f;
__device__ const Vec3 externalAcceleration = {0.f, -9.8f, 0.f};
__device__ const Vec3 domainMin = {-0.065f, -0.08f, -0.065f};
__device__ const Vec3 domainMax = { 0.065f, 0.1f, 0.065f };
const Vec3 h_domainMin = {-0.065f, -0.08f, -0.065f};
const Vec3 h_domainMax = { 0.065f, 0.1f, 0.065f };
float restParticlesPerMeter;
// number of grid cells in each dimension
int nx;
int ny;
int nz;
int origNumParticles = 0;
int numParticles = 0;
int numCells = 0;
//device memory
Cell *cells;
int *cnumPars;
Cell *cells2;
int *cnumPars2;
//host memory
Cell *h_cells;
int *h_cnumPars;
Cell *h_cells2;
int *h_cnumPars2;
// flags which cells lie on grid boundaries
int *h_border;
int *border;
int XDIVS = 1; // number of partitions in X
int ZDIVS = 1; // number of partitions in Z
#define NUM_GRIDS ((XDIVS) * (ZDIVS))
/**/
struct Grid
{
int sx, sy, sz;
int ex, ey, ez;
} *grids;
/**/
////////////////////////////////////////////////////////////////////////////////
/*
* hmgweight
*
* Computes the hamming weight of x
*
* x - input value
* lsb - if x!=0 position of smallest bit set, else -1
*
* return - the hamming weight
*/
unsigned int hmgweight(unsigned int x, int *lsb) {
unsigned int weight=0;
unsigned int mask= 1;
unsigned int count=0;
*lsb=-1;
while(x > 0) {
//unsigned int temp;
//temp=(x&mask);
if ((x&mask) == 1) {
weight++;
if (*lsb == -1) *lsb = count;
}
x >>= 1;
count++;
}
return weight;
}
void InitSim(char const *fileName, unsigned int threadnum) {
//Compute partitioning based on square root of number of threads
//NOTE: Other partition sizes are possible as long as XDIVS * ZDIVS == threadnum,
// but communication is minimal (and hence optimal) if XDIVS == ZDIVS
FILE *file;
int lsb;
if (hmgweight(threadnum,&lsb) != 1) {
printf("Number of threads must be a power of 2\n");
exit(1);
}
XDIVS = 1<<(lsb/2);
ZDIVS = 1<<(lsb/2);
/*
if (XDIVS*ZDIVS != threadnum) XDIVS*=2;
assert(XDIVS * ZDIVS == threadnum);
*/
grids = (struct Grid*)malloc(NUM_GRIDS*sizeof(struct Grid));
//Load input particles
printf("Loading file \"%s\"...\n",fileName);
file = fopen(fileName,"rb");
assert(file);
fread(&restParticlesPerMeter,4,1,file);
fread(&origNumParticles,4,1,file);
if (!isLittleEndian()) {
restParticlesPerMeter = bswap_float(restParticlesPerMeter);
origNumParticles = bswap_int32(origNumParticles);
}
numParticles = origNumParticles;
printf("restParticlesPerMeter: %f\norigNumParticles: %d\n",restParticlesPerMeter,origNumParticles);
float h_h = kernelRadiusMultiplier / restParticlesPerMeter;
float h_hSq = h_h*h_h;
float h_tc_orig = h_hSq*h_hSq*h_hSq;
printf("h_h: %f\n",h_h);
const float pi = 3.14159265358979f;
float coeff1 = 315.f / (64.f*pi*pow(h_h,9.f));
float coeff2 = 15.f / (pi*pow(h_h,6.f));
float coeff3 = 45.f / (pi*pow(h_h,6.f));
float particleMass = 0.5f*doubleRestDensity / (restParticlesPerMeter*restParticlesPerMeter*restParticlesPerMeter);
float h_densityCoeff = particleMass * coeff1;
float h_pressureCoeff = 3.f*coeff2 * 0.5f*h_stiffness * particleMass;
float h_viscosityCoeff = viscosity * coeff3 * particleMass;
Vec3 range;
operator_sub(&range,&h_domainMax,&h_domainMin);
nx = (int)(range.x / h_h);
ny = (int)(range.y / h_h);
nz = (int)(range.z / h_h);
assert(nx >= 1 && ny >= 1 && nz >= 1);
numCells = nx*ny*nz;
printf("Number of cells: %d\n",numCells);
Vec3 h_delta;
h_delta.x = range.x / nx;
h_delta.y = range.y / ny;
h_delta.z = range.z / nz;
assert(h_delta.x >= h_h && h_delta.y >= h_h && h_delta.z >= h_h);
assert(nx >= XDIVS && nz >= ZDIVS);
/* this determines the size of the grid (in gpu world these are the blocks) */
int gi = 0;
int sx, sz, ex, ez;
ex = 0;
for (int i = 0; i < XDIVS; ++i)
{
sx = ex;
ex = int(float(nx)/float(XDIVS) * (i+1) + 0.5f);
assert(sx < ex);
//printf("dimx : %d\n",ex-sx);
ez = 0;
for (int j = 0; j < ZDIVS; ++j, ++gi)
{
sz = ez;
ez = int(float(nz)/float(ZDIVS) * (j+1) + 0.5f);
assert(sz < ez);
//printf("dimz : %d\n",ez-sz);
grids[gi].sx = sx;
grids[gi].ex = ex;
grids[gi].sy = 0;
grids[gi].ey = ny;
grids[gi].sz = sz;
grids[gi].ez = ez;
}
}
assert(gi == NUM_GRIDS);
/**/
h_border = (int*)malloc(numCells*sizeof(int));
for (int i = 0; i < NUM_GRIDS; ++i) {
//printf("limits: (%d..%d, %d..%d, %d..%d)\n",grids[i].sx,grids[i].ex,grids[i].sy,grids[i].ey,grids[i].sz,grids[i].ez);
for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
for (int ix = grids[i].sx; ix < grids[i].ex; ++ix)
{
int index = (iz*ny + iy)*nx + ix;
h_border[index] = 0;
for (int dk = -1; dk <= 1; ++dk)
for (int dj = -1; dj <= 1; ++dj)
for (int di = -1; di <= 1; ++di)
{
int ci = ix + di;
int cj = iy + dj;
int ck = iz + dk;
if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1;
if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1;
if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1;
if ( ci < grids[i].sx || ci >= grids[i].ex ||
cj < grids[i].sy || cj >= grids[i].ey ||
ck < grids[i].sz || ck >= grids[i].ez ) {
h_border[index] = 1;
}
}
}
}
/**/
//device memory
CudaSafeCall( __LINE__, cudaMalloc((void**)&cells, numCells * sizeof(struct Cell)) );
CudaSafeCall( __LINE__, cudaMalloc((void**)&cnumPars, numCells * sizeof(int)) );
CudaSafeCall( __LINE__, cudaMalloc((void**)&cells2, numCells * sizeof(struct Cell)) );
CudaSafeCall( __LINE__, cudaMalloc((void**)&cnumPars2, numCells * sizeof(int)) );
CudaSafeCall( __LINE__, cudaMalloc((void**)&border, numCells * sizeof(int)) );
CudaSafeCall ( __LINE__, cudaMemcpy(border, h_border, numCells*sizeof(int), cudaMemcpyHostToDevice) );
assert(h_border && border);
//host memory
h_cells = (struct Cell*)malloc(numCells * sizeof(struct Cell));
h_cnumPars = (int*)calloc(numCells,sizeof(int));
h_cells2 = (struct Cell*)malloc(numCells * sizeof(struct Cell));
h_cnumPars2 = (int*)calloc(numCells,sizeof(int));
assert(cells && cnumPars);
assert(cells2 && cnumPars2);
assert(h_cells && h_cnumPars);
assert(h_cells2 && h_cnumPars2);
printf("sizeof(struct Cell) * numCells : %d * %d = %d\n",sizeof(struct Cell), numCells,sizeof(struct Cell)*numCells);
printf("sizeof(int) * numCells : %d * %d = %d\n",sizeof(int), numCells,sizeof(int)*numCells);
printf("total device memory: %d\n",2*numCells*(sizeof(struct Cell)+sizeof(int)));
assert(2*numCells*(sizeof(struct Cell)+sizeof(int))< 536543232); //my card has 512MB of global memory
//we used calloc instead
//memset(h_cnumPars2, 0, numCells*sizeof(int));
float px, py, pz, hvx, hvy, hvz, vx, vy, vz;
for (int i = 0; i < origNumParticles; ++i)
{
fread(&px, 4,1,file);
fread(&py, 4,1,file);
fread(&pz, 4,1,file);
fread(&hvx, 4,1,file);
fread(&hvy, 4,1,file);
fread(&hvz, 4,1,file);
fread(&vx, 4,1,file);
fread(&vy, 4,1,file);
fread(&vz, 4,1,file);
if (!isLittleEndian()) {
px = bswap_float(px);
py = bswap_float(py);
pz = bswap_float(pz);
hvx = bswap_float(hvx);
hvy = bswap_float(hvy);
hvz = bswap_float(hvz);
vx = bswap_float(vx);
vy = bswap_float(vy);
vz = bswap_float(vz);
}
int ci = (int)((px - h_domainMin.x) / h_delta.x);
int cj = (int)((py - h_domainMin.y) / h_delta.y);
int ck = (int)((pz - h_domainMin.z) / h_delta.z);
if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1;
if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1;
if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1;
int index = (ck*ny + cj)*nx + ci;
Cell &cell = h_cells2[index];
int np = h_cnumPars2[index];
if (np < CELL_PARTICLES)
{
cell.p[np].x = px;
cell.p[np].y = py;
cell.p[np].z = pz;
cell.hv[np].x = hvx;
cell.hv[np].y = hvy;
cell.hv[np].z = hvz;
cell.v[np].x = vx;
cell.v[np].y = vy;
cell.v[np].z = vz;
++h_cnumPars2[index];
}
else
--numParticles;
}
fclose(file);
host.h = h_h;
host.hSq = h_hSq;
host.densityCoeff = h_densityCoeff;
host.pressureCoeff = h_pressureCoeff;
host.viscosityCoeff = h_viscosityCoeff;
host.tc_orig = h_tc_orig;
host.delta.x = h_delta.x;
host.delta.y = h_delta.y;
host.delta.z = h_delta.z;
CudaSafeCall( __LINE__, cudaMalloc((void**)&dev, sizeof(struct kernel_consts)) );
CudaSafeCall ( __LINE__, cudaMemcpy(dev, &host, sizeof(struct kernel_consts), cudaMemcpyHostToDevice) );
/*
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("h", &h_h, sizeof(float), 0, cudaMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("hSq", &h_hSq, sizeof(float), 0, cudaMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("densityCoeff", &h_densityCoeff, sizeof(float), 0, cudaMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("pressureCoeff", &h_pressureCoeff, sizeof(float), 0, cudaMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("viscosityCoeff", &h_viscosityCoeff, sizeof(float), 0, cudaMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("delta", &h_delta, sizeof(struct Vec3), 0, cudaMemcpyHostToDevice) );
CudaSafeCall ( __LINE__, cudaMemcpyToSymbol("tc_orig", &h_tc_orig, sizeof(float), 0, cudaMemcpyHostToDevice) );
*/
printf("Number of particles: %d (%d) skipped\n",numParticles,origNumParticles-numParticles);
}
////////////////////////////////////////////////////////////////////////////////
void SaveFile(char const *fileName) {
printf("Saving file \"%s\"...\n", fileName);
FILE *file;
file = fopen(fileName,"wb+");
assert(file);
if (!isLittleEndian()) {
float restParticlesPerMeter_le;
int origNumParticles_le;
restParticlesPerMeter_le = bswap_float(restParticlesPerMeter);
origNumParticles_le = bswap_int32(origNumParticles);
fwrite(&restParticlesPerMeter_le, 4,1,file);
fwrite(&origNumParticles_le, 4,1,file);
} else {
fwrite((char *)&restParticlesPerMeter, 4,1,file);
fwrite((char *)&origNumParticles, 4,1,file);
}
int count = 0;
for (int i = 0; i < numCells; ++i) {
Cell const &cell = h_cells[i];
int np = h_cnumPars[i];
//printf("np: %d\n",np);
for (int j = 0; j < np; ++j) {
if (!isLittleEndian()) {
float px, py, pz, hvx, hvy, hvz, vx,vy, vz;
px = bswap_float(cell.p[j].x);
py = bswap_float(cell.p[j].y);
pz = bswap_float(cell.p[j].z);
hvx = bswap_float(cell.hv[j].x);
hvy = bswap_float(cell.hv[j].y);
hvz = bswap_float(cell.hv[j].z);
vx = bswap_float(cell.v[j].x);
vy = bswap_float(cell.v[j].y);
vz = bswap_float(cell.v[j].z);
fwrite((char *)&px, 4,1,file);
fwrite((char *)&py, 4,1,file);
fwrite((char *)&pz, 4,1,file);
fwrite((char *)&hvx, 4,1,file);
fwrite((char *)&hvy, 4,1,file);
fwrite((char *)&hvz, 4,1,file);
fwrite((char *)&vx, 4,1,file);
fwrite((char *)&vy, 4,1,file);
fwrite((char *)&vz, 4,1,file);
} else {
fwrite((char *)&cell.p[j].x, 4,1,file);
fwrite((char *)&cell.p[j].y, 4,1,file);
fwrite((char *)&cell.p[j].z, 4,1,file);
fwrite((char *)&cell.hv[j].x, 4,1,file);
fwrite((char *)&cell.hv[j].y, 4,1,file);
fwrite((char *)&cell.hv[j].z, 4,1,file);
fwrite((char *)&cell.v[j].x, 4,1,file);
fwrite((char *)&cell.v[j].y, 4,1,file);
fwrite((char *)&cell.v[j].z, 4,1,file);
}
++count;
}
}
assert(count == numParticles);
int numSkipped = origNumParticles - numParticles;
float zero = 0.f;
if (!isLittleEndian()) {
zero = bswap_float(zero);
}
for (int i = 0; i < numSkipped; ++i)
{
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
fwrite((char *)&zero, 4,1,file);
}
fflush(file);
fclose(file);
}
////////////////////////////////////////////////////////////////////////////////
void CleanUpSim()
{
//free host memory
free(h_cells2);
free(h_cnumPars2);
free(grids);
free(h_border);
//free device memory
CudaSafeCall( __LINE__, cudaFree(border) );
CudaSafeCall( __LINE__, cudaFree(dev) );
CudaSafeCall( __LINE__, cudaFree(cells) );
CudaSafeCall( __LINE__, cudaFree(cnumPars) );
CudaSafeCall( __LINE__, cudaFree(cells2) );
CudaSafeCall( __LINE__, cudaFree(cnumPars2) );
}
////////////////////////////////////////////////////////////////////////////////
// idx = (iz*ny + iy)*nx + ix
#define GET_IDX_X(idx) ((idx) % (blockDim.x * gridDim.x))
#define SKIP_DIM_X(idx) (((idx) - GET_IDX_X(idx)) / (blockDim.x * gridDim.x))
#define GET_IDX_Y(idx) (SKIP_DIM_X(idx) % (blockDim.y * gridDim.y))
#define GET_IDX_Z(idx) ((SKIP_DIM_X(idx) - GET_IDX_Y(idx)) / (blockDim.y * gridDim.y))
__device__ int InitNeighCellList(int ci, int cj, int ck, int *neighCells, int *cnumPars) {
int numNeighCells = 0;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int nz = blockDim.z * gridDim.z;
for (int di = -1; di <= 1; ++di)
for (int dj = -1; dj <= 1; ++dj)
for (int dk = -1; dk <= 1; ++dk) {
int ii = ci + di;
int jj = cj + dj;
int kk = ck + dk;
if (ii >= 0 && ii < nx && jj >= 0 && jj < ny && kk >= 0 && kk < nz) {
int index = (kk*ny + jj)*nx + ii;
//consider only cell neighbors which actually have particles
if (cnumPars[index] != 0) {
neighCells[numNeighCells] = index;
++numNeighCells;
}
}
}
return numNeighCells;
}
////////////////////////////////////////////////////////////////////////////////
__global__ void big_kernel(Cell *cells, int *cnumPars,Cell *cells2, int *cnumPars2,struct kernel_consts *dev,int *border) {
int ix;
int iy;
int iz;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int nz = blockDim.z * gridDim.z;
ix = blockIdx.x * blockDim.x + threadIdx.x;
iy = blockIdx.y * blockDim.y + threadIdx.y;
iz = blockIdx.z * blockDim.z + threadIdx.z;
//printf("x: %d : %d\n",nx,blockDim.x * gridDim.x);
//printf("y: %d : %d\n",ny,blockDim.y * gridDim.y);
//printf("z: %d : %d\n",nz,blockDim.z * gridDim.z);
//move common declarations on top
int index = (iz*ny + iy)*nx + ix;
int np; //internal loop limit
//this should be moved to shared memory
Cell &cell = cells[index]; //just a reference to the correspondig cell //FIXME
int neighCells[27];
//it is safe to move the call here, neighbours do not change between the two original calls
//move this computation to cpu
//const float tc_orig = hSq*hSq*hSq;
const float parSize = 0.0002f;
const float epsilon = 1e-10f;
const float stiffness = 30000.f;
const float damping = 128.f;
/*
for (i=0;i<27;i++) {
neighCells[i] = 0xffffffff;
}
*/
int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells,cnumPars);
/*
//printf("thread %d: number of neighbors: %d\n",index,numNeighCells);
for (int i=0;i<numNeighCells;i++) {
printf("thread %d : %d-th neighbor %d\n",index,i,neighCells[i]);
}
*/
////////////////////////////////////////////////////////////////////////////////
//void ClearParticlesMT(int i) {
////////////////////////////////////////////////////////////////////////////////
/**/
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
cnumPars[index] = 0;
// } //close nested loop;
__syncthreads();
//} close ClearParticlesMT()
////////////////////////////////////////////////////////////////////////////////
//void RebuildGridMT(int i) {
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
Cell const &cell2 = cells2[index];
int np2 = cnumPars2[index];
for (int j = 0; j < np2; ++j) {
int ci = (int)((cell2.p[j].x - domainMin.x) / dev->delta.x);
int cj = (int)((cell2.p[j].y - domainMin.y) / dev->delta.y);
int ck = (int)((cell2.p[j].z - domainMin.z) / dev->delta.z);
if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1;
if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1;
if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1;
int index2 = (ck*ny + cj)*nx + ci;
// this assumes that particles cannot travel more than one grid cell per time step
//take the slot index from the same operation that bumps the counter, so
//border cells touched by several blocks do not race on np_renamed
int np_renamed;
if (border[index2]) {
//use atomic; atomicAdd returns the previous value
np_renamed = atomicAdd(&cnumPars[index2],1);
} else {
np_renamed = cnumPars[index2]++;
}
//#warning what if we exceed CELL_PARTICLES particles per cell here??
//from what I can see we recompute the same frame over and over,
//so every cell holds at most CELL_PARTICLES particles from the initialisation
Cell &cell_renamed = cells[index2];
cell_renamed.p[np_renamed].x = cell2.p[j].x;
cell_renamed.p[np_renamed].y = cell2.p[j].y;
cell_renamed.p[np_renamed].z = cell2.p[j].z;
cell_renamed.hv[np_renamed].x = cell2.hv[j].x;
cell_renamed.hv[np_renamed].y = cell2.hv[j].y;
cell_renamed.hv[np_renamed].z = cell2.hv[j].z;
cell_renamed.v[np_renamed].x = cell2.v[j].x;
cell_renamed.v[np_renamed].y = cell2.v[j].y;
cell_renamed.v[np_renamed].z = cell2.v[j].z;
//cell_renamed.debug[np_renamed] = index2;
}
// } //close nested loops
__syncthreads();
//} close RebuildGridMT()
////////////////////////////////////////////////////////////////////////////////
//void InitDensitiesAndForcesMT(int i) {
//from now on we don't change the cnumPars[index]
np = cnumPars[index]; //internal loop limit
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
cell.density[j] = 0.f;
cell.a[j].x = externalAcceleration.x;
cell.a[j].y = externalAcceleration.y;
cell.a[j].z = externalAcceleration.z;
}
// } //close nested loops
__syncthreads();
//} close InitDensitiesAndForcesMT()
////////////////////////////////////////////////////////////////////////////////
//void ComputeDensitiesMT(int i) {
// int neighCells[27];
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// int np = cnumPars[index];
// if (np == 0) continue;
//
// if np==0 we do not enter the following loop
// int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells);
// Cell &cell = cells[index];
Vec3 tmp;
for (int j = 0; j < np; ++j)
for (int inc = 0; inc < numNeighCells; ++inc) {
int indexNeigh = neighCells[inc];
Cell &neigh = cells[indexNeigh];
int numNeighPars = cnumPars[indexNeigh];
for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh)
if (&neigh.p[iparNeigh] < &cell.p[j]) {
//float distSq = (cell.p[j] - neigh.p[iparNeigh]).GetLengthSq();
float distSq;
operator_sub(&tmp,&cell.p[j],&neigh.p[iparNeigh]);
distSq = GetLengthSq(&tmp);
if (distSq < dev->hSq) {
float t = dev->hSq - distSq;
float tc = t*t*t;
if (border[index]) {
//use atomic
atomicAdd(&cell.density[j],tc);
} else {
cell.density[j] += tc;
}
if (border[indexNeigh]) {
//use atomic
atomicAdd(&neigh.density[iparNeigh],tc);
} else {
neigh.density[iparNeigh] += tc;
}
}
}
}
// } //close nested loops
__syncthreads();
//} close ComputeDensitiesMT()
////////////////////////////////////////////////////////////////////////////////
//void ComputeDensities2MT(int i) {
// const float tc = hSq*hSq*hSq;
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
cell.density[j] += dev->tc_orig;
cell.density[j] *= dev->densityCoeff;
}
// } //close nested loops
__syncthreads();
//} close ComputeDensities2MT()
////////////////////////////////////////////////////////////////////////////////
//void ComputeForcesMT(int i) {
// int neighCells[27];
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// int np = cnumPars[index];
// if (np == 0) continue;
//
// if np==0 we do not enter the following loop
// int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells);
// Cell &cell = cells[index];
for (int j = 0; j < np; ++j)
for (int inc = 0; inc < numNeighCells; ++inc) {
int indexNeigh = neighCells[inc];
Cell &neigh = cells[indexNeigh];
int numNeighPars = cnumPars[indexNeigh];
for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh)
if (&neigh.p[iparNeigh] < &cell.p[j]) {
//Vec3 disp = cell.p[j] - neigh.p[iparNeigh];
//float distSq = disp.GetLengthSq();
Vec3 disp;
operator_sub(&disp,&cell.p[j],&neigh.p[iparNeigh]);
float distSq = GetLengthSq(&disp);
if (distSq < dev->hSq) {
//float dist = sqrtf(std::max(distSq, 1e-12f));
float dist = sqrtf(fmaxf(distSq, 1e-12f));
float hmr = dev->h - dist;
//Vec3 acc = disp * pressureCoeff * (hmr*hmr/dist) *
// (cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity);
//acc += (neigh.v[iparNeigh] - cell.v[j]) * viscosityCoeff * hmr;
//acc /= cell.density[j] * neigh.density[iparNeigh];
Vec3 acc;
operator_mult(&acc,&disp, dev->pressureCoeff * (hmr*hmr/dist) *
(cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity));
operator_sub(&tmp,&neigh.v[iparNeigh],&cell.v[j]);
operator_mult(&tmp,&tmp,dev->viscosityCoeff * hmr);
operator_add(&acc,&acc,&tmp);
operator_div(&acc,&acc,cell.density[j] * neigh.density[iparNeigh]);
if (border[index]) {
//use atomics
#warning this works because no one reads these values at the moment ??
atomicAdd(&cell.a[j].x,acc.x);
atomicAdd(&cell.a[j].y,acc.y);
atomicAdd(&cell.a[j].z,acc.z);
} else {
operator_add(&cell.a[j],&cell.a[j],&acc);
}
if (border[indexNeigh]) {
//use atomics
#warning this works because no one reads these values at the moment ??
//reminder: there is no atomicSub for floats, so we add the negative value
atomicAdd(&neigh.a[iparNeigh].x,-acc.x);
atomicAdd(&neigh.a[iparNeigh].y,-acc.y);
atomicAdd(&neigh.a[iparNeigh].z,-acc.z);
} else {
operator_sub(&neigh.a[iparNeigh],&neigh.a[iparNeigh],&acc);
}
}
}
}
// } //close nested loops
__syncthreads();
//} close ComputeForcesMT()
////////////////////////////////////////////////////////////////////////////////
//void ProcessCollisionsMT(int i) {
// const float parSize = 0.0002f;
// const float epsilon = 1e-10f;
// const float stiffness = 30000.f;
// const float damping = 128.f;
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
//Vec3 pos = cell.p[j] + cell.hv[j] * timeStep;
Vec3 pos;
operator_mult(&pos,&cell.hv[j],timeStep);
operator_add(&pos,&pos,&cell.p[j]);
float diff = parSize - (pos.x - domainMin.x);
if (diff > epsilon)
cell.a[j].x += stiffness*diff - damping*cell.v[j].x;
diff = parSize - (domainMax.x - pos.x);
if (diff > epsilon)
cell.a[j].x -= stiffness*diff + damping*cell.v[j].x;
diff = parSize - (pos.y - domainMin.y);
if (diff > epsilon)
cell.a[j].y += stiffness*diff - damping*cell.v[j].y;
diff = parSize - (domainMax.y - pos.y);
if (diff > epsilon)
cell.a[j].y -= stiffness*diff + damping*cell.v[j].y;
diff = parSize - (pos.z - domainMin.z);
if (diff > epsilon)
cell.a[j].z += stiffness*diff - damping*cell.v[j].z;
diff = parSize - (domainMax.z - pos.z);
if (diff > epsilon)
cell.a[j].z -= stiffness*diff + damping*cell.v[j].z;
}
// } //close nested loops
__syncthreads();
//} close ProcessCollisionsMT()
////////////////////////////////////////////////////////////////////////////////
//void AdvanceParticlesMT(int i) {
// for (int iz = grids[i].sz; iz < grids[i].ez; ++iz)
// for (int iy = grids[i].sy; iy < grids[i].ey; ++iy)
// for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) {
// int index = (iz*ny + iy)*nx + ix;
// Cell &cell = cells[index];
// int np = cnumPars[index];
for (int j = 0; j < np; ++j) {
//Vec3 v_half = cell.hv[j] + cell.a[j]*timeStep;
Vec3 v_half;
operator_mult(&v_half,&cell.a[j],timeStep);
operator_add(&v_half,&v_half,&cell.hv[j]);
//cell.hv[j] = v_half;
cell.hv[j].x = v_half.x;
cell.hv[j].y = v_half.y;
cell.hv[j].z = v_half.z;
//cell.v[j] *= 0.5f;
operator_mult(&cell.v[j],&cell.v[j],0.5f);
//cell.v[j] = cell.hv[j] + v_half;
operator_add(&cell.v[j],&cell.hv[j],&v_half);
//we can change v_half now, (we want to use only one tmp variable)
//cell.p[j] += v_half * timeStep;
operator_mult(&v_half,&v_half,timeStep);
operator_add(&cell.p[j],&cell.p[j],&v_half);
}
// } //close nested loops
__syncthreads();
//} close AdvanceParticlesMT()
////////////////////////////////////////////////////////////////////////////////
/**/
} //close big_kernel()
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
int i;
int grid_x;
int grid_y;
int grid_z;
int block_x;
int block_y;
int block_z;
if (argc < 4 || argc >= 6) {
printf("Usage: %s <threadnum> <framenum> <.fluid input file> [.fluid output file]\n",argv[0]);
exit(EXIT_FAILURE);
}
int threadnum = atoi(argv[1]);
int framenum = atoi(argv[2]);
//Check arguments
if (threadnum < 1) {
printf("<threadnum> must at least be 1\n");
exit(EXIT_FAILURE);
}
if (framenum < 1) {
printf("<framenum> must at least be 1\n");
exit(EXIT_FAILURE);
}
//read input file, allocate memory, etc
InitSim(argv[3], threadnum);
  //no minus 1 here: indexing starts from 0, but these are block sizes (counts), not maximum indices
// block_x should be nx / XDIVS
// block_y should be ny //no partitioning here
// block_z should be nz / ZDIVS
grid_x = XDIVS;
grid_y = 1; //no partitioning here
grid_z = ZDIVS;
block_x = nx / XDIVS;
block_y = ny;
block_z = nz / ZDIVS;
//kernel stuff
dim3 grid(grid_x, grid_y, grid_z);
dim3 block(block_x, block_y, block_z);
//dim3 grid(grid_z, grid_x, grid_y);
//dim3 block(block_z, block_x, block_y);
//dim3 grid(1,1,1);
//dim3 block(8,8,8);
//move data to device
CudaSafeCall( __LINE__, cudaMemcpy(cells2, h_cells2, numCells * sizeof(struct Cell), cudaMemcpyHostToDevice) );
CudaSafeCall( __LINE__, cudaMemcpy(cnumPars2, h_cnumPars2, numCells * sizeof(int), cudaMemcpyHostToDevice) );
printf("grid (%d, %d, %d) block (%d, %d, %d)\n",
grid.x,grid.y,grid.z,block.x,block.y,block.z);
for (i=0;i<framenum;i++) {
big_kernel<<<grid,block>>>(cells,cnumPars,cells2,cnumPars2,dev,border);
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
printf("Cuda error: line %d: %s.\n", __LINE__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
}
//move data to host
CudaSafeCall( __LINE__, cudaMemcpy(h_cells, cells, numCells * sizeof(struct Cell), cudaMemcpyDeviceToHost) );
CudaSafeCall( __LINE__, cudaMemcpy(h_cnumPars, cnumPars, numCells * sizeof(int), cudaMemcpyDeviceToHost) );
// /*debug
int j;
for (i=0;i<numCells;i++) {
//if (h_cnumPars[i]!=i) { printf("got %d : expected : %d\n",h_cnumPars[i],i); }
/*for (j=0;j<h_cnumPars[i];j++) {
if (h_cells[i].debug[j] >= numCells) {
printf("in cell %d: particle %d: index2 out of bounds: %d\n",
i,j,h_cells[i].debug[j]);
}
}*/
}
// */
if (argc > 4) {
SaveFile(argv[4]);
}
CleanUpSim();
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
|
96ecc4ad415cf87e195249d77800cf759101aaeb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int tx = threadIdx.x;
int bx = blockDim.x;
int i = blockIdx.x*bx*2 + tx;
int j = blockIdx.y*2;
__shared__ float cb0[512], cb1[512];
float sum0 = 0.0, sum1 = 0.0, sum2 = 0.0, sum3 = 0.0;
for ( int ks = 0; ks < p; ks += bx ) {
cb0[tx] = c[ks+tx+pitch_c*j];
cb1[tx] = c[ks+tx+pitch_c*(j+1)];
__syncthreads();
for ( int k = ks; k < ks+bx; ++k ) {
float rb = b[i+pitch_b*k];
float rb1 = b[i+bx+pitch_b*k];
sum0 += rb * cb0[k-ks];
sum1 += rb * cb1[k-ks];
sum2 += rb1 * cb0[k-ks];
sum3 += rb1 * cb1[k-ks];
}
__syncthreads();
}
a[i+pitch_a*j] = sum0;
a[i+pitch_a*(j+1)] = sum1;
a[i+bx+pitch_a*j] = sum2;
a[i+bx+pitch_a*(j+1)] = sum3;
}
|
96ecc4ad415cf87e195249d77800cf759101aaeb.cu
|
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int tx = threadIdx.x;
int bx = blockDim.x;
int i = blockIdx.x*bx*2 + tx;
int j = blockIdx.y*2;
__shared__ float cb0[512], cb1[512];
float sum0 = 0.0, sum1 = 0.0, sum2 = 0.0, sum3 = 0.0;
for ( int ks = 0; ks < p; ks += bx ) {
cb0[tx] = c[ks+tx+pitch_c*j];
cb1[tx] = c[ks+tx+pitch_c*(j+1)];
__syncthreads();
for ( int k = ks; k < ks+bx; ++k ) {
float rb = b[i+pitch_b*k];
float rb1 = b[i+bx+pitch_b*k];
sum0 += rb * cb0[k-ks];
sum1 += rb * cb1[k-ks];
sum2 += rb1 * cb0[k-ks];
sum3 += rb1 * cb1[k-ks];
}
__syncthreads();
}
a[i+pitch_a*j] = sum0;
a[i+pitch_a*(j+1)] = sum1;
a[i+bx+pitch_a*j] = sum2;
a[i+bx+pitch_a*(j+1)] = sum3;
}
|
7ebc68f59cd11876fe57131735ba82d597ebcb36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ResidualForward(const int nthreads, const Dtype* bottom_data,
const Dtype* bottom_label, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int predt = static_cast<int>(bottom_data[index]);
int label = static_cast<int>(bottom_label[index]);
top_data[index] = label - predt;
}
}
template <typename Dtype>
void ResidualValueLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = bottom[0]->count();
hipLaunchKernelGGL(( ResidualForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom_label, top_data);
}
template <typename Dtype>
void ResidualValueLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  // TODO(Yangqing): implement the GPU version of the backward pass.
Backward_cpu(top, propagate_down, bottom);
}
INSTANTIATE_CLASS(ResidualValueLayer);
} // namespace caffe
|
7ebc68f59cd11876fe57131735ba82d597ebcb36.cu
|
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ResidualForward(const int nthreads, const Dtype* bottom_data,
const Dtype* bottom_label, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int predt = static_cast<int>(bottom_data[index]);
int label = static_cast<int>(bottom_label[index]);
top_data[index] = label - predt;
}
}
template <typename Dtype>
void ResidualValueLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = bottom[0]->count();
ResidualForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom_label, top_data);
}
template <typename Dtype>
void ResidualValueLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  // TODO(Yangqing): implement the GPU version of the backward pass.
Backward_cpu(top, propagate_down, bottom);
}
INSTANTIATE_CLASS(ResidualValueLayer);
} // namespace caffe
|
1a47b2901ee8ab91c99d95dd26dd97f212d8bfbd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <rocblas.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "cutlass/predicate_vector.h"
#include "tools/util/host_tensor.h"
namespace test {
template <typename PredicateVector>
__global__ void load_predicates(unsigned *output, unsigned const *input) {
PredicateVector predicates;
int const word_count = (PredicateVector::kPredicates + 31) / 32;
int i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned word = input[word_idx];
CUTLASS_PRAGMA_UNROLL
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = ((word >> bit) & 1);
predicates.set(i, pred);
if (predicates.at(i) != pred) {
printf("ERROR - cannot read back predicate\n");
}
++i;
}
}
__syncthreads();
i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned result = 0;
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = predicates.at(i ++);
result |= (unsigned(pred) << bit);
}
output[word_idx] = result;
}
}
}
TEST(PredicateVector, Basic) {
static int const Bits = 32;
static int const Words = (Bits + 31) / 32;
typedef cutlass::PredicateVector<Bits> PredicateVector;
cutlass::HostTensor<unsigned> output;
cutlass::HostTensor<unsigned> input;
output.resize(Words);
input.resize(Words);
// some arbitrary test bits
unsigned values[] = {
0xdeadbeef,
0xa0070032,
0x9076d001,
0x00000000,
0xabdfc0ad
};
for (int test = 0; test < 5; ++test) {
input[0] = values[test];
output[0] = 0;
input.sync_device();
output.sync_device();
hipLaunchKernelGGL(( test::load_predicates<PredicateVector>),
dim3(dim3(1,1,1)), dim3(dim3(1,1,1))
, 0, 0,
output.device_data(),
input.device_data()
);
output.sync_host();
for (int word = 0; word < Words; ++word) {
EXPECT_EQ(input[word], output[word])
<< "Expected: 0x" << std::hex << input.host_data()[word]
<< ", got: 0x" << output.host_data()[word]
<< std::dec;
}
}
}
TEST(PredicateVector, Count) {
{
typedef cutlass::PredicateVector<4, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<8, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<16, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<16, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<32, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<32, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<32, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<32, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<64, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<64, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<64, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 16)
<< "PredicateVector<64, 1> word count: " << int(PredicateVector::kWordCount);
}
}
|
1a47b2901ee8ab91c99d95dd26dd97f212d8bfbd.cu
|
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cublas_v2.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "cutlass/predicate_vector.h"
#include "tools/util/host_tensor.h"
namespace test {
template <typename PredicateVector>
__global__ void load_predicates(unsigned *output, unsigned const *input) {
PredicateVector predicates;
int const word_count = (PredicateVector::kPredicates + 31) / 32;
int i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned word = input[word_idx];
CUTLASS_PRAGMA_UNROLL
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = ((word >> bit) & 1);
predicates.set(i, pred);
if (predicates.at(i) != pred) {
printf("ERROR - cannot read back predicate\n");
}
++i;
}
}
__syncthreads();
i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned result = 0;
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = predicates.at(i ++);
result |= (unsigned(pred) << bit);
}
output[word_idx] = result;
}
}
}
TEST(PredicateVector, Basic) {
static int const Bits = 32;
static int const Words = (Bits + 31) / 32;
typedef cutlass::PredicateVector<Bits> PredicateVector;
cutlass::HostTensor<unsigned> output;
cutlass::HostTensor<unsigned> input;
output.resize(Words);
input.resize(Words);
// some arbitrary test bits
unsigned values[] = {
0xdeadbeef,
0xa0070032,
0x9076d001,
0x00000000,
0xabdfc0ad
};
for (int test = 0; test < 5; ++test) {
input[0] = values[test];
output[0] = 0;
input.sync_device();
output.sync_device();
test::load_predicates<PredicateVector><<<
dim3(1,1,1), dim3(1,1,1)
>>>(
output.device_data(),
input.device_data()
);
output.sync_host();
for (int word = 0; word < Words; ++word) {
EXPECT_EQ(input[word], output[word])
<< "Expected: 0x" << std::hex << input.host_data()[word]
<< ", got: 0x" << output.host_data()[word]
<< std::dec;
}
}
}
TEST(PredicateVector, Count) {
{
typedef cutlass::PredicateVector<4, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<8, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<16, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<16, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<32, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<32, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<32, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<32, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<64, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<64, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<64, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 16)
<< "PredicateVector<64, 1> word count: " << int(PredicateVector::kWordCount);
}
}
|
e4a7df7f66898b8096202cf04f4e119b021c297e.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_cc.cu
*
* @brief Simple test driver program for connected component.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// CC includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>
// Boost includes for CPU CC reference algorithms
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId>
struct CcList
{
VertexId root;
unsigned int histogram;
CcList(VertexId root, unsigned int histogram) :
root(root), histogram(histogram) {}
};
template<typename CcList>
bool CCCompare(
CcList elem1,
CcList elem2)
{
return elem1.histogram > elem2.histogram;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"\ntest_cc <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--quick]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the CC result (i.e., number of components)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] comp_ids Host-side vector to store computed component id for each node
* @param[in] nodes Number of nodes in the graph
* @param[in] num_components Number of connected components in the graph
* @param[in] roots Host-side vector stores the root for each node in the graph
* @param[in] histogram Histogram of connected component ids
*/
template<typename VertexId, typename SizeT>
void DisplaySolution(
VertexId *comp_ids,
SizeT nodes,
unsigned int num_components,
VertexId *roots,
unsigned int *histogram)
{
typedef CcList<VertexId> CcListType;
printf("Number of components: %d\n", num_components);
if (nodes <= 40)
{
printf("[");
for (VertexId i = 0; i < nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(comp_ids[i]);
printf(",");
printf(" ");
}
printf("]\n");
}
else
{
//sort the components by size
CcListType *cclist =
(CcListType*)malloc(sizeof(CcListType) * num_components);
for (int i = 0; i < num_components; ++i)
{
cclist[i].root = roots[i];
cclist[i].histogram = histogram[i];
}
std::stable_sort(
cclist, cclist + num_components, CCCompare<CcListType>);
// Print out at most top 10 largest components
int top = (num_components < 10) ? num_components : 10;
printf("Top %d largest components:\n", top);
for (int i = 0; i < top; ++i)
{
printf("CC ID: %d, CC Root: %d, CC Size: %d\n",
i, cclist[i].root, cclist[i].histogram);
}
free(cclist);
}
}
/**
* Performance/Evaluation statistics
*/
/******************************************************************************
* CC Testing Routines
*****************************************************************************/
/**
* @brief CPU-based reference CC algorithm using Boost Graph Library
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] row_offsets Host-side vector stores row offsets for each node in the graph
* @param[in] column_indices Host-side vector stores column indices for each edge in the graph
* @param[in] num_nodes
* @param[out] labels Host-side vector to store the component id for each node in the graph
*
* \return Number of connected components in the graph
*/
template<typename VertexId, typename SizeT>
unsigned int RefCPUCC(
SizeT *row_offsets, VertexId *column_indices, int num_nodes, int *labels)
{
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS> Graph;
Graph G;
for (int i = 0; i < num_nodes; ++i)
{
for (int j = row_offsets[i]; j < row_offsets[i+1]; ++j)
{
add_edge(i, column_indices[j], G);
}
}
CpuTimer cpu_timer;
cpu_timer.Start();
int num_components = connected_components(G, &labels[0]);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU CC finished in %lf msec.\n", elapsed);
return num_components;
}
/**
* @brief Run tests for connected component algorithm
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] max_grid_size Maximum CTA occupancy for CC kernels
* @param[in] iterations Number of iterations for running the test
* @param[in] num_gpus Number of GPUs
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
int max_grid_size,
int iterations,
int num_gpus)
{
typedef CCProblem<
VertexId,
SizeT,
Value,
true> Problem; //use double buffer for edgemap and vertexmap.
// Allocate host-side label array (for both reference and gpu-computed results)
VertexId *reference_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *h_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_check = (g_quick) ? NULL : reference_component_ids;
unsigned int ref_num_components = 0;
// Allocate CC enactor map
CCEnactor<INSTRUMENT> cc_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"CC Problem Initialization Failed", __FILE__, __LINE__);
//
    // Compute reference CPU CC solution
//
if (reference_check != NULL)
{
printf("Computing reference value ...\n");
ref_num_components = RefCPUCC(
graph.row_offsets,
graph.column_indices,
graph.nodes,
reference_check);
printf("\n");
}
// Perform CC
GpuTimer gpu_timer;
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(cc_enactor.GetFrontierType()),
"CC Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
cc_enactor.template Enact<Problem>(csr_problem, max_grid_size),
"CC Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
// printf("iteration %d, time:%5f\n", iter + 1, gpu_timer.ElapsedMillis());
}
elapsed /= iterations;
// Copy out results
util::GRError(
csr_problem->Extract(h_component_ids),
"CC Problem Data Extraction Failed", __FILE__, __LINE__);
// Validity
if (ref_num_components == csr_problem->num_components)
printf("CORRECT.\n");
else
printf("INCORRECT. Ref Component Count: %d,"
"GPU Computed Component Count: %d\n",
ref_num_components, csr_problem->num_components);
//if (ref_num_components == csr_problem->num_components)
{
// Compute size and root of each component
VertexId *h_roots = new VertexId[csr_problem->num_components];
unsigned int *h_histograms = new unsigned int[csr_problem->num_components];
csr_problem->ComputeCCHistogram(h_component_ids, h_roots, h_histograms);
// Display Solution
DisplaySolution(
h_component_ids, graph.nodes,
ref_num_components, h_roots, h_histograms);
if (h_roots) delete[] h_roots;
if (h_histograms) delete[] h_histograms;
}
printf("GPU Connected Component finished in %lf msec.\n", elapsed);
// Cleanup
if (csr_problem) delete csr_problem;
if (reference_component_ids) free(reference_component_ids);
if (h_component_ids) free(h_component_ids);
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args)
{
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
int iterations = 1;
instrumented = args.CheckCmdLineFlag("instrumented");
g_quick = args.CheckCmdLineFlag("quick");
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("iteration-num", iterations);
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
max_grid_size,
iterations,
num_gpus);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
max_grid_size,
iterations,
num_gpus);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
DeviceInit(args);
hipSetDeviceFlags(hipDeviceMapHost);
    // Parse graph-construction params
    g_undirected = false; //do not make the graph undirected
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef int Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
fflush(stdout);
// Run tests
RunTests(csr, args);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
e4a7df7f66898b8096202cf04f4e119b021c297e.cu
|
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_cc.cu
*
* @brief Simple test driver program for connected component.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// CC includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>
// Boost includes for CPU CC reference algorithms
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId>
struct CcList
{
VertexId root;
unsigned int histogram;
CcList(VertexId root, unsigned int histogram) :
root(root), histogram(histogram) {}
};
template<typename CcList>
bool CCCompare(
CcList elem1,
CcList elem2)
{
return elem1.histogram > elem2.histogram;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"\ntest_cc <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--quick]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the CC result (i.e., number of components)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] comp_ids Host-side vector to store computed component id for each node
* @param[in] nodes Number of nodes in the graph
* @param[in] num_components Number of connected components in the graph
* @param[in] roots Host-side vector stores the root for each node in the graph
* @param[in] histogram Histogram of connected component ids
*/
template<typename VertexId, typename SizeT>
void DisplaySolution(
VertexId *comp_ids,
SizeT nodes,
unsigned int num_components,
VertexId *roots,
unsigned int *histogram)
{
typedef CcList<VertexId> CcListType;
printf("Number of components: %d\n", num_components);
if (nodes <= 40)
{
printf("[");
for (VertexId i = 0; i < nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(comp_ids[i]);
printf(",");
printf(" ");
}
printf("]\n");
}
else
{
//sort the components by size
CcListType *cclist =
(CcListType*)malloc(sizeof(CcListType) * num_components);
for (int i = 0; i < num_components; ++i)
{
cclist[i].root = roots[i];
cclist[i].histogram = histogram[i];
}
std::stable_sort(
cclist, cclist + num_components, CCCompare<CcListType>);
// Print out at most top 10 largest components
int top = (num_components < 10) ? num_components : 10;
printf("Top %d largest components:\n", top);
for (int i = 0; i < top; ++i)
{
printf("CC ID: %d, CC Root: %d, CC Size: %d\n",
i, cclist[i].root, cclist[i].histogram);
}
free(cclist);
}
}
/**
* Performance/Evaluation statistics
*/
/******************************************************************************
* CC Testing Routines
*****************************************************************************/
/**
* @brief CPU-based reference CC algorithm using Boost Graph Library
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] row_offsets Host-side vector stores row offsets for each node in the graph
* @param[in] column_indices Host-side vector stores column indices for each edge in the graph
* @param[in] num_nodes
* @param[out] labels Host-side vector to store the component id for each node in the graph
*
* \return Number of connected components in the graph
*/
template<typename VertexId, typename SizeT>
unsigned int RefCPUCC(
SizeT *row_offsets, VertexId *column_indices, int num_nodes, int *labels)
{
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS> Graph;
Graph G;
for (int i = 0; i < num_nodes; ++i)
{
for (int j = row_offsets[i]; j < row_offsets[i+1]; ++j)
{
add_edge(i, column_indices[j], G);
}
}
CpuTimer cpu_timer;
cpu_timer.Start();
int num_components = connected_components(G, &labels[0]);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU CC finished in %lf msec.\n", elapsed);
return num_components;
}
/**
* @brief Run tests for connected component algorithm
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] max_grid_size Maximum CTA occupancy for CC kernels
* @param[in] iterations Number of iterations for running the test
* @param[in] num_gpus Number of GPUs
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
int max_grid_size,
int iterations,
int num_gpus)
{
typedef CCProblem<
VertexId,
SizeT,
Value,
true> Problem; //use double buffer for edgemap and vertexmap.
// Allocate host-side label array (for both reference and gpu-computed results)
VertexId *reference_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *h_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_check = (g_quick) ? NULL : reference_component_ids;
unsigned int ref_num_components = 0;
// Allocate CC enactor map
CCEnactor<INSTRUMENT> cc_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"CC Problem Initialization Failed", __FILE__, __LINE__);
//
    // Compute reference CPU CC solution
//
if (reference_check != NULL)
{
printf("Computing reference value ...\n");
ref_num_components = RefCPUCC(
graph.row_offsets,
graph.column_indices,
graph.nodes,
reference_check);
printf("\n");
}
// Perform CC
GpuTimer gpu_timer;
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(cc_enactor.GetFrontierType()),
"CC Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
cc_enactor.template Enact<Problem>(csr_problem, max_grid_size),
"CC Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
// printf("iteration %d, time:%5f\n", iter + 1, gpu_timer.ElapsedMillis());
}
elapsed /= iterations;
// Copy out results
util::GRError(
csr_problem->Extract(h_component_ids),
"CC Problem Data Extraction Failed", __FILE__, __LINE__);
// Validity
if (ref_num_components == csr_problem->num_components)
printf("CORRECT.\n");
else
printf("INCORRECT. Ref Component Count: %d,"
"GPU Computed Component Count: %d\n",
ref_num_components, csr_problem->num_components);
//if (ref_num_components == csr_problem->num_components)
{
// Compute size and root of each component
VertexId *h_roots = new VertexId[csr_problem->num_components];
unsigned int *h_histograms = new unsigned int[csr_problem->num_components];
csr_problem->ComputeCCHistogram(h_component_ids, h_roots, h_histograms);
// Display Solution
DisplaySolution(
h_component_ids, graph.nodes,
ref_num_components, h_roots, h_histograms);
if (h_roots) delete[] h_roots;
if (h_histograms) delete[] h_histograms;
}
printf("GPU Connected Component finished in %lf msec.\n", elapsed);
// Cleanup
if (csr_problem) delete csr_problem;
if (reference_component_ids) free(reference_component_ids);
if (h_component_ids) free(h_component_ids);
cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args)
{
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
int iterations = 1;
instrumented = args.CheckCmdLineFlag("instrumented");
g_quick = args.CheckCmdLineFlag("quick");
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("iteration-num", iterations);
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
max_grid_size,
iterations,
num_gpus);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
max_grid_size,
iterations,
num_gpus);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
DeviceInit(args);
cudaSetDeviceFlags(cudaDeviceMapHost);
    // Parse graph-construction params
    g_undirected = false; //do not make the graph undirected
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef int Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
fflush(stdout);
// Run tests
RunTests(csr, args);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
31c40cad663fb8f3612b0b1b8f0eead289c42151.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
using namespace cv;
class erodeFunctor //: public thrust::shared_unary_window_transform_functor<uchar>
{
public:
__device__ uchar operator() (const thrust::window_2d<uchar> &inputWindow,const thrust::window_2d<uchar> &outputWindow) const
{
uchar temp = 255;
for(int i = 0; i<3;i++)
{
for(int j = 0; j<3;j++)
{
temp = min((float)temp,(float)inputWindow[i][j]);
}
}
outputWindow[3/2][3/2]=temp;
return 0;
}
};
int main(int argc, char const *argv[]) {
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
thrust::block_2d<unsigned char > image_block (image.cols,image.rows);
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> outBlock (image.cols,image.rows);
thrust::block_2d<uchar> null_block (image.cols,image.rows);
uchar * img = (uchar * )malloc(sizeof(uchar)*(image_block.end()-image_block.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
thrust::window_vector<uchar> myVector = thrust::window_vector<uchar>(&uchar_image_block,3,3,1,1);
thrust::window_vector<uchar> outputVector = thrust::window_vector<uchar>(&outBlock,3,3,1,1);
thrust::transform(myVector.begin(),myVector.end(),outputVector.begin(),null_block.begin(),erodeFunctor());
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
outBlock.download(&img);
for(int i = 0; i<image.cols*image.rows;i++)
{
outputFloatImageData[i]=(unsigned char)img[i];
}
Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("output.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("output.png",output);
waitKey(0);
#endif
return 0;
}
|
31c40cad663fb8f3612b0b1b8f0eead289c42151.cu
|
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
using namespace cv;
class erodeFunctor //: public thrust::shared_unary_window_transform_functor<uchar>
{
public:
__device__ uchar operator() (const thrust::window_2d<uchar> &inputWindow,const thrust::window_2d<uchar> &outputWindow) const
{
uchar temp = 255;
for(int i = 0; i<3;i++)
{
for(int j = 0; j<3;j++)
{
temp = min((float)temp,(float)inputWindow[i][j]);
}
}
outputWindow[3/2][3/2]=temp;
return 0;
}
};
int main(int argc, char const *argv[]) {
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
thrust::block_2d<unsigned char > image_block (image.cols,image.rows);
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> outBlock (image.cols,image.rows);
thrust::block_2d<uchar> null_block (image.cols,image.rows);
uchar * img = (uchar * )malloc(sizeof(uchar)*(image_block.end()-image_block.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
thrust::window_vector<uchar> myVector = thrust::window_vector<uchar>(&uchar_image_block,3,3,1,1);
thrust::window_vector<uchar> outputVector = thrust::window_vector<uchar>(&outBlock,3,3,1,1);
thrust::transform(myVector.begin(),myVector.end(),outputVector.begin(),null_block.begin(),erodeFunctor());
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
outBlock.download(&img);
for(int i = 0; i<image.cols*image.rows;i++)
{
outputFloatImageData[i]=(unsigned char)img[i];
}
Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("output.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("output.png",output);
waitKey(0);
#endif
return 0;
}
|
982cfbe4ab8a70b13c2763fa9fde16528707db0d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
* sample.cu
 * This is an example for using thrust in CUDA programming.
*
* Written by: Wayne Wood
* Manchester, UK
* 22/05/2010
*
* This source code is licensed under The GNU General Public License (GPLv3)
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <thrust/version.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <iostream>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void) { return true; }
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
/************************************************************************/
/* raw CUDA routines */
/************************************************************************/
long DATA_SIZE = 1 * 1024 * 1024; // 1 M
int * data;
// generate random number ranged in [0, 9]
void GenerateNumbers(int * number, int size)
{
srand(time(NULL));
for (int i = 0; i < size; i++) {
number[i] = rand() % 10;
}
}
#define BLOCK_NUM 32
#define THREAD_NUM 512
__global__ static void sumOfSquares(int * num, int * result, clock_t * time,
int DATA_SIZE)
{
extern __shared__ int shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) time[bid] = clock();
shared[tid] = 0;
for (int i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) {
shared[tid] += num[i] * num[i];
}
__syncthreads();
int offset = THREAD_NUM / 2;
while (offset > 0) {
if (tid < offset) {
shared[tid] += shared[tid + offset];
}
offset >>= 1;
__syncthreads();
}
if (tid == 0) {
result[bid] = shared[0];
time[bid + BLOCK_NUM] = clock();
}
}
/************************************************************************/
/* helper routines for thrust */
/************************************************************************/
// define functor for
// random number ranged in [0, 9]
class random
{
public:
int operator() ()
{
return rand() % 10;
}
};
// define transformation f(x) -> x^2
template <typename T>
struct square
{
__host__ __device__
T operator() (T x)
{
return x * x;
}
};
/************************************************************************/
/* The main routine */
/************************************************************************/
int main(int argc, char* argv[])
{
if (!InitCUDA()) {
return 0;
}
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl;
std::cout << std::endl;
thrust::host_vector<int> vec_data;
// for timer
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
LARGE_INTEGER elapsed_time_start, elapsed_time_end;
double elapsed_time;
// output file
FILE * optr = fopen("testResults.csv", "w");
if (!optr) {
std::cout << "cannot open file!" << std::endl;
return 1;
}
fprintf(optr,"Size (M),GPU Time,CPU Time,GPU thrust,CPU thrust\n");
for (int h = 0; h < 6; h++, DATA_SIZE *= 2)
{
printf("Data size = %dM\n", DATA_SIZE / (1024 * 1024));
fprintf(optr, "%d,", DATA_SIZE / (1024 * 1024));
//////////////////////////////////////////////////////////////////
// raw CUDA
//////////////////////////////////////////////////////////////////
data = new int[DATA_SIZE];
GenerateNumbers(data, DATA_SIZE);
//
// calculation on GPU
//
QueryPerformanceCounter(&elapsed_time_start);
int * gpudata, * result;
clock_t * time;
hipMalloc((void **) &gpudata, sizeof(int) * DATA_SIZE);
hipMalloc((void **) &result, sizeof(int) * THREAD_NUM * BLOCK_NUM);
hipMalloc((void **) &time, sizeof(clock_t) * BLOCK_NUM * 2);
hipMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice);
int sum[BLOCK_NUM];
hipLaunchKernelGGL(( sumOfSquares), dim3(BLOCK_NUM), dim3(THREAD_NUM), THREAD_NUM * sizeof(int), 0,
gpudata, result, time, DATA_SIZE);
hipMemcpy(&sum, result, sizeof(int) * BLOCK_NUM, hipMemcpyDeviceToHost);
int final_sum = 0;
for (int i = 0; i < BLOCK_NUM; i++) {
final_sum += sum[i];
}
QueryPerformanceCounter(&elapsed_time_end);
hipFree(gpudata);
hipFree(result);
clock_t time_used[BLOCK_NUM * 2];
hipMemcpy(&time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, hipMemcpyDeviceToHost);
hipFree(time);
clock_t min_start, max_end;
min_start = time_used[0];
max_end = time_used[BLOCK_NUM];
for (int i = 1; i < BLOCK_NUM; i++) {
if (min_start > time_used[i])
min_start = time_used[i];
if (max_end < time_used[i + BLOCK_NUM])
max_end = time_used[i + BLOCK_NUM];
}
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
// elapsed_time = (double)(max_end - min_start) / CLOCKS_PER_SEC;
printf("sum (on GPU): %d; time: %lf (core clocks: %d)\n", final_sum, elapsed_time,
max_end - min_start);
fprintf(optr, "%f,", elapsed_time);
//
// calculation on CPU
//
QueryPerformanceCounter(&elapsed_time_start);
final_sum = 0;
for (int i = 0; i < DATA_SIZE; i++) {
final_sum += data[i] * data[i];
}
QueryPerformanceCounter(&elapsed_time_end);
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
printf("sum (on CPU): %d; time: %lf\n", final_sum, elapsed_time);
fprintf(optr, "%f,", elapsed_time);
free(data);
//////////////////////////////////////////////////////////////////
// with thrust support
//////////////////////////////////////////////////////////////////
std::cout << "if with thrust support," << std::endl;
//
// calculation on GPU
//
vec_data.resize(DATA_SIZE);
//srand(time(NULL));
thrust::generate(vec_data.begin(), vec_data.end(), random());
QueryPerformanceCounter(&elapsed_time_start);
thrust::device_vector<int> vec_gpudata = vec_data;
final_sum = thrust::transform_reduce(vec_gpudata.begin(), vec_gpudata.end(),
square<int>(), 0, thrust::plus<int>());
QueryPerformanceCounter(&elapsed_time_end);
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
printf("sum (on GPU): %d; time: %lf\n", final_sum, elapsed_time);
fprintf(optr, "%f,", elapsed_time);
//
// calculation on CPU
//
QueryPerformanceCounter(&elapsed_time_start);
final_sum = 0;
for (int i = 0; i < DATA_SIZE; i++) {
final_sum += vec_data[i] * vec_data[i];
}
QueryPerformanceCounter(&elapsed_time_end);
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
printf("sum (on CPU): %d; time: %lf\n", final_sum, elapsed_time);
fprintf(optr, "%f\n", elapsed_time);
std::cout << std::endl;
}
fclose(optr);
return 0;
}
|
982cfbe4ab8a70b13c2763fa9fde16528707db0d.cu
|
/********************************************************************
* sample.cu
 * This is an example for using thrust in CUDA programming.
*
* Written by: Wayne Wood
* Manchester, UK
* 22/05/2010
*
* This source code is licensed under The GNU General Public License (GPLv3)
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <thrust/version.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <iostream>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void) { return true; }
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
/************************************************************************/
/* raw CUDA routines */
/************************************************************************/
long DATA_SIZE = 1 * 1024 * 1024; // 1 M
int * data;
// generate random number ranged in [0, 9]
void GenerateNumbers(int * number, int size)
{
srand(time(NULL));
for (int i = 0; i < size; i++) {
number[i] = rand() % 10;
}
}
#define BLOCK_NUM 32
#define THREAD_NUM 512
// Each block accumulates a grid-strided partial sum of squares in shared memory,
// reduces the THREAD_NUM per-thread sums with a binary tree, and writes one
// partial result per block; thread 0 also records the block's start/end clocks.
__global__ static void sumOfSquares(int * num, int * result, clock_t * time,
int DATA_SIZE)
{
extern __shared__ int shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) time[bid] = clock();
shared[tid] = 0;
// grid-stride loop: each thread squares and sums every (BLOCK_NUM * THREAD_NUM)-th element
for (int i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) {
shared[tid] += num[i] * num[i];
}
__syncthreads();
// tree reduction in shared memory: halve the number of active threads each pass
int offset = THREAD_NUM / 2;
while (offset > 0) {
if (tid < offset) {
shared[tid] += shared[tid + offset];
}
offset >>= 1;
__syncthreads();
}
if (tid == 0) {
result[bid] = shared[0];
time[bid + BLOCK_NUM] = clock();
}
}
/************************************************************************/
/* helper routines for thrust */
/************************************************************************/
// define functor for
// random number ranged in [0, 9]
class random
{
public:
int operator() ()
{
return rand() % 10;
}
};
// define transformation f(x) -> x^2
template <typename T>
struct square
{
__host__ __device__
T operator() (T x)
{
return x * x;
}
};
/************************************************************************/
/* The main routine */
/************************************************************************/
int main(int argc, char* argv[])
{
if (!InitCUDA()) {
return 0;
}
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl;
std::cout << std::endl;
thrust::host_vector<int> vec_data;
// for timer
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
LARGE_INTEGER elapsed_time_start, elapsed_time_end;
double elapsed_time;
// output file
FILE * optr = fopen("testResults.csv", "w");
if (!optr) {
std::cout << "cannot open file!" << std::endl;
return 1;
}
fprintf(optr,"Size (M),GPU Time,CPU Time,GPU thrust,CPU thrust\n");
for (int h = 0; h < 6; h++, DATA_SIZE *= 2)
{
printf("Data size = %dM\n", DATA_SIZE / (1024 * 1024));
fprintf(optr, "%d,", DATA_SIZE / (1024 * 1024));
//////////////////////////////////////////////////////////////////
// raw CUDA
//////////////////////////////////////////////////////////////////
data = new int[DATA_SIZE];
GenerateNumbers(data, DATA_SIZE);
//
// calculation on GPU
//
QueryPerformanceCounter(&elapsed_time_start);
int * gpudata, * result;
clock_t * time;
cudaMalloc((void **) &gpudata, sizeof(int) * DATA_SIZE);
cudaMalloc((void **) &result, sizeof(int) * THREAD_NUM * BLOCK_NUM);
cudaMalloc((void **) &time, sizeof(clock_t) * BLOCK_NUM * 2);
cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
int sum[BLOCK_NUM];
sumOfSquares<<<BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int)>>>
(gpudata, result, time, DATA_SIZE);
cudaMemcpy(&sum, result, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost);
int final_sum = 0;
for (int i = 0; i < BLOCK_NUM; i++) {
final_sum += sum[i];
}
QueryPerformanceCounter(&elapsed_time_end);
cudaFree(gpudata);
cudaFree(result);
clock_t time_used[BLOCK_NUM * 2];
cudaMemcpy(&time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
cudaFree(time);
clock_t min_start, max_end;
min_start = time_used[0];
max_end = time_used[BLOCK_NUM];
for (int i = 1; i < BLOCK_NUM; i++) {
if (min_start > time_used[i])
min_start = time_used[i];
if (max_end < time_used[i + BLOCK_NUM])
max_end = time_used[i + BLOCK_NUM];
}
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
// elapsed_time = (double)(max_end - min_start) / CLOCKS_PER_SEC;
printf("sum (on GPU): %d; time: %lf (core clocks: %d)\n", final_sum, elapsed_time,
(int)(max_end - min_start));
fprintf(optr, "%f,", elapsed_time);
//
// calculation on CPU
//
QueryPerformanceCounter(&elapsed_time_start);
final_sum = 0;
for (int i = 0; i < DATA_SIZE; i++) {
final_sum += data[i] * data[i];
}
QueryPerformanceCounter(&elapsed_time_end);
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
printf("sum (on CPU): %d; time: %lf\n", final_sum, elapsed_time);
fprintf(optr, "%f,", elapsed_time);
delete[] data; // allocated with new int[DATA_SIZE], so delete[] rather than free()
//////////////////////////////////////////////////////////////////
// with thrust support
//////////////////////////////////////////////////////////////////
std::cout << "if with thrust support," << std::endl;
//
// calculation on GPU
//
vec_data.resize(DATA_SIZE);
//srand(time(NULL));
thrust::generate(vec_data.begin(), vec_data.end(), random());
QueryPerformanceCounter(&elapsed_time_start);
thrust::device_vector<int> vec_gpudata = vec_data;
final_sum = thrust::transform_reduce(vec_gpudata.begin(), vec_gpudata.end(),
square<int>(), 0, thrust::plus<int>());
QueryPerformanceCounter(&elapsed_time_end);
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
printf("sum (on GPU): %d; time: %lf\n", final_sum, elapsed_time);
fprintf(optr, "%f,", elapsed_time);
//
// calculation on CPU
//
QueryPerformanceCounter(&elapsed_time_start);
final_sum = 0;
for (int i = 0; i < DATA_SIZE; i++) {
final_sum += vec_data[i] * vec_data[i];
}
QueryPerformanceCounter(&elapsed_time_end);
elapsed_time = (double)(elapsed_time_end.QuadPart - elapsed_time_start.QuadPart)
/ frequency.QuadPart;
printf("sum (on CPU): %d; time: %lf\n", final_sum, elapsed_time);
fprintf(optr, "%f\n", elapsed_time);
std::cout << std::endl;
}
fclose(optr);
return 0;
}
|
a9102babd1750d3c8ff078d51f6450f9838e8f8d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler [email protected]
*/
#include "gpuinflate.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
#define NUMTHREADS 128 // Threads per block
#define MAXBITS 15 // maximum bits in a code
#define MAXLCODES 286 // maximum number of literal/length codes
#define MAXDCODES 30 // maximum number of distance codes
#define MAXCODES (MAXLCODES+MAXDCODES) // maximum codes lengths to read
#define FIXLCODES 288 // number of fixed literal/length codes
#define LOG2LENLUT 10
#define LOG2DISTLUT 8
struct scratch_arr
{
int16_t lengths[MAXCODES]; // descriptor code lengths
int16_t offs[MAXBITS + 1]; // offset in symbol table for each length (scratch)
};
struct lut_arr
{
int32_t lenlut[1 << LOG2LENLUT]; // LUT for length decoding
int32_t distlut[1 << LOG2DISTLUT]; // LUT for fast distance decoding
};
// 4 batches of 32 symbols
#define LOG2_BATCH_COUNT 2 // 1..5
#define LOG2_BATCH_SIZE 5
#define BATCH_COUNT (1 << LOG2_BATCH_COUNT)
#define BATCH_SIZE (1 << LOG2_BATCH_SIZE)
struct xwarp_s
{
int32_t batch_len[BATCH_COUNT]; // Length of each batch - <0:end, 0:not ready, >0:symbol count
union {
uint32_t symqueue[BATCH_COUNT * BATCH_SIZE];
uint8_t symqueue8[BATCH_COUNT * BATCH_SIZE * 4];
} u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
#define LOG2_PREFETCH_SIZE 9 // Must be at least LOG2_BATCH_SIZE+3
#define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE)
#define PREFETCH_ADDR32(q, p) (uint32_t *)(&q.pref_data[(PREFETCH_SIZE - 4) & (size_t)(p)])
struct prefetch_queue_s
{
const uint8_t *cur_p; // Prefetch location
int run; // prefetcher will exit when run=0
uint8_t pref_data[PREFETCH_SIZE];
};
#endif // ENABLE_PREFETCH
struct inflate_state_s {
// output state
uint8_t *out; // output buffer
uint8_t *outbase; // start of output buffer
uint8_t *outend; // end of output buffer
// Input state
uint8_t *cur; // input buffer
uint8_t *end; // end of input buffer
uint2 bitbuf; // bit buffer (64-bit)
uint32_t bitpos; // position in bit buffer
int32_t err;
int btype; // current block type
int blast; // last block
uint32_t stored_blk_len;// length of stored (uncompressed) block
uint16_t first_slow_len; // first code not in fast LUT
uint16_t index_slow_len;
uint16_t first_slow_dist;
uint16_t index_slow_dist;
volatile xwarp_s x;
#if ENABLE_PREFETCH
volatile prefetch_queue_s pref;
#endif
int16_t lencnt[MAXBITS+1];
int16_t lensym[FIXLCODES]; // Assumes FIXLCODES >= MAXLCODES
int16_t distcnt[MAXBITS+1];
int16_t distsym[MAXDCODES];
union {
scratch_arr scratch;
lut_arr lut;
} u;
};
inline __device__ unsigned int bfe(unsigned int source, unsigned int bit_start, unsigned int num_bits)
{
unsigned int bits;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
return bits;
};
inline __device__ uint32_t showbits(inflate_state_s *s, uint32_t n)
{
uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
return (next32 & ((1 << n) - 1));
}
inline __device__ uint32_t nextbits32(inflate_state_s *s)
{
return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
inline __device__ void skipbits(inflate_state_s *s, uint32_t n)
{
uint32_t bitpos = s->bitpos + n;
if (bitpos >= 32)
{
uint8_t *cur = s->cur + 8;
s->bitbuf.x = s->bitbuf.y;
s->bitbuf.y = (cur < s->end) ? *(uint32_t *)cur : 0;
s->cur = cur - 4;
bitpos &= 0x1f;
}
s->bitpos = bitpos;
}
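// Illustrative worked example (not used by the decoder itself): the bit buffer
// holds two consecutive little-endian input words in bitbuf.x/bitbuf.y, and
// bitpos is the offset of the next unread bit within bitbuf.x. Assuming
// bitbuf.x = 0xDDCCBBAA, bitbuf.y = 0x44332211 and bitpos = 8, the funnel shift
// in showbits()/nextbits32() evaluates as
//   (bitbuf.y : bitbuf.x) = 0x44332211DDCCBBAA
//   >> 8                  = 0x0044332211DDCCBB
//   low 32 bits           = 0x11DDCCBB, so showbits(s, 4) == 0xB
// Once bitpos reaches 32, skipbits() slides the window forward one word:
// bitbuf.x = bitbuf.y, bitbuf.y = next input word, cur += 4, bitpos &= 0x1f.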
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would become quite a bit faster
__device__ uint32_t getbits(inflate_state_s *s, uint32_t n)
{
uint32_t v = showbits(s, n);
skipbits(s, n);
return v;
}
/*
* Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading MAXBITS bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s *s, const int16_t *counts, const int16_t *symbols)
{
unsigned int len; // current number of bits in code
unsigned int code; // len bits being decoded
unsigned int first; // first code of length len
unsigned int count; // number of codes of length len
uint32_t next32r = __brev(nextbits32(s));
first = 0;
for (len = 1; len <= MAXBITS; len++)
{
code = (next32r >> (32 - len)) - first;
count = counts[len];
if (code < count) // if length len, return symbol
{
skipbits(s, len);
return symbols[code];
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
return -10; // ran out of codes
}
/*
* Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= MAXBITS
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(inflate_state_s *s, int16_t *counts, int16_t *symbols, const int16_t *length, int n)
{
int symbol; // current symbol when stepping through length[]
int len; // current length when stepping through counts[]
int left; // number of possible codes left of current length
int16_t *offs = s->u.scratch.offs;
// count number of codes of each length
for (len = 0; len <= MAXBITS; len++)
counts[len] = 0;
for (symbol = 0; symbol < n; symbol++)
(counts[length[symbol]])++; // assumes lengths are within bounds
if (counts[0] == n) // no codes!
return 0; // complete, but decode() will fail
// check for an over-subscribed or incomplete set of lengths
left = 1; // one possible code of zero length
for (len = 1; len <= MAXBITS; len++)
{
left <<= 1; // one more bit, double codes left
left -= counts[len]; // deduct count from possible codes
if (left < 0)
return left; // over-subscribed--return negative
} // left > 0 means incomplete
// generate offsets into symbol table for each length for sorting
offs[1] = 0;
for (len = 1; len < MAXBITS; len++)
offs[len + 1] = offs[len] + counts[len];
// put symbols in table sorted by length, by symbol order within each length
for (symbol = 0; symbol < n; symbol++)
if (length[symbol] != 0)
symbols[offs[length[symbol]]++] = symbol;
// return zero for complete set, positive for incomplete set
return left;
}
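// Illustrative worked example (not executed anywhere): for four symbols with
// code lengths length[] = {2, 1, 3, 3}, construct() yields
//   counts[]  : counts[1] = 1, counts[2] = 1, counts[3] = 2 (left reaches 0 -> complete code)
//   offs[]    : offs[1] = 0, offs[2] = 1, offs[3] = 2
//   symbols[] : {1, 0, 2, 3} (sorted by length, original order kept within a length)
// which corresponds to the canonical codes 0, 10, 110 and 111 for symbols 1, 0, 2 and 3.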
// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff };
// Dynamic block (custom huffman tables)
__device__ int init_dynamic(inflate_state_s *s)
{
int nlen, ndist, ncode; /* number of lengths in descriptor */
int index; /* index of lengths[] */
int err; /* construct() return value */
int16_t *lengths = s->u.scratch.lengths;
// get number of lengths in each table, check lengths
nlen = getbits(s, 5) + 257;
ndist = getbits(s, 5) + 1;
ncode = getbits(s, 4) + 4;
if (nlen > MAXLCODES || ndist > MAXDCODES)
{
return -3; // bad counts
}
// read code length code lengths (really), missing lengths are zero
for (index = 0; index < ncode; index++)
lengths[g_code_order[index]] = getbits(s, 3);
for (; index < 19; index++)
lengths[g_code_order[index]] = 0;
// build huffman table for code lengths codes (use lencode temporarily)
err = construct(s, s->lencnt, s->lensym, lengths, 19);
if (err != 0) // require complete code set here
return -4;
// read length/literal and distance code length tables
index = 0;
while (index < nlen + ndist)
{
int symbol = decode(s, s->lencnt, s->lensym);
if (symbol < 0)
return symbol; // invalid symbol
if (symbol < 16) // length in 0..15
lengths[index++] = symbol;
else { // repeat instruction
int len = 0; // last length to repeat, assume repeating zeros
if (symbol == 16) { // repeat last length 3..6 times
if (index == 0)
return -5; // no last length!
len = lengths[index - 1]; // last length
symbol = 3 + getbits(s, 2);
}
else if (symbol == 17) // repeat zero 3..10 times
symbol = 3 + getbits(s, 3);
else // == 18, repeat zero 11..138 times
symbol = 11 + getbits(s, 7);
if (index + symbol > nlen + ndist)
return -6; // too many lengths!
while (symbol--) // repeat last or zero symbol times
lengths[index++] = len;
}
}
// check for end-of-block code -- there better be one!
if (lengths[256] == 0)
return -9;
// build huffman table for literal/length codes
err = construct(s, s->lencnt, s->lensym, lengths, nlen);
if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
return -7; // incomplete code ok only for single length 1 code
// build huffman table for distance codes
err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
return -8; // incomplete code ok only for single length 1 code
return 0;
}
/*
* Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
* of the code. They are eight bits long and the longest literal/length
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s *s)
{
int16_t *lengths = s->u.scratch.lengths;
int symbol;
// literal/length table
for (symbol = 0; symbol < 144; symbol++)
lengths[symbol] = 8;
for (; symbol < 256; symbol++)
lengths[symbol] = 9;
for (; symbol < 280; symbol++)
lengths[symbol] = 7;
for (; symbol < FIXLCODES; symbol++)
lengths[symbol] = 8;
construct(s, s->lencnt, s->lensym, lengths, FIXLCODES);
// distance table
for (symbol = 0; symbol < MAXDCODES; symbol++)
lengths[symbol] = 5;
// build huffman table for distance codes
construct(s, s->distcnt, s->distsym, lengths, MAXDCODES);
return 0;
}
/*
* Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
* pairs terminated by an end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
* followed by a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
// Base values and extra-bit counts for length and distance codes (RFC 1951)
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258 };
static const __device__ __constant__ uint16_t g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4,
5, 5, 5, 5, 0 };
static const __device__ __constant__ uint16_t g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577 };
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
12, 12, 13, 13 };
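// Illustrative worked example (derived from the tables above): a decoded symbol
// maps to base + extra bits. Length symbol 269 has base g_lens[269 - 257] = 19
// with g_lext[12] = 2 extra bits, so it encodes lengths 19..22; distance symbol 6
// has base g_dists[6] = 9 with g_dext[6] = 2 extra bits, so it encodes distances
// 9..12, matching the tables in RFC 1951.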
// Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s *s)
{
uint32_t bitpos = s->bitpos;
uint2 bitbuf = s->bitbuf;
uint8_t *cur = s->cur;
uint8_t *end = s->end;
int32_t batch = 0;
int32_t sym, batch_len;
do
{
volatile uint32_t *b = &s->x.u.symqueue[batch * BATCH_SIZE];
// Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
// Wait for prefetcher to fetch a worst-case of 48 bits per symbol
while ((*(volatile int32_t *)&s->pref.cur_p - (int32_t)(size_t)cur < BATCH_SIZE*6)
|| (s->x.batch_len[batch] != 0))
#else
while (s->x.batch_len[batch] != 0)
#endif
{
NANOSLEEP(100);
}
batch_len = 0;
#if ENABLE_PREFETCH
if (cur + (bitpos>>3) >= end)
{
s->err = 1;
break;
}
#endif
// Inner loop decoding symbols
do
{
uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
uint32_t len;
sym = s->u.lut.lenlut[next32 & ((1 << LOG2LENLUT) - 1)];
if ((uint32_t)sym < (uint32_t)(0x100<<5))
{
// We can lookup a second symbol if this was a short literal
len = sym & 0x1f;
sym >>= 5;
b[batch_len++] = sym;
next32 >>= len;
bitpos += len;
sym = s->u.lut.lenlut[next32 & ((1 << LOG2LENLUT) - 1)];
}
if (sym > 0) // short symbol
{
len = sym & 0x1f;
sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
}
else
{
// Slow length path
uint32_t next32r = __brev(next32);
const int16_t *symbols = &s->lensym[s->index_slow_len];
unsigned int first = s->first_slow_len;
int lext;
#pragma unroll 1
for (len = LOG2LENLUT+1; len <= MAXBITS; len++)
{
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->lencnt[len];
if (code < count) // if length len, return symbol
{
sym = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > MAXBITS)
{
s->err = -10;
sym = 256;
len = 0;
}
if (sym > 256)
{
sym -= 257;
lext = g_lext[sym];
sym = 256 + g_lens[sym] + bfe(next32, len, lext);
len += lext;
}
}
if (sym > 256)
{
int dist, dext;
// skipbits(s, len) inlined - no limit check
bitpos += len;
if (bitpos >= 32)
{
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *PREFETCH_ADDR32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
bitbuf.y = (cur < end) ? *(const uint32_t *)cur : 0;
cur -= 4;
#endif
bitpos &= 0x1f;
}
// get distance
next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
dist = s->u.lut.distlut[next32 & ((1 << LOG2DISTLUT) - 1)];
if (dist > 0)
{
len = dist & 0x1f;
dext = bfe(dist, 20, 5);
dist = bfe(dist, 5, 15);
sym |= (dist + bfe(next32, len, dext)) << 16;
len += dext;
}
else
{
uint32_t next32r = __brev(next32);
const int16_t *symbols = &s->distsym[s->index_slow_dist];
unsigned int first = s->first_slow_dist;
#pragma unroll 1
for (len = LOG2DISTLUT + 1; len <= MAXBITS; len++)
{
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->distcnt[len];
if (code < count) // if length len, return symbol
{
dist = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > MAXBITS)
{
s->err = -10;
sym = 256;
len = 0;
}
else
{
dext = g_dext[dist];
sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
len += dext;
}
}
}
// skipbits(s, len) inlined with added error check for reading past the end of the input buffer
bitpos += len;
if (bitpos >= 32)
{
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *PREFETCH_ADDR32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
if (cur < end)
{
bitbuf.y = *(const uint32_t *)cur;
cur -= 4;
}
else
{
bitbuf.y = 0;
cur -= 4;
if (cur > end)
{
s->err = 1;
sym = 256;
}
}
#endif
bitpos &= 0x1f;
}
if (sym == 256)
break;
b[batch_len++] = sym;
} while (batch_len < BATCH_SIZE-1);
s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
((volatile inflate_state_s *)s)->cur = cur;
#endif
if (batch_len != 0)
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (sym != 256);
while (s->x.batch_len[batch] != 0)
{
NANOSLEEP(150);
}
s->x.batch_len[batch] = -1;
s->bitbuf = bitbuf;
s->bitpos = bitpos;
#if !ENABLE_PREFETCH
s->cur = cur;
#endif
}
// Build lookup tables for faster decode
// LUT entry format is symbol*32 + length (code length in the low 5 bits)
__device__ void init_length_lut(inflate_state_s *s, int t)
{
int32_t *lut = s->u.lut.lenlut;
for (uint32_t bits = t; bits < (1 << LOG2LENLUT); bits += NUMTHREADS)
{
const int16_t *cnt = s->lencnt;
const int16_t *symbols = s->lensym;
int sym = -10 << 5;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - LOG2LENLUT);
for (unsigned int len = 1; len <= LOG2LENLUT; len++)
{
unsigned int code = (rbits >> (LOG2LENLUT - len)) - first;
unsigned int count = cnt[len];
if (code < count)
{
sym = symbols[code];
if (sym > 256)
{
int lext = g_lext[sym - 257];
sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16-5)) | (len << (24-5));
len += lext;
}
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t)
{
unsigned int first = 0;
unsigned int index = 0;
const int16_t *cnt = s->lencnt;
for (unsigned int len = 1; len <= LOG2LENLUT; len++)
{
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_len = first;
s->index_slow_len = index;
}
}
// Build lookup tables for faster decode of distance symbol
// LUT entry format is symbol*32 + length (code length in the low 5 bits)
__device__ void init_distance_lut(inflate_state_s *s, int t)
{
int32_t *lut = s->u.lut.distlut;
for (uint32_t bits = t; bits < (1 << LOG2DISTLUT); bits += NUMTHREADS)
{
const int16_t *cnt = s->distcnt;
const int16_t *symbols = s->distsym;
int sym = 0;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - LOG2DISTLUT);
for (unsigned int len = 1; len <= LOG2DISTLUT; len++)
{
unsigned int code = (rbits >> (LOG2DISTLUT - len)) - first;
unsigned int count = cnt[len];
if (code < count)
{
int dist = symbols[code];
int dext = g_dext[dist];
sym = g_dists[dist] | (dext << 15);
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t)
{
unsigned int first = 0;
unsigned int index = 0;
const int16_t *cnt = s->distcnt;
for (unsigned int len = 1; len <= LOG2DISTLUT; len++)
{
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_dist = first;
s->index_slow_dist = index;
}
}
// WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s *s, int t)
{
uint8_t *out = s->out;
const uint8_t *outend = s->outend;
const uint8_t *outbase = s->outbase;
int batch = 0;
do
{
volatile uint32_t *b = &s->x.u.symqueue[batch * BATCH_SIZE];
int batch_len, pos;
int32_t symt;
uint32_t lit_mask;
if (t == 0)
{
while ((batch_len = s->x.batch_len[batch]) == 0)
{
NANOSLEEP(100);
}
}
else
{
batch_len = 0;
}
batch_len = SHFL0(batch_len);
if (batch_len < 0)
{
break;
}
symt = (t < batch_len) ? b[t] : 256;
lit_mask = BALLOT(symt >= 256);
pos = min((__ffs(lit_mask) - 1) & 0xff, 32);
if (t == 0)
{
s->x.batch_len[batch] = 0;
}
if (t < pos && out+t < outend)
{
out[t] = symt;
}
out += pos;
batch_len -= pos;
while (batch_len > 0)
{
int dist, len, symbol;
// Process a non-literal symbol
symbol = SHFL(symt, pos);
len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
dist = symbol >> 16;
for (int i = t; i < len; i += 32)
{
const uint8_t *src = out + ((i >= dist) ? (i % dist) : i) - dist;
uint8_t b = (src < outbase) ? 0 : *src;
if (out + i < outend)
{
out[i] = b;
}
}
out += len;
pos++;
batch_len--;
// Process subsequent literals, if any
if (!((lit_mask >> pos) & 1))
{
len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
symbol = SHFL(symt, (pos + t) & 0x1f);
if (t < len && out + t < outend)
{
out[t] = symbol;
}
out += len;
pos += len;
batch_len -= len;
}
}
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (1);
if (t == 0)
{
s->out = out;
}
}
/*
* Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
* discarded. The value of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
__device__ int init_stored(inflate_state_s *s)
{
uint32_t len, nlen; // length of stored block
// Byte align
if (s->bitpos & 7)
{
skipbits(s, 8 - (s->bitpos & 7));
}
if (s->cur + (s->bitpos >> 3) >= s->end)
{
return 2; // Not enough input
}
// get length and check against its one's complement
len = getbits(s, 16);
nlen = getbits(s, 16);
if (len != (nlen ^ 0xffff))
{
return -2; // didn't match complement!
}
if (s->cur + (s->bitpos >> 3) + len > s->end)
{
return 2; // Not enough input
}
s->stored_blk_len = len;
// done with a valid stored block
return 0;
}
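// Illustrative worked example: a stored block begins on a byte boundary with
// 16-bit little-endian LEN and NLEN fields, where NLEN must be the one's
// complement of LEN. The bytes 05 00 FA FF give len = 0x0005 and nlen = 0xFFFA,
// 0x0005 == (0xFFFA ^ 0xFFFF) passes the check above, and 5 raw bytes follow.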
// Copy bytes from stored block to destination
__device__ void copy_stored(inflate_state_s *s, int t)
{
int len = s->stored_blk_len;
uint8_t *cur = s->cur + (s->bitpos >> 3);
uint8_t *out = s->out;
uint8_t *outend = s->outend;
uint8_t *cur4;
int slow_bytes = min(len, (int)((16 - (size_t)out) & 0xf));
int fast_bytes, bitpos;
// Slow copy until output is 16B aligned
if (slow_bytes)
{
for (int i = t; i < slow_bytes; i += NUMTHREADS)
{
if (out + i < outend)
{
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
cur += slow_bytes;
out += slow_bytes;
len -= slow_bytes;
}
fast_bytes = len;
if (out < outend)
{
fast_bytes = (int)min((size_t)fast_bytes, (outend - out));
}
fast_bytes &= ~0xf;
bitpos = ((int)(3 & (size_t)cur)) << 3;
cur4 = cur - (bitpos >> 3);
if (out < outend)
{
// Fast copy 16 bytes at a time
for (int i = t*16; i < fast_bytes; i += NUMTHREADS*16)
{
uint4 u;
u.x = *(const uint32_t *)(cur4 + i + 0*4);
u.y = *(const uint32_t *)(cur4 + i + 1*4);
u.z = *(const uint32_t *)(cur4 + i + 2*4);
u.w = *(const uint32_t *)(cur4 + i + 3*4);
if (bitpos != 0)
{
uint32_t v = (bitpos != 0) ? *(const uint32_t *)(cur4 + i + 4*4) : 0;
u.x = __funnelshift_rc(u.x, u.y, bitpos);
u.y = __funnelshift_rc(u.y, u.z, bitpos);
u.z = __funnelshift_rc(u.z, u.w, bitpos);
u.w = __funnelshift_rc(u.w, v, bitpos);
}
*(uint4 *)(out + i) = u;
}
}
cur += fast_bytes;
out += fast_bytes;
len -= fast_bytes;
// Slow copy for remaining bytes
for (int i = t; i < len; i += NUMTHREADS)
{
if (out + i < outend)
{
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
out += len;
__syncthreads();
if (t == 0)
{
// Reset bitstream to end of block
uint8_t *p = cur + len;
uint32_t prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
s->cur = p;
s->bitbuf.x = (p < s->end) ? *(uint32_t *)p : 0;
p += 4;
s->bitbuf.y = (p < s->end) ? *(uint32_t *)p : 0;
s->bitpos = prefix_bytes * 8;
s->out = out;
}
}
#if ENABLE_PREFETCH
__device__ void init_prefetcher(inflate_state_s *s, int t)
{
if (t == 0)
{
s->pref.cur_p = s->cur;
s->pref.run = 1;
}
}
__device__ void prefetch_warp(volatile inflate_state_s *s, int t)
{
const uint8_t *cur_p = s->pref.cur_p;
const uint8_t *end = s->end;
while (SHFL0((t == 0) ? s->pref.run : 0))
{
int32_t cur_lo = (int32_t)(size_t)cur_p;
int do_pref = SHFL0((t == 0) ? (cur_lo - *(volatile int32_t *)&s->cur < PREFETCH_SIZE-32*4-4) : 0);
if (do_pref)
{
const uint8_t *p = cur_p + 4*t;
*PREFETCH_ADDR32(s->pref, p) = (p < end) ? *(const uint32_t *)p : 0;
cur_p += 4*32;
__threadfence_block();
SYNCWARP();
if (!t)
{
s->pref.cur_p = cur_p;
__threadfence_block();
}
}
else if (t == 0)
{
NANOSLEEP(150);
}
}
}
#endif // ENABLE_PREFETCH
#define GZ_FLG_FTEXT 0x01 // ASCII text hint
#define GZ_FLG_FHCRC 0x02 // Header CRC present
#define GZ_FLG_FEXTRA 0x04 // Extra fields present
#define GZ_FLG_FNAME 0x08 // Original file name present
#define GZ_FLG_FCOMMENT 0x10 // Comment present
__device__ int parse_gzip_header(const uint8_t *src, size_t src_size)
{
int hdr_len = -1;
if (src_size >= 18)
{
uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
if (sig == 0x1f8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
{
uint32_t flags = src[3];
hdr_len = 10;
if (flags & GZ_FLG_FEXTRA) // Extra fields present
{
int xlen = src[hdr_len] | (src[hdr_len+1] << 8);
hdr_len += 2 + xlen; // skip the 2-byte XLEN field plus XLEN bytes of extra data (RFC 1952)
if (hdr_len >= src_size)
return -1;
}
if (flags & GZ_FLG_FNAME) // Original file name present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size)
return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZ_FLG_FCOMMENT) // Comment present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size)
return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZ_FLG_FHCRC) // Header CRC present
{
hdr_len += 2;
}
if (hdr_len+8 >= src_size)
hdr_len = -1;
}
}
return hdr_len;
}
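// Illustrative worked example: the smallest header this parser accepts is the
// fixed 10-byte prefix, e.g. 1F 8B 08 00 <MTIME:4> <XFL:1> <OS:1> with FLG = 0,
// for which parse_gzip_header() returns hdr_len = 10. The src_size >= 18 check
// accounts for that prefix plus the 8-byte CRC32/ISIZE trailer, which
// inflate_kernel() below strips (src_size - 8) before inflating the deflate
// stream that follows the header.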
// blockDim {128,1,1}
__global__ void __launch_bounds__(NUMTHREADS)
inflate_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int parse_hdr)
{
__shared__ __align__(16) inflate_state_s state_g;
int t = threadIdx.x;
int z = blockIdx.x;
inflate_state_s *state = &state_g;
if (!t)
{
uint8_t *p = (uint8_t *)inputs[z].srcDevice;
size_t src_size = inputs[z].srcSize;
uint32_t prefix_bytes;
// Parse header if needed
state->err = 0;
if (parse_hdr)
{
int hdr_len = parse_gzip_header(p, src_size);
src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer
if (hdr_len >= 0) {
p += hdr_len;
src_size -= hdr_len;
} else {
state->err = hdr_len;
}
}
// Initialize shared state
state->out = (uint8_t *)inputs[z].dstDevice;
state->outbase = state->out;
state->outend = state->out + inputs[z].dstSize;
state->end = p + src_size;
prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
state->cur = p;
state->bitbuf.x = (p < state->end) ? *(uint32_t *)p : 0;
p += 4;
state->bitbuf.y = (p < state->end) ? *(uint32_t *)p : 0;
state->bitpos = prefix_bytes * 8;
}
__syncthreads();
while (!state->err)
{
if (!t)
{
if (state->cur + (state->bitpos >> 3) >= state->end)
state->err = 2;
else
{
state->blast = getbits(state, 1);
state->btype = getbits(state, 2);
if (state->btype == 0)
state->err = init_stored(state);
else if (state->btype == 1)
state->err = init_fixed(state);
else if (state->btype == 2)
state->err = init_dynamic(state);
else
state->err = -1; // Invalid block
}
}
__syncthreads();
if (!state->err && (state->btype == 1 || state->btype == 2))
{
init_length_lut(state, t);
init_distance_lut(state, t);
#if ENABLE_PREFETCH
init_prefetcher(state, t);
#endif
if (t < BATCH_COUNT)
{
state->x.batch_len[t] = 0;
}
__syncthreads();
// decode data until end-of-block code
if (t < 1*32)
{
// WARP0
if (!t)
{
decode_symbols(state);
#if ENABLE_PREFETCH
state->pref.run = 0;
#endif
}
}
else if (t < 2*32)
{
// WARP1
process_symbols(state, t & 0x1f);
}
#if ENABLE_PREFETCH
else if (t < 3*32)
{
// WARP3: Prefetcher
prefetch_warp(state, t & 0x1f);
}
#endif
}
else if (!state->err && state->btype == 0)
{
copy_stored(state, t);
}
if (state->blast)
break;
__syncthreads();
}
__syncthreads();
if (!t)
{
if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end)
{
// Read past the end of the input buffer
state->err = 2;
}
else if (state->err == 0 && state->out > state->outend)
{
// Output buffer too small
state->err = 1;
}
outputs[z].bytes_written = state->out - state->outbase;
outputs[z].status = state->err;
outputs[z].reserved = (int)(state->end - state->cur);
}
}
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
copy_uncompressed_kernel(gpu_inflate_input_s *inputs)
{
__shared__ const uint8_t * volatile src_g;
__shared__ uint8_t * volatile dst_g;
__shared__ uint32_t volatile copy_len_g;
uint32_t t = threadIdx.x;
uint32_t z = blockIdx.x;
const uint8_t *src;
uint8_t *dst;
uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
if (!t) {
src = reinterpret_cast<const uint8_t *>(inputs[z].srcDevice);
dst = reinterpret_cast<uint8_t *>(inputs[z].dstDevice);
len = min((uint32_t)inputs[z].srcSize, (uint32_t)inputs[z].dstSize);
src_g = src;
dst_g = dst;
copy_len_g = len;
}
__syncthreads();
src = src_g;
dst = dst_g;
len = copy_len_g;
// Align output to 32-bit
dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
if (dst_align_bytes != 0) {
uint32_t align_len = min(dst_align_bytes, len);
if (t < align_len) {
dst[t] = src[t];
}
src += align_len;
dst += align_len;
len -= align_len;
}
src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
src_align_bits = src_align_bytes << 3;
while (len >= 32) {
const uint32_t *src32 = reinterpret_cast<const uint32_t *>(src - src_align_bytes);
uint32_t copy_cnt = min(len >> 2, 1024);
if (t < copy_cnt) {
uint32_t v = src32[t];
if (src_align_bits != 0) {
v = __funnelshift_r(v, src32[t + 1], src_align_bits);
}
reinterpret_cast<uint32_t *>(dst)[t] = v;
}
src += copy_cnt * 4;
dst += copy_cnt * 4;
len -= copy_cnt * 4;
}
if (t < len) {
dst[t] = src[t];
}
}
hipError_t __host__ gpuinflate(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count, int parse_hdr, hipStream_t stream)
{
if (count > 0) {
hipLaunchKernelGGL(( inflate_kernel) , dim3(count), dim3(NUMTHREADS), 0, stream , inputs, outputs, parse_hdr);
}
return hipSuccess;
}
hipError_t __host__ gpu_copy_uncompressed_blocks(gpu_inflate_input_s *inputs, int count, hipStream_t stream)
{
if (count > 0) {
hipLaunchKernelGGL(( copy_uncompressed_kernel) , dim3(count), dim3(1024), 0, stream , inputs);
}
return hipSuccess;
}
} // namespace io
} // namespace cudf
|
a9102babd1750d3c8ff078d51f6450f9838e8f8d.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler [email protected]
*/
#include "gpuinflate.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
#define NUMTHREADS 128 // Threads per block
#define MAXBITS 15 // maximum bits in a code
#define MAXLCODES 286 // maximum number of literal/length codes
#define MAXDCODES 30 // maximum number of distance codes
#define MAXCODES (MAXLCODES+MAXDCODES) // maximum codes lengths to read
#define FIXLCODES 288 // number of fixed literal/length codes
#define LOG2LENLUT 10
#define LOG2DISTLUT 8
struct scratch_arr
{
int16_t lengths[MAXCODES]; // descriptor code lengths
int16_t offs[MAXBITS + 1]; // offset in symbol table for each length (scratch)
};
struct lut_arr
{
int32_t lenlut[1 << LOG2LENLUT]; // LUT for length decoding
int32_t distlut[1 << LOG2DISTLUT]; // LUT for fast distance decoding
};
// 4 batches of 32 symbols
#define LOG2_BATCH_COUNT 2 // 1..5
#define LOG2_BATCH_SIZE 5
#define BATCH_COUNT (1 << LOG2_BATCH_COUNT)
#define BATCH_SIZE (1 << LOG2_BATCH_SIZE)
struct xwarp_s
{
int32_t batch_len[BATCH_COUNT]; // Length of each batch - <0:end, 0:not ready, >0:symbol count
union {
uint32_t symqueue[BATCH_COUNT * BATCH_SIZE];
uint8_t symqueue8[BATCH_COUNT * BATCH_SIZE * 4];
} u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
#define LOG2_PREFETCH_SIZE 9 // Must be at least LOG2_BATCH_SIZE+3
#define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE)
#define PREFETCH_ADDR32(q, p) (uint32_t *)(&q.pref_data[(PREFETCH_SIZE - 4) & (size_t)(p)])
struct prefetch_queue_s
{
const uint8_t *cur_p; // Prefetch location
int run; // prefetcher will exit when run=0
uint8_t pref_data[PREFETCH_SIZE];
};
#endif // ENABLE_PREFETCH
struct inflate_state_s {
// output state
uint8_t *out; // output buffer
uint8_t *outbase; // start of output buffer
uint8_t *outend; // end of output buffer
// Input state
uint8_t *cur; // input buffer
uint8_t *end; // end of input buffer
uint2 bitbuf; // bit buffer (64-bit)
uint32_t bitpos; // position in bit buffer
int32_t err;
int btype; // current block type
int blast; // last block
uint32_t stored_blk_len;// length of stored (uncompressed) block
uint16_t first_slow_len; // first code not in fast LUT
uint16_t index_slow_len;
uint16_t first_slow_dist;
uint16_t index_slow_dist;
volatile xwarp_s x;
#if ENABLE_PREFETCH
volatile prefetch_queue_s pref;
#endif
int16_t lencnt[MAXBITS+1];
int16_t lensym[FIXLCODES]; // Assumes FIXLCODES >= MAXLCODES
int16_t distcnt[MAXBITS+1];
int16_t distsym[MAXDCODES];
union {
scratch_arr scratch;
lut_arr lut;
} u;
};
inline __device__ unsigned int bfe(unsigned int source, unsigned int bit_start, unsigned int num_bits)
{
unsigned int bits;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
return bits;
};
inline __device__ uint32_t showbits(inflate_state_s *s, uint32_t n)
{
uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
return (next32 & ((1 << n) - 1));
}
inline __device__ uint32_t nextbits32(inflate_state_s *s)
{
return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
inline __device__ void skipbits(inflate_state_s *s, uint32_t n)
{
uint32_t bitpos = s->bitpos + n;
if (bitpos >= 32)
{
uint8_t *cur = s->cur + 8;
s->bitbuf.x = s->bitbuf.y;
s->bitbuf.y = (cur < s->end) ? *(uint32_t *)cur : 0;
s->cur = cur - 4;
bitpos &= 0x1f;
}
s->bitpos = bitpos;
}
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would become quite a bit faster
__device__ uint32_t getbits(inflate_state_s *s, uint32_t n)
{
uint32_t v = showbits(s, n);
skipbits(s, n);
return v;
}
/*
* Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading MAXBITS bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s *s, const int16_t *counts, const int16_t *symbols)
{
unsigned int len; // current number of bits in code
unsigned int code; // len bits being decoded
unsigned int first; // first code of length len
unsigned int count; // number of codes of length len
uint32_t next32r = __brev(nextbits32(s));
first = 0;
for (len = 1; len <= MAXBITS; len++)
{
code = (next32r >> (32 - len)) - first;
count = counts[len];
if (code < count) // if length len, return symbol
{
skipbits(s, len);
return symbols[code];
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
return -10; // ran out of codes
}
/*
* Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= MAXBITS
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(inflate_state_s *s, int16_t *counts, int16_t *symbols, const int16_t *length, int n)
{
int symbol; // current symbol when stepping through length[]
int len; // current length when stepping through counts[]
int left; // number of possible codes left of current length
int16_t *offs = s->u.scratch.offs;
// count number of codes of each length
for (len = 0; len <= MAXBITS; len++)
counts[len] = 0;
for (symbol = 0; symbol < n; symbol++)
(counts[length[symbol]])++; // assumes lengths are within bounds
if (counts[0] == n) // no codes!
return 0; // complete, but decode() will fail
// check for an over-subscribed or incomplete set of lengths
left = 1; // one possible code of zero length
for (len = 1; len <= MAXBITS; len++)
{
left <<= 1; // one more bit, double codes left
left -= counts[len]; // deduct count from possible codes
if (left < 0)
return left; // over-subscribed--return negative
} // left > 0 means incomplete
// generate offsets into symbol table for each length for sorting
offs[1] = 0;
for (len = 1; len < MAXBITS; len++)
offs[len + 1] = offs[len] + counts[len];
// put symbols in table sorted by length, by symbol order within each length
for (symbol = 0; symbol < n; symbol++)
if (length[symbol] != 0)
symbols[offs[length[symbol]]++] = symbol;
// return zero for complete set, positive for incomplete set
return left;
}
// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff };
// Dynamic block (custom huffman tables)
__device__ int init_dynamic(inflate_state_s *s)
{
int nlen, ndist, ncode; /* number of lengths in descriptor */
int index; /* index of lengths[] */
int err; /* construct() return value */
int16_t *lengths = s->u.scratch.lengths;
// get number of lengths in each table, check lengths
nlen = getbits(s, 5) + 257;
ndist = getbits(s, 5) + 1;
ncode = getbits(s, 4) + 4;
if (nlen > MAXLCODES || ndist > MAXDCODES)
{
return -3; // bad counts
}
// read code length code lengths (really), missing lengths are zero
for (index = 0; index < ncode; index++)
lengths[g_code_order[index]] = getbits(s, 3);
for (; index < 19; index++)
lengths[g_code_order[index]] = 0;
// build huffman table for code lengths codes (use lencode temporarily)
err = construct(s, s->lencnt, s->lensym, lengths, 19);
if (err != 0) // require complete code set here
return -4;
// read length/literal and distance code length tables
index = 0;
while (index < nlen + ndist)
{
int symbol = decode(s, s->lencnt, s->lensym);
if (symbol < 0)
return symbol; // invalid symbol
if (symbol < 16) // length in 0..15
lengths[index++] = symbol;
else { // repeat instruction
int len = 0; // last length to repeat, assume repeating zeros
if (symbol == 16) { // repeat last length 3..6 times
if (index == 0)
return -5; // no last length!
len = lengths[index - 1]; // last length
symbol = 3 + getbits(s, 2);
}
else if (symbol == 17) // repeat zero 3..10 times
symbol = 3 + getbits(s, 3);
else // == 18, repeat zero 11..138 times
symbol = 11 + getbits(s, 7);
if (index + symbol > nlen + ndist)
return -6; // too many lengths!
while (symbol--) // repeat last or zero symbol times
lengths[index++] = len;
}
}
// check for end-of-block code -- there better be one!
if (lengths[256] == 0)
return -9;
// build huffman table for literal/length codes
err = construct(s, s->lencnt, s->lensym, lengths, nlen);
if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
return -7; // incomplete code ok only for single length 1 code
// build huffman table for distance codes
err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
return -8; // incomplete code ok only for single length 1 code
return 0;
}
/*
* Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
* of the code. They are eight bits long and the longest literal/length
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s *s)
{
int16_t *lengths = s->u.scratch.lengths;
int symbol;
// literal/length table
for (symbol = 0; symbol < 144; symbol++)
lengths[symbol] = 8;
for (; symbol < 256; symbol++)
lengths[symbol] = 9;
for (; symbol < 280; symbol++)
lengths[symbol] = 7;
for (; symbol < FIXLCODES; symbol++)
lengths[symbol] = 8;
construct(s, s->lencnt, s->lensym, lengths, FIXLCODES);
// distance table
for (symbol = 0; symbol < MAXDCODES; symbol++)
lengths[symbol] = 5;
// build huffman table for distance codes
construct(s, s->distcnt, s->distsym, lengths, MAXDCODES);
return 0;
}
/*
* Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
* pairs terminated by an end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
 * followed by a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
// Base values and extra-bit counts for length codes 257..285 and distance codes 0..29 (RFC 1951)
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258 };
static const __device__ __constant__ uint16_t g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4,
5, 5, 5, 5, 0 };
static const __device__ __constant__ uint16_t g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577 };
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
12, 12, 13, 13 };
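/*
 * Illustrative sketch (not part of the original decoder): how a decoded symbol
 * plus its extra bits map to an actual length or distance using the base/extra
 * tables above. For example, length symbol 274 has base g_lens[274-257] = 43
 * and g_lext[274-257] = 3 extra bits, so it covers lengths 43..50. The helpers
 * below assume the caller has already read the extra bits from the bitstream.
 */
static __device__ __forceinline__ int decoded_length(int length_symbol, uint32_t extra_bits)
{
    // length_symbol in 257..285, extra_bits holds g_lext[length_symbol - 257] bits
    return g_lens[length_symbol - 257] + (int)extra_bits;
}
static __device__ __forceinline__ int decoded_distance(int dist_symbol, uint32_t extra_bits)
{
    // dist_symbol in 0..29, extra_bits holds g_dext[dist_symbol] bits
    return g_dists[dist_symbol] + (int)extra_bits;
}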
// Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s *s)
{
uint32_t bitpos = s->bitpos;
uint2 bitbuf = s->bitbuf;
uint8_t *cur = s->cur;
uint8_t *end = s->end;
int32_t batch = 0;
int32_t sym, batch_len;
do
{
volatile uint32_t *b = &s->x.u.symqueue[batch * BATCH_SIZE];
// Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
// Wait for prefetcher to fetch a worst-case of 48 bits per symbol
while ((*(volatile int32_t *)&s->pref.cur_p - (int32_t)(size_t)cur < BATCH_SIZE*6)
|| (s->x.batch_len[batch] != 0))
#else
while (s->x.batch_len[batch] != 0)
#endif
{
NANOSLEEP(100);
}
batch_len = 0;
#if ENABLE_PREFETCH
if (cur + (bitpos>>3) >= end)
{
s->err = 1;
break;
}
#endif
// Inner loop decoding symbols
do
{
uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
uint32_t len;
sym = s->u.lut.lenlut[next32 & ((1 << LOG2LENLUT) - 1)];
if ((uint32_t)sym < (uint32_t)(0x100<<5))
{
// We can lookup a second symbol if this was a short literal
len = sym & 0x1f;
sym >>= 5;
b[batch_len++] = sym;
next32 >>= len;
bitpos += len;
sym = s->u.lut.lenlut[next32 & ((1 << LOG2LENLUT) - 1)];
}
if (sym > 0) // short symbol
{
len = sym & 0x1f;
sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
}
else
{
// Slow length path
uint32_t next32r = __brev(next32);
const int16_t *symbols = &s->lensym[s->index_slow_len];
unsigned int first = s->first_slow_len;
int lext;
#pragma unroll 1
for (len = LOG2LENLUT+1; len <= MAXBITS; len++)
{
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->lencnt[len];
if (code < count) // if length len, return symbol
{
sym = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > MAXBITS)
{
s->err = -10;
sym = 256;
len = 0;
}
if (sym > 256)
{
sym -= 257;
lext = g_lext[sym];
sym = 256 + g_lens[sym] + bfe(next32, len, lext);
len += lext;
}
}
if (sym > 256)
{
int dist, dext;
// skipbits(s, len) inlined - no limit check
bitpos += len;
if (bitpos >= 32)
{
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *PREFETCH_ADDR32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
bitbuf.y = (cur < end) ? *(const uint32_t *)cur : 0;
cur -= 4;
#endif
bitpos &= 0x1f;
}
// get distance
next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
dist = s->u.lut.distlut[next32 & ((1 << LOG2DISTLUT) - 1)];
if (dist > 0)
{
len = dist & 0x1f;
dext = bfe(dist, 20, 5);
dist = bfe(dist, 5, 15);
sym |= (dist + bfe(next32, len, dext)) << 16;
len += dext;
}
else
{
uint32_t next32r = __brev(next32);
const int16_t *symbols = &s->distsym[s->index_slow_dist];
unsigned int first = s->first_slow_dist;
#pragma unroll 1
for (len = LOG2DISTLUT + 1; len <= MAXBITS; len++)
{
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->distcnt[len];
if (code < count) // if length len, return symbol
{
dist = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > MAXBITS)
{
s->err = -10;
sym = 256;
len = 0;
}
else
{
dext = g_dext[dist];
sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
len += dext;
}
}
}
// skipbits(s, len) inlined with added error check for reading past the end of the input buffer
bitpos += len;
if (bitpos >= 32)
{
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *PREFETCH_ADDR32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
if (cur < end)
{
bitbuf.y = *(const uint32_t *)cur;
cur -= 4;
}
else
{
bitbuf.y = 0;
cur -= 4;
if (cur > end)
{
s->err = 1;
sym = 256;
}
}
#endif
bitpos &= 0x1f;
}
if (sym == 256)
break;
b[batch_len++] = sym;
} while (batch_len < BATCH_SIZE-1);
s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
((volatile inflate_state_s *)s)->cur = cur;
#endif
if (batch_len != 0)
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (sym != 256);
while (s->x.batch_len[batch] != 0)
{
NANOSLEEP(150);
}
s->x.batch_len[batch] = -1;
s->bitbuf = bitbuf;
s->bitpos = bitpos;
#if !ENABLE_PREFETCH
s->cur = cur;
#endif
}
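/*
 * Illustrative sketch (not part of the original file): the packing convention
 * for entries in the shared symbol queue filled by decode_symbols() above and
 * drained by process_symbols() below. A literal byte is stored as-is (0..255);
 * a match stores (256 + length) in the low 16 bits and the distance in the
 * high 16 bits, matching the "(symbol & 0xffff) - 256" / "symbol >> 16"
 * extraction in process_symbols().
 */
static __device__ __forceinline__ uint32_t pack_match_symbol(int length, int distance)
{
    return (uint32_t)(256 + length) | ((uint32_t)distance << 16);
}
static __device__ __forceinline__ void unpack_queue_symbol(uint32_t sym, int *length, int *distance)
{
    *length   = (int)(sym & 0xffff) - 256;  // <= 0 means the entry is a literal byte
    *distance = (int)(sym >> 16);
}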
// Build lookup tables for faster decode
// LUT format is (symbol << 5) | code_length
__device__ void init_length_lut(inflate_state_s *s, int t)
{
int32_t *lut = s->u.lut.lenlut;
for (uint32_t bits = t; bits < (1 << LOG2LENLUT); bits += NUMTHREADS)
{
const int16_t *cnt = s->lencnt;
const int16_t *symbols = s->lensym;
int sym = -10 << 5;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - LOG2LENLUT);
for (unsigned int len = 1; len <= LOG2LENLUT; len++)
{
unsigned int code = (rbits >> (LOG2LENLUT - len)) - first;
unsigned int count = cnt[len];
if (code < count)
{
sym = symbols[code];
if (sym > 256)
{
int lext = g_lext[sym - 257];
sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16-5)) | (len << (24-5));
len += lext;
}
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t)
{
unsigned int first = 0;
unsigned int index = 0;
const int16_t *cnt = s->lencnt;
for (unsigned int len = 1; len <= LOG2LENLUT; len++)
{
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_len = first;
s->index_slow_len = index;
}
}
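/*
 * Illustrative sketch (not part of the original file) of the length LUT entry
 * layout built above: bits 4:0 hold the total bits to consume (code length plus
 * any extra length bits), bits 14:5 hold the literal value or 256 + g_lens[]
 * base length, bits 20:16 hold the extra-bit mask, and bits 28:24 the bit
 * offset where the extra bits start. This mirrors the fast-path extraction in
 * decode_symbols() and is only meaningful for positive (fast-path) entries.
 */
static __device__ __forceinline__ int unpack_lenlut_entry(int32_t entry, uint32_t next32, uint32_t *bits_consumed)
{
    *bits_consumed = (uint32_t)entry & 0x1f;
    int base  = (entry >> 5) & 0x3ff;                               // literal or 256 + base length
    int extra = (next32 >> (entry >> 24)) & ((entry >> 16) & 0x1f); // zero for literals
    return base + extra;
}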
// Build lookup tables for faster decode of distance symbol
// LUT format is (symbol << 5) | code_length
__device__ void init_distance_lut(inflate_state_s *s, int t)
{
int32_t *lut = s->u.lut.distlut;
for (uint32_t bits = t; bits < (1 << LOG2DISTLUT); bits += NUMTHREADS)
{
const int16_t *cnt = s->distcnt;
const int16_t *symbols = s->distsym;
int sym = 0;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - LOG2DISTLUT);
for (unsigned int len = 1; len <= LOG2DISTLUT; len++)
{
unsigned int code = (rbits >> (LOG2DISTLUT - len)) - first;
unsigned int count = cnt[len];
if (code < count)
{
int dist = symbols[code];
int dext = g_dext[dist];
sym = g_dists[dist] | (dext << 15);
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t)
{
unsigned int first = 0;
unsigned int index = 0;
const int16_t *cnt = s->distcnt;
for (unsigned int len = 1; len <= LOG2DISTLUT; len++)
{
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_dist = first;
s->index_slow_dist = index;
}
}
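/*
 * Illustrative sketch (not part of the original file) of the distance LUT entry
 * layout built above: bits 4:0 hold the Huffman code length, bits 19:5 the
 * g_dists[] base distance, and bits 24:20 the g_dext[] extra-bit count; an
 * entry of zero sends decode_symbols() down the slow bit-by-bit path. The
 * shifts and masks below are equivalent to the bfe() extraction used there.
 */
static __device__ __forceinline__ void unpack_distlut_entry(int32_t entry, int *code_len, int *base_dist, int *extra_bits)
{
    *code_len   = entry & 0x1f;
    *base_dist  = (entry >> 5) & 0x7fff;
    *extra_bits = (entry >> 20) & 0x1f;
}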
// WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s *s, int t)
{
uint8_t *out = s->out;
const uint8_t *outend = s->outend;
const uint8_t *outbase = s->outbase;
int batch = 0;
do
{
volatile uint32_t *b = &s->x.u.symqueue[batch * BATCH_SIZE];
int batch_len, pos;
int32_t symt;
uint32_t lit_mask;
if (t == 0)
{
while ((batch_len = s->x.batch_len[batch]) == 0)
{
NANOSLEEP(100);
}
}
else
{
batch_len = 0;
}
batch_len = SHFL0(batch_len);
if (batch_len < 0)
{
break;
}
symt = (t < batch_len) ? b[t] : 256;
lit_mask = BALLOT(symt >= 256);
pos = min((__ffs(lit_mask) - 1) & 0xff, 32);
if (t == 0)
{
s->x.batch_len[batch] = 0;
}
if (t < pos && out+t < outend)
{
out[t] = symt;
}
out += pos;
batch_len -= pos;
while (batch_len > 0)
{
int dist, len, symbol;
// Process a non-literal symbol
symbol = SHFL(symt, pos);
len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
dist = symbol >> 16;
for (int i = t; i < len; i += 32)
{
const uint8_t *src = out + ((i >= dist) ? (i % dist) : i) - dist;
uint8_t b = (src < outbase) ? 0 : *src;
if (out + i < outend)
{
out[i] = b;
}
}
out += len;
pos++;
batch_len--;
// Process subsequent literals, if any
if (!((lit_mask >> pos) & 1))
{
len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
symbol = SHFL(symt, (pos + t) & 0x1f);
if (t < len && out + t < outend)
{
out[t] = symbol;
}
out += len;
pos += len;
batch_len -= len;
}
}
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (1);
if (t == 0)
{
s->out = out;
}
}
/*
* Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
* discarded. The value of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
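/*
 * Worked example (illustrative): a stored block holding five bytes begins with
 * LEN = 0x0005 and NLEN = 0xFFFA. init_stored() below accepts it because
 * 0x0005 == (0xFFFA ^ 0xffff), records stored_blk_len = 5, and copy_stored()
 * then copies those five bytes straight to the output.
 */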
__device__ int init_stored(inflate_state_s *s)
{
uint32_t len, nlen; // length of stored block
// Byte align
if (s->bitpos & 7)
{
skipbits(s, 8 - (s->bitpos & 7));
}
if (s->cur + (s->bitpos >> 3) >= s->end)
{
return 2; // Not enough input
}
// get length and check against its one's complement
len = getbits(s, 16);
nlen = getbits(s, 16);
if (len != (nlen ^ 0xffff))
{
return -2; // didn't match complement!
}
if (s->cur + (s->bitpos >> 3) + len > s->end)
{
return 2; // Not enough input
}
s->stored_blk_len = len;
// done with a valid stored block
return 0;
}
// Copy bytes from stored block to destination
__device__ void copy_stored(inflate_state_s *s, int t)
{
int len = s->stored_blk_len;
uint8_t *cur = s->cur + (s->bitpos >> 3);
uint8_t *out = s->out;
uint8_t *outend = s->outend;
uint8_t *cur4;
int slow_bytes = min(len, (int)((16 - (size_t)out) & 0xf));
int fast_bytes, bitpos;
// Slow copy until output is 16B aligned
if (slow_bytes)
{
for (int i = t; i < slow_bytes; i += NUMTHREADS)
{
if (out + i < outend)
{
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
cur += slow_bytes;
out += slow_bytes;
len -= slow_bytes;
}
fast_bytes = len;
if (out < outend)
{
fast_bytes = (int)min((size_t)fast_bytes, (outend - out));
}
fast_bytes &= ~0xf;
bitpos = ((int)(3 & (size_t)cur)) << 3;
cur4 = cur - (bitpos >> 3);
if (out < outend)
{
// Fast copy 16 bytes at a time
for (int i = t*16; i < fast_bytes; i += NUMTHREADS*16)
{
uint4 u;
u.x = *(const uint32_t *)(cur4 + i + 0*4);
u.y = *(const uint32_t *)(cur4 + i + 1*4);
u.z = *(const uint32_t *)(cur4 + i + 2*4);
u.w = *(const uint32_t *)(cur4 + i + 3*4);
if (bitpos != 0)
{
uint32_t v = (bitpos != 0) ? *(const uint32_t *)(cur4 + i + 4*4) : 0;
u.x = __funnelshift_rc(u.x, u.y, bitpos);
u.y = __funnelshift_rc(u.y, u.z, bitpos);
u.z = __funnelshift_rc(u.z, u.w, bitpos);
u.w = __funnelshift_rc(u.w, v, bitpos);
}
*(uint4 *)(out + i) = u;
}
}
cur += fast_bytes;
out += fast_bytes;
len -= fast_bytes;
// Slow copy for remaining bytes
for (int i = t; i < len; i += NUMTHREADS)
{
if (out + i < outend)
{
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
out += len;
__syncthreads();
if (t == 0)
{
// Reset bitstream to end of block
uint8_t *p = cur + len;
uint32_t prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
s->cur = p;
s->bitbuf.x = (p < s->end) ? *(uint32_t *)p : 0;
p += 4;
s->bitbuf.y = (p < s->end) ? *(uint32_t *)p : 0;
s->bitpos = prefix_bytes * 8;
s->out = out;
}
}
#if ENABLE_PREFETCH
__device__ void init_prefetcher(inflate_state_s *s, int t)
{
if (t == 0)
{
s->pref.cur_p = s->cur;
s->pref.run = 1;
}
}
__device__ void prefetch_warp(volatile inflate_state_s *s, int t)
{
const uint8_t *cur_p = s->pref.cur_p;
const uint8_t *end = s->end;
while (SHFL0((t == 0) ? s->pref.run : 0))
{
int32_t cur_lo = (int32_t)(size_t)cur_p;
int do_pref = SHFL0((t == 0) ? (cur_lo - *(volatile int32_t *)&s->cur < PREFETCH_SIZE-32*4-4) : 0);
if (do_pref)
{
const uint8_t *p = cur_p + 4*t;
*PREFETCH_ADDR32(s->pref, p) = (p < end) ? *(const uint32_t *)p : 0;
cur_p += 4*32;
__threadfence_block();
SYNCWARP();
if (!t)
{
s->pref.cur_p = cur_p;
__threadfence_block();
}
}
else if (t == 0)
{
NANOSLEEP(150);
}
}
}
#endif // ENABLE_PREFETCH
#define GZ_FLG_FTEXT 0x01 // ASCII text hint
#define GZ_FLG_FHCRC 0x02 // Header CRC present
#define GZ_FLG_FEXTRA 0x04 // Extra fields present
#define GZ_FLG_FNAME 0x08 // Original file name present
#define GZ_FLG_FCOMMENT 0x10 // Comment present
__device__ int parse_gzip_header(const uint8_t *src, size_t src_size)
{
int hdr_len = -1;
if (src_size >= 18)
{
uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
if (sig == 0x1f8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
{
uint32_t flags = src[3];
hdr_len = 10;
if (flags & GZ_FLG_FEXTRA) // Extra fields present
{
int xlen = src[hdr_len] | (src[hdr_len+1] << 8);
        hdr_len += 2 + xlen; // skip the 2-byte XLEN field plus xlen bytes of extra data
if (hdr_len >= src_size)
return -1;
}
if (flags & GZ_FLG_FNAME) // Original file name present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size)
return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZ_FLG_FCOMMENT) // Comment present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size)
return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZ_FLG_FHCRC) // Header CRC present
{
hdr_len += 2;
}
if (hdr_len+8 >= src_size)
hdr_len = -1;
}
}
return hdr_len;
}
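/*
 * Worked example (illustrative): the smallest input parse_gzip_header() accepts
 * is the bare 10-byte header {0x1f, 0x8b, 0x08, 0x00, <4-byte mtime>, XFL, OS}
 * with no optional fields, returning hdr_len == 10 -- provided the buffer also
 * holds at least one byte of deflate data and the 8-byte CRC32/ISIZE trailer
 * (the "hdr_len+8 >= src_size" check above rejects anything smaller).
 */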
// blockDim {128,1,1}
__global__ void __launch_bounds__(NUMTHREADS)
inflate_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int parse_hdr)
{
__shared__ __align__(16) inflate_state_s state_g;
int t = threadIdx.x;
int z = blockIdx.x;
inflate_state_s *state = &state_g;
if (!t)
{
uint8_t *p = (uint8_t *)inputs[z].srcDevice;
size_t src_size = inputs[z].srcSize;
uint32_t prefix_bytes;
// Parse header if needed
state->err = 0;
if (parse_hdr)
{
int hdr_len = parse_gzip_header(p, src_size);
src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer
if (hdr_len >= 0) {
p += hdr_len;
src_size -= hdr_len;
} else {
state->err = hdr_len;
}
}
// Initialize shared state
state->out = (uint8_t *)inputs[z].dstDevice;
state->outbase = state->out;
state->outend = state->out + inputs[z].dstSize;
state->end = p + src_size;
prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
state->cur = p;
state->bitbuf.x = (p < state->end) ? *(uint32_t *)p : 0;
p += 4;
state->bitbuf.y = (p < state->end) ? *(uint32_t *)p : 0;
state->bitpos = prefix_bytes * 8;
}
__syncthreads();
while (!state->err)
{
if (!t)
{
if (state->cur + (state->bitpos >> 3) >= state->end)
state->err = 2;
else
{
state->blast = getbits(state, 1);
state->btype = getbits(state, 2);
if (state->btype == 0)
state->err = init_stored(state);
else if (state->btype == 1)
state->err = init_fixed(state);
else if (state->btype == 2)
state->err = init_dynamic(state);
else
state->err = -1; // Invalid block
}
}
__syncthreads();
if (!state->err && (state->btype == 1 || state->btype == 2))
{
init_length_lut(state, t);
init_distance_lut(state, t);
#if ENABLE_PREFETCH
init_prefetcher(state, t);
#endif
if (t < BATCH_COUNT)
{
state->x.batch_len[t] = 0;
}
__syncthreads();
// decode data until end-of-block code
if (t < 1*32)
{
// WARP0
if (!t)
{
decode_symbols(state);
#if ENABLE_PREFETCH
state->pref.run = 0;
#endif
}
}
else if (t < 2*32)
{
// WARP1
process_symbols(state, t & 0x1f);
}
#if ENABLE_PREFETCH
else if (t < 3*32)
{
// WARP3: Prefetcher
prefetch_warp(state, t & 0x1f);
}
#endif
}
else if (!state->err && state->btype == 0)
{
copy_stored(state, t);
}
if (state->blast)
break;
__syncthreads();
}
__syncthreads();
if (!t)
{
if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end)
{
// Read past the end of the input buffer
state->err = 2;
}
else if (state->err == 0 && state->out > state->outend)
{
// Output buffer too small
state->err = 1;
}
outputs[z].bytes_written = state->out - state->outbase;
outputs[z].status = state->err;
outputs[z].reserved = (int)(state->end - state->cur);
}
}
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
copy_uncompressed_kernel(gpu_inflate_input_s *inputs)
{
__shared__ const uint8_t * volatile src_g;
__shared__ uint8_t * volatile dst_g;
__shared__ uint32_t volatile copy_len_g;
uint32_t t = threadIdx.x;
uint32_t z = blockIdx.x;
const uint8_t *src;
uint8_t *dst;
uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
if (!t) {
src = reinterpret_cast<const uint8_t *>(inputs[z].srcDevice);
dst = reinterpret_cast<uint8_t *>(inputs[z].dstDevice);
len = min((uint32_t)inputs[z].srcSize, (uint32_t)inputs[z].dstSize);
src_g = src;
dst_g = dst;
copy_len_g = len;
}
__syncthreads();
src = src_g;
dst = dst_g;
len = copy_len_g;
// Align output to 32-bit
dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
if (dst_align_bytes != 0) {
uint32_t align_len = min(dst_align_bytes, len);
if (t < align_len) {
dst[t] = src[t];
}
src += align_len;
dst += align_len;
len -= align_len;
}
src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
src_align_bits = src_align_bytes << 3;
while (len >= 32) {
const uint32_t *src32 = reinterpret_cast<const uint32_t *>(src - src_align_bytes);
uint32_t copy_cnt = min(len >> 2, 1024);
if (t < copy_cnt) {
uint32_t v = src32[t];
if (src_align_bits != 0) {
v = __funnelshift_r(v, src32[t + 1], src_align_bits);
}
reinterpret_cast<uint32_t *>(dst)[t] = v;
}
src += copy_cnt * 4;
dst += copy_cnt * 4;
len -= copy_cnt * 4;
}
if (t < len) {
dst[t] = src[t];
}
}
cudaError_t __host__ gpuinflate(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count, int parse_hdr, cudaStream_t stream)
{
if (count > 0) {
inflate_kernel <<< count, NUMTHREADS, 0, stream >>>(inputs, outputs, parse_hdr);
}
return cudaSuccess;
}
cudaError_t __host__ gpu_copy_uncompressed_blocks(gpu_inflate_input_s *inputs, int count, cudaStream_t stream)
{
if (count > 0) {
copy_uncompressed_kernel <<< count, 1024, 0, stream >>>(inputs);
}
return cudaSuccess;
}
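/*
 * Illustrative usage sketch (assumptions: gpu_inflate_input_s exposes the
 * srcDevice/srcSize/dstDevice/dstSize members referenced by inflate_kernel(),
 * and both descriptor arrays live in memory the GPU can read). Not part of the
 * original file.
 *
 *   gpu_inflate_input_s  *d_in;   // one descriptor per compressed block
 *   gpu_inflate_status_s *d_out;  // one status entry per block
 *   // ... fill each d_in[i] with device pointers/sizes and copy to the GPU ...
 *   gpuinflate(d_in, d_out, num_blocks, 1, stream);   // parse_hdr = 1 for gzip input
 *   // ... copy d_out back and check status / bytes_written per block ...
 */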
} // namespace io
} // namespace cudf
|
bc93f509ebb22b0cab4ebae67be365fac6eb79ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "parallax/linearFlowWarper.hpp"
#include "./kernels/patchDifferenceFunction.cu"
#include "backend/common/vectorOps.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "backend/cuda/core1/kernels/samplingKernel.cu"
#include "gpu/image/sampling.hpp"
#include "gpu/image/imageOps.hpp"
#include "gpu/image/blur.hpp"
#include "gpu/stream.hpp"
#include "cuda/error.hpp"
#include "cuda/util.hpp"
#include <string.h>
namespace VideoStitch {
namespace Core {
#define WARPER_BLOCK_SIZE_X 16
#define WARPER_BLOCK_SIZE_Y 16
struct BilinearLookupFlow {
typedef float2 Type;
static inline __device__ Type outOfRangeValue() { return make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); }
static inline __device__ Type interpolate(const float2 uv, const Type topLeft, const Type topRight,
const Type bottomRight, const Type bottomLeft) {
Type total = make_float2(0, 0);
float weight = 0;
const int uTopLeft = floorf(uv.x);
const int vTopLeft = floorf(uv.y);
const float du = (uv.x - uTopLeft);
const float dv = (uv.y - vTopLeft);
if (topLeft.x != INVALID_FLOW_VALUE) {
total += topLeft * (1.0f - du) * (1.0f - dv);
weight += (1.0f - du) * (1.0f - dv);
} else {
return outOfRangeValue();
}
if (topRight.x != INVALID_FLOW_VALUE) {
total += topRight * du * (1.0f - dv);
weight += du * (1.0f - dv);
} else {
return outOfRangeValue();
}
    if (bottomLeft.x != INVALID_FLOW_VALUE) {
      total += bottomLeft * (1.0f - du) * dv;
      weight += (1.0f - du) * dv;
    } else {
      return outOfRangeValue();
    }
    if (bottomRight.x != INVALID_FLOW_VALUE) {
      total += bottomRight * du * dv;
      weight += du * dv;
    } else {
      return outOfRangeValue();
    }
if (weight) {
return total / weight;
} else {
return outOfRangeValue();
}
}
};
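/*
 * Worked example (illustrative): for a lookup at uv = (10.25, 20.75) the
 * fractional offsets are du = 0.25 and dv = 0.75, so the four corners are
 * weighted topLeft 0.1875, topRight 0.0625, bottomLeft 0.5625 and
 * bottomRight 0.1875 (summing to 1); if any corner carries INVALID_FLOW_VALUE
 * the interpolation above bails out with outOfRangeValue() instead.
 */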
// Warp an image from pano space to input space
// Use both the reference mapping and the optical-flow based mapping, blended by the per-pixel weight map
__global__ void linearFlowWarpKernel(
const int warpOutputPanoWidth, const int warpedOffsetX, const int warpedOffsetY, const int warpedWidth,
const int warpedHeight, uint32_t* warpedBuffer, const int inputWidth, const int inputHeight,
const uint32_t* inputBuffer, const float2* panoToInputBuffer, const float2* panoToInterBuffer,
const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight,
const float2* interToInputBuffer, const int flowOffsetX, const int flowOffsetY, const int flowWidth,
const int flowHeight, const float2* interToInterFlowBuffer, const int lookupOffsetX, const int lookupOffsetY,
const int weightOffsetX, const int weightOffsetY, const int weightWidth, const int weightHeight,
const unsigned char* interToInterWeightBuffer, float4* debug, uint32_t* flowWarpedBuffer) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < warpedWidth && y < warpedHeight) {
const int index = y * warpedWidth + x;
const float2 panoToInput = panoToInputBuffer[index];
float2 lookupInput = panoToInput;
const float2 panoToInter = panoToInterBuffer[index];
// Weight map is in the pano coordinate
// Get the correct weight
const int2 weightLookupCoord =
make_int2(x, y) + make_int2(warpedOffsetX, warpedOffsetY) - make_int2(weightOffsetX, weightOffsetY);
float weight = 255;
if (weightLookupCoord.x >= 0 && weightLookupCoord.x < weightWidth && weightLookupCoord.y >= 0 &&
weightLookupCoord.y < weightHeight) {
weight = interToInterWeightBuffer[weightLookupCoord.y * weightWidth + weightLookupCoord.x];
}
weight /= 255;
//// Find the flow here
float2 interFlowInput = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
const float2 interFlowLookupcoord = panoToInter - make_float2(flowOffsetX, flowOffsetY);
if (interFlowLookupcoord.x >= 0 && interFlowLookupcoord.x < flowWidth && interFlowLookupcoord.y >= 0 &&
interFlowLookupcoord.y < flowHeight) {
// This is the flow from 0 to 1
float2 interToInterFlow = Image::bilinearLookup<BilinearLookupFlow>(
interFlowLookupcoord, make_int2(flowWidth, flowHeight), interToInterFlowBuffer);
// Proceed with valid flow only
if (interToInterFlow.x != INVALID_FLOW_VALUE && interToInterFlow.y != INVALID_FLOW_VALUE) {
// Convert from optical-flow based coordinate to intermediate coordinate
interToInterFlow =
interToInterFlow - make_float2(interOffsetX, interOffsetY) + make_float2(lookupOffsetX, lookupOffsetY);
if (interToInterFlow.x >= 0 && interToInterFlow.y >= 0 && interToInterFlow.x < interWidth &&
interToInterFlow.y < interHeight) {
interFlowInput = Image::bilinearLookup<BilinearLookupFlow>(
interToInterFlow, make_int2(interWidth, interHeight), interToInputBuffer);
if (interFlowInput.x != INVALID_FLOW_VALUE && interFlowInput.y != INVALID_FLOW_VALUE) {
lookupInput = panoToInput * weight + interFlowInput * (1 - weight);
} else {
lookupInput = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
}
}
}
}
debug[index] = make_float4(weight, weight, weight, weight);
warpedBuffer[index] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(
lookupInput, make_int2(inputWidth, inputHeight), inputBuffer);
if (flowWarpedBuffer) {
flowWarpedBuffer[index] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(
interFlowInput, make_int2(inputWidth, inputHeight), inputBuffer);
}
}
}
Status LinearFlowWarper::warp(GPU::Buffer<uint32_t> warpedBuffer, const GPU::Buffer<const uint32_t> inputBuffer,
const Rect& flowRect, const GPU::Buffer<const float2> flow, const int lookupOffsetX,
const int lookupOffsetY, GPU::Buffer<float4> debug,
GPU::Buffer<uint32_t> flowWarpedBuffer, GPU::Stream gpuStream) {
hipStream_t stream = gpuStream.get();
// Flow is in the intermediate space, flow from image 1 to image 0 based on template from image 0
// Input buffer - the original input images
// Warped buffer - final image in the pano space
// Weight buffer - in the pano space
// Need to blend flow in the input space
Rect panoRect1 = mergerPair->getBoundingPanoRect(1);
Rect iRect = mergerPair->getBoundingPanosIRect();
dim3 dimBlock(WARPER_BLOCK_SIZE_X, WARPER_BLOCK_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(panoRect1.getWidth(), dimBlock.x),
(unsigned)Cuda::ceilDiv(panoRect1.getHeight(), dimBlock.y), 1);
// First, lookup the flow in pano space from intermediate space
Rect interRect1 = mergerPair->getBoundingInterRect(1, 0);
const int2 inputSize1 = mergerPair->getInput1Size();
hipLaunchKernelGGL(( linearFlowWarpKernel), dim3(dimGrid), dim3(dimBlock), 0, stream,
mergerPair->getWrapWidth(), (int)panoRect1.left(), (int)panoRect1.top(), (int)panoRect1.getWidth(),
(int)panoRect1.getHeight(), warpedBuffer.get(), inputSize1.x, inputSize1.y, inputBuffer.get(),
mergerPair->getPanoToInputSpaceCoordMapping(1).get(), mergerPair->getPanoToInterSpaceCoordMapping(1).get(),
(int)interRect1.left(), (int)interRect1.top(), (int)interRect1.getWidth(), (int)interRect1.getHeight(),
mergerPair->getInterToLookupSpaceCoordMappingBufferLevel(1, 0).get(), (int)flowRect.left(), (int)flowRect.top(),
(int)flowRect.getWidth(), (int)flowRect.getHeight(), flow.get(), lookupOffsetX, lookupOffsetY, (int)iRect.left(),
(int)iRect.top(), (int)iRect.getWidth(), (int)iRect.getHeight(), linearMaskWeight.borrow_const().get(),
debug.get(), flowWarpedBuffer.get());
return CUDA_STATUS;
}
} // namespace Core
} // namespace VideoStitch
|
bc93f509ebb22b0cab4ebae67be365fac6eb79ae.cu
|
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "parallax/linearFlowWarper.hpp"
#include "./kernels/patchDifferenceFunction.cu"
#include "backend/common/vectorOps.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "backend/cuda/core1/kernels/samplingKernel.cu"
#include "gpu/image/sampling.hpp"
#include "gpu/image/imageOps.hpp"
#include "gpu/image/blur.hpp"
#include "gpu/stream.hpp"
#include "cuda/error.hpp"
#include "cuda/util.hpp"
#include <string.h>
namespace VideoStitch {
namespace Core {
#define WARPER_BLOCK_SIZE_X 16
#define WARPER_BLOCK_SIZE_Y 16
struct BilinearLookupFlow {
typedef float2 Type;
static inline __device__ Type outOfRangeValue() { return make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); }
static inline __device__ Type interpolate(const float2 uv, const Type topLeft, const Type topRight,
const Type bottomRight, const Type bottomLeft) {
Type total = make_float2(0, 0);
float weight = 0;
const int uTopLeft = floorf(uv.x);
const int vTopLeft = floorf(uv.y);
const float du = (uv.x - uTopLeft);
const float dv = (uv.y - vTopLeft);
if (topLeft.x != INVALID_FLOW_VALUE) {
total += topLeft * (1.0f - du) * (1.0f - dv);
weight += (1.0f - du) * (1.0f - dv);
} else {
return outOfRangeValue();
}
if (topRight.x != INVALID_FLOW_VALUE) {
total += topRight * du * (1.0f - dv);
weight += du * (1.0f - dv);
} else {
return outOfRangeValue();
}
    if (bottomLeft.x != INVALID_FLOW_VALUE) {
      total += bottomLeft * (1.0f - du) * dv;
      weight += (1.0f - du) * dv;
    } else {
      return outOfRangeValue();
    }
    if (bottomRight.x != INVALID_FLOW_VALUE) {
      total += bottomRight * du * dv;
      weight += du * dv;
    } else {
      return outOfRangeValue();
    }
if (weight) {
return total / weight;
} else {
return outOfRangeValue();
}
}
};
// Warp an image from pano space to input space
// Use both the reference mapping and the optical-flow based mapping, blended by the per-pixel weight map
__global__ void linearFlowWarpKernel(
const int warpOutputPanoWidth, const int warpedOffsetX, const int warpedOffsetY, const int warpedWidth,
const int warpedHeight, uint32_t* warpedBuffer, const int inputWidth, const int inputHeight,
const uint32_t* inputBuffer, const float2* panoToInputBuffer, const float2* panoToInterBuffer,
const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight,
const float2* interToInputBuffer, const int flowOffsetX, const int flowOffsetY, const int flowWidth,
const int flowHeight, const float2* interToInterFlowBuffer, const int lookupOffsetX, const int lookupOffsetY,
const int weightOffsetX, const int weightOffsetY, const int weightWidth, const int weightHeight,
const unsigned char* interToInterWeightBuffer, float4* debug, uint32_t* flowWarpedBuffer) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < warpedWidth && y < warpedHeight) {
const int index = y * warpedWidth + x;
const float2 panoToInput = panoToInputBuffer[index];
float2 lookupInput = panoToInput;
const float2 panoToInter = panoToInterBuffer[index];
// Weight map is in the pano coordinate
// Get the correct weight
const int2 weightLookupCoord =
make_int2(x, y) + make_int2(warpedOffsetX, warpedOffsetY) - make_int2(weightOffsetX, weightOffsetY);
float weight = 255;
if (weightLookupCoord.x >= 0 && weightLookupCoord.x < weightWidth && weightLookupCoord.y >= 0 &&
weightLookupCoord.y < weightHeight) {
weight = interToInterWeightBuffer[weightLookupCoord.y * weightWidth + weightLookupCoord.x];
}
weight /= 255;
//// Find the flow here
float2 interFlowInput = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
const float2 interFlowLookupcoord = panoToInter - make_float2(flowOffsetX, flowOffsetY);
if (interFlowLookupcoord.x >= 0 && interFlowLookupcoord.x < flowWidth && interFlowLookupcoord.y >= 0 &&
interFlowLookupcoord.y < flowHeight) {
// This is the flow from 0 to 1
float2 interToInterFlow = Image::bilinearLookup<BilinearLookupFlow>(
interFlowLookupcoord, make_int2(flowWidth, flowHeight), interToInterFlowBuffer);
// Proceed with valid flow only
if (interToInterFlow.x != INVALID_FLOW_VALUE && interToInterFlow.y != INVALID_FLOW_VALUE) {
// Convert from optical-flow based coordinate to intermediate coordinate
interToInterFlow =
interToInterFlow - make_float2(interOffsetX, interOffsetY) + make_float2(lookupOffsetX, lookupOffsetY);
if (interToInterFlow.x >= 0 && interToInterFlow.y >= 0 && interToInterFlow.x < interWidth &&
interToInterFlow.y < interHeight) {
interFlowInput = Image::bilinearLookup<BilinearLookupFlow>(
interToInterFlow, make_int2(interWidth, interHeight), interToInputBuffer);
if (interFlowInput.x != INVALID_FLOW_VALUE && interFlowInput.y != INVALID_FLOW_VALUE) {
lookupInput = panoToInput * weight + interFlowInput * (1 - weight);
} else {
lookupInput = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
}
}
}
}
debug[index] = make_float4(weight, weight, weight, weight);
warpedBuffer[index] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(
lookupInput, make_int2(inputWidth, inputHeight), inputBuffer);
if (flowWarpedBuffer) {
flowWarpedBuffer[index] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(
interFlowInput, make_int2(inputWidth, inputHeight), inputBuffer);
}
}
}
Status LinearFlowWarper::warp(GPU::Buffer<uint32_t> warpedBuffer, const GPU::Buffer<const uint32_t> inputBuffer,
const Rect& flowRect, const GPU::Buffer<const float2> flow, const int lookupOffsetX,
const int lookupOffsetY, GPU::Buffer<float4> debug,
GPU::Buffer<uint32_t> flowWarpedBuffer, GPU::Stream gpuStream) {
cudaStream_t stream = gpuStream.get();
// Flow is in the intermediate space, flow from image 1 to image 0 based on template from image 0
// Input buffer - the original input images
// Warped buffer - final image in the pano space
// Weight buffer - in the pano space
// Need to blend flow in the input space
Rect panoRect1 = mergerPair->getBoundingPanoRect(1);
Rect iRect = mergerPair->getBoundingPanosIRect();
dim3 dimBlock(WARPER_BLOCK_SIZE_X, WARPER_BLOCK_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(panoRect1.getWidth(), dimBlock.x),
(unsigned)Cuda::ceilDiv(panoRect1.getHeight(), dimBlock.y), 1);
// First, lookup the flow in pano space from intermediate space
Rect interRect1 = mergerPair->getBoundingInterRect(1, 0);
const int2 inputSize1 = mergerPair->getInput1Size();
linearFlowWarpKernel<<<dimGrid, dimBlock, 0, stream>>>(
mergerPair->getWrapWidth(), (int)panoRect1.left(), (int)panoRect1.top(), (int)panoRect1.getWidth(),
(int)panoRect1.getHeight(), warpedBuffer.get(), inputSize1.x, inputSize1.y, inputBuffer.get(),
mergerPair->getPanoToInputSpaceCoordMapping(1).get(), mergerPair->getPanoToInterSpaceCoordMapping(1).get(),
(int)interRect1.left(), (int)interRect1.top(), (int)interRect1.getWidth(), (int)interRect1.getHeight(),
mergerPair->getInterToLookupSpaceCoordMappingBufferLevel(1, 0).get(), (int)flowRect.left(), (int)flowRect.top(),
(int)flowRect.getWidth(), (int)flowRect.getHeight(), flow.get(), lookupOffsetX, lookupOffsetY, (int)iRect.left(),
(int)iRect.top(), (int)iRect.getWidth(), (int)iRect.getHeight(), linearMaskWeight.borrow_const().get(),
debug.get(), flowWarpedBuffer.get());
return CUDA_STATUS;
}
} // namespace Core
} // namespace VideoStitch
|
252a2991edd9962c80e99860f92e39e142ee405e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
KAM PUI SO
CS510 GPU
Homework 4
Pre 12.1 problem. No const memory for kernel
*/
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMATOM 32
#define DIMX 64
#define DIMY 64
#define DIMZ 64
#define MAXW 10
#define SPACE 2
#define TILESIZE 16
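/*
 * Illustrative addition (not part of the original homework code): the HIP API
 * calls in this file return a hipError_t that is silently ignored. A minimal
 * checker such as the one below could wrap each call, e.g.
 * hipCheck(hipMalloc((void **) &dev_energygrid, sizeof(float) * gridSize), "hipMalloc energygrid");
 */
static void hipCheck(hipError_t err, const char *what) {
    if (err != hipSuccess) {
        fprintf(stderr, "HIP error in %s: %s\n", what, hipGetErrorString(err));
        exit(1);
    }
}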
// data structure
typedef struct {
float w; // charge
float x;
float y;
float z;
} AtomInfo;
// global variable
AtomInfo atominfo[NUMATOM];
// __constant__ AtomInfo atominfo[NUMATOM];
// kernel
__global__ void cenergy_kernel(float *energygrid, dim3 grid, float gridspacing, float z, AtomInfo *atominfo, int numatoms) {
int xindex = blockIdx.x * blockDim.x + threadIdx.x;
int yindex = blockIdx.y * blockDim.y + threadIdx.y;
int k = z / gridspacing;
int outaddr = grid.x * grid.y * k + grid.x * yindex + xindex;
float curenergy = energygrid[outaddr];
float coorx = gridspacing * (float) xindex;
float coory = gridspacing * (float) yindex;
int atomid;
float energyval = 0.0f;
for (atomid = 0; atomid < numatoms; ++atomid) {
float dx = coorx - atominfo[atomid].x;
float dy = coory - atominfo[atomid].y;
        float dz = z - atominfo[atomid].z;
        energyval += atominfo[atomid].w / sqrtf(dx*dx + dy*dy + dz*dz);
}
energygrid[outaddr] = curenergy + energyval;
}
// host
void cenergy_dev(float *energygrid, dim3 grid, float gridspacing, float z, AtomInfo *atominfo, int numatoms) {
// Step 1: allocate memory
float * dev_energygrid;
AtomInfo * dev_atominfo;
int gridSize = grid.x * grid.y * grid.z;
hipMalloc((void **) &dev_energygrid, sizeof(float) * gridSize);
hipMalloc((void **) &dev_atominfo, sizeof(AtomInfo) * numatoms);
    // Step 2: copy the inputs to the device (the kernel also reads the current energy grid)
    hipMemcpy(dev_energygrid, energygrid, sizeof(float) * gridSize, hipMemcpyHostToDevice);
    hipMemcpy(dev_atominfo, atominfo, sizeof(AtomInfo) * numatoms, hipMemcpyHostToDevice);
// Step 3: Invoke the kernel
dim3 dimGrid(TILESIZE, TILESIZE, 1);
dim3 dimBlock(ceil(grid.x / (float) TILESIZE), ceil(grid.y / (float) TILESIZE), 1);
hipLaunchKernelGGL(( cenergy_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_energygrid, grid, gridspacing, z, dev_atominfo, numatoms);
// Step 4: Retrieve the results
hipMemcpy(energygrid, dev_energygrid, sizeof(float) * gridSize, hipMemcpyDeviceToHost);
// Step 5: Free device memory
hipFree(dev_energygrid);
hipFree(dev_atominfo);
}
// initialize atominfo
void initialize() {
for (int i = 0; i < NUMATOM; ++i) {
atominfo[i].w = rand() % MAXW;
atominfo[i].x = rand() % DIMX;
atominfo[i].y = rand() % DIMY;
atominfo[i].z = rand() % DIMZ;
}
}
/*
// uniform charge
void uniformCharge() {
for (int i = 0; i < NUMATOM; ++i) {
atominfo[i].w = 1.0f;
atominfo[i].x = rand() % DIMX;
atominfo[i].y = rand() % DIMY;
atominfo[i].z = rand() % DIMZ;
}
}
*/
// print atoms
void printAtoms() {
for (int i = 0; i < NUMATOM; ++i) {
printf("index=%d, charge=%.2f, x=%.2f, y=%.2f, z=%.2f\n", i, atominfo[i].w, atominfo[i].x, atominfo[i].y, atominfo[i].z);
}
}
// serial energy calculation
void serial(float *energygrid, dim3 grid, float gridspacing, float z, int numatom) {
int i, j, n;
int k = z / gridspacing;
float x, y, energy;
for (j = 0; j < grid.y; ++j) {
y = gridspacing * (float) j;
for (i = 0; i < grid.x; ++i) {
x = gridspacing * (float) i;
energy = 0.0f;
for (n = 0; n < numatom; ++n) {
float dx = x - atominfo[n].x;
float dy = y - atominfo[n].y;
float dz = z - atominfo[n].z;
energy += atominfo[n].w / sqrtf(dx*dx + dy*dy + dz*dz);
}
energygrid[grid.x * grid.y * k + grid.x * j + i] = energy;
}
}
}
// print energy grid
void printEnergy(float *energygrid, dim3 grid, float gridspacing) {
for (int z = 0; z < grid.z; ++z) {
for (int y = 0; y < grid.y; ++y) {
for (int x = 0; x < grid.x; ++x) {
printf("x=%d, y=%d, z=%d, potential=%.2f\n", x, y, z, energygrid[grid.x *grid.y * z + grid.x * y + x]);
}
}
}
}
// energy main
void energy() {
dim3 grid(DIMX/SPACE, DIMY/SPACE, DIMZ/SPACE);
float gridspacing = (float) SPACE;
    float energygrid[DIMX/SPACE * DIMY/SPACE * DIMZ/SPACE];
    memset(energygrid, 0, sizeof(energygrid)); // start from a zeroed grid so the per-slice accumulation is well defined
float z = 0.0f;
for (int i = 0; i < grid.z; ++i) {
z = gridspacing * (float) i;
cenergy_dev(energygrid, grid, gridspacing, z, atominfo, NUMATOM);
// serial(energygrid, grid, gridspacing, z, NUMATOM);
}
printEnergy(energygrid, grid, gridspacing);
}
// main
int main(void) {
// initialize
srand(time(NULL));
initialize();
printAtoms();
energy();
return 0;
}
|
252a2991edd9962c80e99860f92e39e142ee405e.cu
|
/*
KAM PUI SO
CS510 GPU
Homework 4
Pre 12.1 problem. No const memory for kernel
*/
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMATOM 32
#define DIMX 64
#define DIMY 64
#define DIMZ 64
#define MAXW 10
#define SPACE 2
#define TILESIZE 16
// data structure
typedef struct {
float w; // charge
float x;
float y;
float z;
} AtomInfo;
// global variable
AtomInfo atominfo[NUMATOM];
// __constant__ AtomInfo atominfo[NUMATOM];
// kernel
__global__ void cenergy_kernel(float *energygrid, dim3 grid, float gridspacing, float z, AtomInfo *atominfo, int numatoms) {
int xindex = blockIdx.x * blockDim.x + threadIdx.x;
int yindex = blockIdx.y * blockDim.y + threadIdx.y;
int k = z / gridspacing;
int outaddr = grid.x * grid.y * k + grid.x * yindex + xindex;
float curenergy = energygrid[outaddr];
float coorx = gridspacing * (float) xindex;
float coory = gridspacing * (float) yindex;
int atomid;
float energyval = 0.0f;
for (atomid = 0; atomid < numatoms; ++atomid) {
float dx = coorx - atominfo[atomid].x;
float dy = coory - atominfo[atomid].y;
        float dz = z - atominfo[atomid].z;
        energyval += atominfo[atomid].w / sqrtf(dx*dx + dy*dy + dz*dz);
}
energygrid[outaddr] = curenergy + energyval;
}
// host
void cenergy_dev(float *energygrid, dim3 grid, float gridspacing, float z, AtomInfo *atominfo, int numatoms) {
// Step 1: allocate memory
float * dev_energygrid;
AtomInfo * dev_atominfo;
int gridSize = grid.x * grid.y * grid.z;
cudaMalloc((void **) &dev_energygrid, sizeof(float) * gridSize);
cudaMalloc((void **) &dev_atominfo, sizeof(AtomInfo) * numatoms);
    // Step 2: copy the inputs to the device (the kernel also reads the current energy grid)
    cudaMemcpy(dev_energygrid, energygrid, sizeof(float) * gridSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_atominfo, atominfo, sizeof(AtomInfo) * numatoms, cudaMemcpyHostToDevice);
// Step 3: Invoke the kernel
dim3 dimGrid(TILESIZE, TILESIZE, 1);
dim3 dimBlock(ceil(grid.x / (float) TILESIZE), ceil(grid.y / (float) TILESIZE), 1);
cenergy_kernel<<<dimGrid, dimBlock>>>(dev_energygrid, grid, gridspacing, z, dev_atominfo, numatoms);
// Step 4: Retrieve the results
cudaMemcpy(energygrid, dev_energygrid, sizeof(float) * gridSize, cudaMemcpyDeviceToHost);
// Step 5: Free device memory
cudaFree(dev_energygrid);
cudaFree(dev_atominfo);
}
// initialize atominfo
void initialize() {
for (int i = 0; i < NUMATOM; ++i) {
atominfo[i].w = rand() % MAXW;
atominfo[i].x = rand() % DIMX;
atominfo[i].y = rand() % DIMY;
atominfo[i].z = rand() % DIMZ;
}
}
/*
// uniform charge
void uniformCharge() {
for (int i = 0; i < NUMATOM; ++i) {
atominfo[i].w = 1.0f;
atominfo[i].x = rand() % DIMX;
atominfo[i].y = rand() % DIMY;
atominfo[i].z = rand() % DIMZ;
}
}
*/
// print atoms
void printAtoms() {
for (int i = 0; i < NUMATOM; ++i) {
printf("index=%d, charge=%.2f, x=%.2f, y=%.2f, z=%.2f\n", i, atominfo[i].w, atominfo[i].x, atominfo[i].y, atominfo[i].z);
}
}
// serial energy calculation
void serial(float *energygrid, dim3 grid, float gridspacing, float z, int numatom) {
int i, j, n;
int k = z / gridspacing;
float x, y, energy;
for (j = 0; j < grid.y; ++j) {
y = gridspacing * (float) j;
for (i = 0; i < grid.x; ++i) {
x = gridspacing * (float) i;
energy = 0.0f;
for (n = 0; n < numatom; ++n) {
float dx = x - atominfo[n].x;
float dy = y - atominfo[n].y;
float dz = z - atominfo[n].z;
energy += atominfo[n].w / sqrtf(dx*dx + dy*dy + dz*dz);
}
energygrid[grid.x * grid.y * k + grid.x * j + i] = energy;
}
}
}
// print energy grid
void printEnergy(float *energygrid, dim3 grid, float gridspacing) {
for (int z = 0; z < grid.z; ++z) {
for (int y = 0; y < grid.y; ++y) {
for (int x = 0; x < grid.x; ++x) {
printf("x=%d, y=%d, z=%d, potential=%.2f\n", x, y, z, energygrid[grid.x *grid.y * z + grid.x * y + x]);
}
}
}
}
// energy main
void energy() {
dim3 grid(DIMX/SPACE, DIMY/SPACE, DIMZ/SPACE);
float gridspacing = (float) SPACE;
    float energygrid[DIMX/SPACE * DIMY/SPACE * DIMZ/SPACE];
    memset(energygrid, 0, sizeof(energygrid)); // start from a zeroed grid so the per-slice accumulation is well defined
float z = 0.0f;
for (int i = 0; i < grid.z; ++i) {
z = gridspacing * (float) i;
cenergy_dev(energygrid, grid, gridspacing, z, atominfo, NUMATOM);
// serial(energygrid, grid, gridspacing, z, NUMATOM);
}
printEnergy(energygrid, grid, gridspacing);
}
// main
int main(void) {
// initialize
srand(time(NULL));
initialize();
printAtoms();
energy();
return 0;
}
|
7b35df533ae6957d208f8a359d4bed702b3b0214.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Tests.cuh"
#include "hip/hip_runtime.h"
int ALL_TESTS = 0;
int PASSED_TESTS = 0;
hipEvent_t start;
hipEvent_t stop;
hipEvent_t startPack;
hipEvent_t stopPack;
void InitAllTests()
{
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
ALL_TESTS = 0;
PASSED_TESTS = 0;
}
void InitPack()
{
hipEventCreate(&startPack);
hipEventCreate(&stopPack);
hipEventRecord(startPack, 0);
}
void TestInit(const char *testName)
{
printf("%135s ", testName);
++ALL_TESTS;
}
void Assert(bool arg1, bool arg2, bool arg3, bool arg4, bool arg5, bool arg6, bool arg7, bool arg8, bool arg9, bool arg10)
{
if (arg1 && arg2 && arg3 && arg4 && arg5
&& arg6 && arg7 && arg8 && arg9 && arg10)
{
++PASSED_TESTS;
printf(" PASS\n");
}
else
{
printf(" FAILED\n");
}
}
void Output(std::string output)
{
printf("\nTest output:\n%s\n", output.c_str());
}
void FinalReport()
{
float time;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("===== FINAL REPORT =====\n\n");
printf("\nPASSED %d / %d\n", PASSED_TESTS, ALL_TESTS);
printf("ELAPSED TIME: %f\n\n", time);
}
void PackReport()
{
float time;
hipEventRecord(stopPack, 0);
hipEventSynchronize(stopPack);
hipEventElapsedTime(&time, startPack, stopPack);
printf("ELAPSED TIME: %f\n\n", time);
}
void PrintException(std::exception e)
{
printf(" EXCEPTION: %s\n", e.what());
}
void PrintException()
{
printf(" EXCEPTION\n");
}
|
7b35df533ae6957d208f8a359d4bed702b3b0214.cu
|
#include "Tests.cuh"
#include "cuda_runtime.h"
int ALL_TESTS = 0;
int PASSED_TESTS = 0;
cudaEvent_t start;
cudaEvent_t stop;
cudaEvent_t startPack;
cudaEvent_t stopPack;
void InitAllTests()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
ALL_TESTS = 0;
PASSED_TESTS = 0;
}
void InitPack()
{
cudaEventCreate(&startPack);
cudaEventCreate(&stopPack);
cudaEventRecord(startPack, 0);
}
void TestInit(const char *testName)
{
printf("%135s ", testName);
++ALL_TESTS;
}
void Assert(bool arg1, bool arg2, bool arg3, bool arg4, bool arg5, bool arg6, bool arg7, bool arg8, bool arg9, bool arg10)
{
if (arg1 && arg2 && arg3 && arg4 && arg5
&& arg6 && arg7 && arg8 && arg9 && arg10)
{
++PASSED_TESTS;
printf(" PASS\n");
}
else
{
printf(" FAILED\n");
}
}
void Output(std::string output)
{
printf("\nTest output:\n%s\n", output.c_str());
}
void FinalReport()
{
float time;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("===== FINAL REPORT =====\n\n");
printf("\nPASSED %d / %d\n", PASSED_TESTS, ALL_TESTS);
printf("ELAPSED TIME: %f\n\n", time);
}
void PackReport()
{
float time;
cudaEventRecord(stopPack, 0);
cudaEventSynchronize(stopPack);
cudaEventElapsedTime(&time, startPack, stopPack);
printf("ELAPSED TIME: %f\n\n", time);
}
void PrintException(std::exception e)
{
printf(" EXCEPTION: %s\n", e.what());
}
void PrintException()
{
printf(" EXCEPTION\n");
}
|
1cc7b6f834949d4ae3ca0dff4990556a183a1f9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmergebicgstab2.cu normal z -> c, Wed Sep 17 15:08:43 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// These routines merge multiple kernels from cmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_creduce_kernel_spmv1( int Gs,
int n,
magmaFloatComplex *vtmp,
magmaFloatComplex *vtmp2 ){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgmerge_spmv1_kernel(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *p,
magmaFloatComplex *r,
magmaFloatComplex *v,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * p[ d_colind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgstab_alphakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param
A magma_c_sparse_matrix
system matrix
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_p magmaFloatComplex*
input vector p
@param
d_r magmaFloatComplex*
input vector r
@param
d_v magmaFloatComplex*
output vector v
@param
skp magmaFloatComplex*
array for parameters ( skp[0]=alpha )
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv1( magma_c_sparse_matrix A,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_p,
magmaFloatComplex *d_r,
magmaFloatComplex *d_v,
magmaFloatComplex *skp ){
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_cbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, 0,
n, A.val, A.row, A.col, d_p, d_r, d_v, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
return MAGMA_SUCCESS;
}
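/*
 * Worked example (illustrative): for n = 100000 rows and local_block_size = 256,
 * the fused SpMV/dot kernel above runs with Gs.x = 391 blocks and leaves 391
 * partial sums in d1. The while loop then launches magma_creduce_kernel_spmv1
 * once with a single block of 128 threads, folding all partials into one value,
 * which is copied into skp[0]; magma_cbicgstab_alphakernel finally replaces
 * skp[0] with skp[4] divided by that dot product.
 */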
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_creduce_kernel_spmv2( int Gs,
int n,
magmaFloatComplex *vtmp,
magmaFloatComplex *vtmp2 ){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_cbicgmerge_spmv2_kernel(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *s,
magmaFloatComplex *t,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * s[ d_colind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else{
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] =MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_cbicgstab_omegakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param
A magma_c_sparse_matrix
input matrix
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_s magmaFloatComplex*
input vector s
@param
d_t magmaFloatComplex*
output vector t
@param
skp magmaFloatComplex*
array for parameters
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv2(
magma_c_sparse_matrix A,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_s,
magmaFloatComplex *d_t,
magmaFloatComplex *skp ){
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_cbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, 0,
n, A.val, A.row, A.col, d_s, d_t, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+6, 1 );
magma_ccopyvector( 1, aux1+n, 1, skp+7, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_cbicgmerge_xrbeta_kernel(
int n,
magmaFloatComplex *rr,
magmaFloatComplex *r,
magmaFloatComplex *p,
magmaFloatComplex *s,
magmaFloatComplex *t,
magmaFloatComplex *x,
magmaFloatComplex *skp,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaFloatComplex alpha=skp[0];
magmaFloatComplex omega=skp[2];
if( i<n ){
magmaFloatComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else{
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] =MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_cbicgstab_betakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp1 = skp[4]/skp[3];
magmaFloatComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
    Merges the update of x and r with the dot products
    and the computation of beta
Arguments
---------
@param
n int
dimension n
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
rr magmaFloatComplex*
input vector rr
@param
r magmaFloatComplex*
input/output vector r
@param
p magmaFloatComplex*
input vector p
@param
s magmaFloatComplex*
input vector s
@param
t magmaFloatComplex*
input vector t
@param
x magmaFloatComplex*
output vector x
@param
skp magmaFloatComplex*
array for parameters
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_xrbeta(
int n,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *rr,
magmaFloatComplex *r,
magmaFloatComplex *p,
magmaFloatComplex *s,
magmaFloatComplex *t,
magmaFloatComplex *x,
magmaFloatComplex *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_cbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0,
n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1 );
magma_ccopyvector( 1, aux1+n, 1, skp+5, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
1cc7b6f834949d4ae3ca0dff4990556a183a1f9c.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmergebicgstab2.cu normal z -> c, Wed Sep 17 15:08:43 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// These routines merge multiple kernels from cmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
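// Reading of the scalar workspace (an inference from the kernels below, not
// stated in the original comments): skp appears to hold the BiCGSTAB scalars
// as skp[0]=alpha, skp[1]=beta, skp[2]=omega, skp[3]=rho_old, skp[4]=rho_new,
// skp[5]=<r,r>, skp[6]=<s,t>, skp[7]=<t,t>. The host wrappers below are then
// chained once per iteration: magma_cbicgmerge_spmv1 (SpMV + dot + alpha),
// magma_cbicgmerge_spmv2 (SpMV + dots + omega) and magma_cbicgmerge_xrbeta
// (x/r update + dots + beta).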
// accelerated reduction for one vector
__global__ void
magma_creduce_kernel_spmv1( int Gs,
int n,
magmaFloatComplex *vtmp,
magmaFloatComplex *vtmp2 ){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgmerge_spmv1_kernel(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *p,
magmaFloatComplex *r,
magmaFloatComplex *v,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * p[ d_colind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgstab_alphakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param
A magma_c_sparse_matrix
system matrix
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_p magmaFloatComplex*
input vector p
@param
d_r magmaFloatComplex*
input vector r
@param
d_v magmaFloatComplex*
output vector v
@param
skp magmaFloatComplex*
array for parameters ( skp[0]=alpha )
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv1( magma_c_sparse_matrix A,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_p,
magmaFloatComplex *d_r,
magmaFloatComplex *d_v,
magmaFloatComplex *skp ){
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR)
magma_cbicgmerge_spmv1_kernel<<<Gs, Bs, Ms>>>
( n, A.val, A.row, A.col, d_p, d_r, d_v, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_cbicgstab_alphakernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
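// The while loop above implements a reduction cascade: each pass launches
// Gs_next.x/2 blocks of Bs.x/2 threads that grid-stride over the Gs.x partial
// sums currently held in aux1 and write one value per block into aux2, after
// which the work buffers d1/d2 swap roles. As a rough example, n = 100000
// rows with Bs.x = 256 leaves 391 first-stage partial sums, and a single
// cascade pass already collapses them to the one value that is copied into
// skp[0] and turned into alpha by magma_cbicgstab_alphakernel.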
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_creduce_kernel_spmv2( int Gs,
int n,
magmaFloatComplex *vtmp,
magmaFloatComplex *vtmp2 ){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_cbicgmerge_spmv2_kernel(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *s,
magmaFloatComplex *t,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * s[ d_colind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else{
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] =MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_cbicgstab_omegakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
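// Reading of the kernel above (the naming of the skp slots is an assumption
// based on the indices): with skp[6]=<s,t> and skp[7]=<t,t> as written by
// magma_cbicgmerge_spmv2 below, this computes omega = <s,t>/<t,t> and saves
// the current rho (skp[4]) into skp[3] for the next beta update.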
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param
A magma_c_sparse_matrix
input matrix
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_s magmaFloatComplex*
input vector s
@param
d_t magmaFloatComplex*
output vector t
@param
skp magmaFloatComplex*
array for parameters
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv2(
magma_c_sparse_matrix A,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_s,
magmaFloatComplex *d_t,
magmaFloatComplex *skp ){
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR)
magma_cbicgmerge_spmv2_kernel<<<Gs, Bs, Ms>>>
( n, A.val, A.row, A.col, d_s, d_t, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+6, 1 );
magma_ccopyvector( 1, aux1+n, 1, skp+7, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_cbicgstab_omegakernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_cbicgmerge_xrbeta_kernel(
int n,
magmaFloatComplex *rr,
magmaFloatComplex *r,
magmaFloatComplex *p,
magmaFloatComplex *s,
magmaFloatComplex *t,
magmaFloatComplex *x,
magmaFloatComplex *skp,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaFloatComplex alpha=skp[0];
magmaFloatComplex omega=skp[2];
if( i<n ){
magmaFloatComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else{
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] =MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_cbicgstab_betakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp1 = skp[4]/skp[3];
magmaFloatComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
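// Reading of the kernel above (same assumed skp layout): with skp[4]=rho_new,
// skp[3]=rho_old, skp[0]=alpha and skp[2]=omega this is the standard BiCGSTAB
// update beta = (rho_new/rho_old) * (alpha/omega).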
/**
Purpose
-------
    Merges the update of x and r with the dot products
    and the computation of beta
Arguments
---------
@param
n int
dimension n
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
rr magmaFloatComplex*
input vector rr
@param
r magmaFloatComplex*
input/output vector r
@param
p magmaFloatComplex*
input vector p
@param
s magmaFloatComplex*
input vector s
@param
t magmaFloatComplex*
input vector t
@param
x magmaFloatComplex*
output vector x
@param
skp magmaFloatComplex*
array for parameters
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_xrbeta(
int n,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *rr,
magmaFloatComplex *r,
magmaFloatComplex *p,
magmaFloatComplex *s,
magmaFloatComplex *t,
magmaFloatComplex *x,
magmaFloatComplex *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
magma_cbicgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1 );
magma_ccopyvector( 1, aux1+n, 1, skp+5, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_cbicgstab_betakernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
0993b740c280cd7f7ea68fb1d1c89b31cd867f67.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void square(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
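// Sketch of a more general variant (hypothetical, not used by this example):
// the kernel above relies on a single-block launch, so threadIdx.x alone is a
// valid index. For arrays larger than one block, one would typically compute
// a global index and pass the element count n, e.g.
//     int idx = blockIdx.x * blockDim.x + threadIdx.x;
//     if (idx < n) d_out[idx] = d_in[idx] * d_in[idx];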
int main(int argc, char ** argv){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate input array on host
    // common practice: prefix host variables with h_ and device variables with d_
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare gpu memory pointers
float * d_in;
float * d_out;
//allocate gpu memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
    // transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back to cpu
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
for(int i = 0; i < ARRAY_SIZE; i++){
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
};
// free memory
hipFree(d_in);
hipFree(d_out);
return 0;
}
|
0993b740c280cd7f7ea68fb1d1c89b31cd867f67.cu
|
#include <stdlib.h>
#include <stdio.h>
__global__ void square(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char ** argv){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate input array on host
    // common practice: prefix host variables with h_ and device variables with d_
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare gpu memory pointers
float * d_in;
float * d_out;
//allocate gpu memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
    // transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back to cpu
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for(int i = 0; i < ARRAY_SIZE; i++){
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
};
// free memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
21a9d9d60b7ca35fae89800721fe00e3c145ab45.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
  A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
  In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
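/* A small worked sketch of step 4 listed further down (consistent with the
   commented-out host code at the end of this file): for the example above,
       histo = [4 7 3]  ->  exclusive cdf = [0 4 11]
   while the inclusive cdf shown above is [4 11 14]; the two differ only by a
   one-bin shift. A minimal CPU reference, assuming numBins bins:
       cdf[0] = 0;
       for (size_t b = 1; b < numBins; ++b) cdf[b] = cdf[b - 1] + histo[b - 1];
*/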
#include "reference_calc.cpp"
#include "utils.h"
__global__
void reduceWithSharedMem(float * d_out,
const float * const d_in,
const size_t length,
const bool isMin)
{
// shared data is located in the kernel call
extern __shared__ float s_data[];
    // compute the global thread index and the per-block thread id
const unsigned int tIndex = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned int tId = threadIdx.x;
if (tIndex >= length)
return;
// copy data from the global memory to the shared memory
s_data[tId] = d_in[tIndex];
__syncthreads();
// do reduction in shared memory
// i is the distance between two elements in the same thread block
for (unsigned int i = blockDim.x/2; i > 0; i >>= 1) {
        // verify that both elements are in the range
if (tId < i) {
if (isMin) {
s_data[tId] = min(s_data[tId], s_data[tId+i]);
} else {
s_data[tId] = max(s_data[tId], s_data[tId+i]);
}
}
__syncthreads();
}
// only thread 0 writes result for this block back to global mem
if (tId == 0) {
if(isMin){
d_out[blockIdx.x] = min(s_data[0],s_data[2]);
}
else{
d_out[blockIdx.x] = max(s_data[0],s_data[2]);
}
}
}
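// Note on the epilogue above (an observation, the original code does not
// explain it): the tree reduction assumes blockDim.x is a power of two; the
// extra min/max against s_data[2] appears to compensate for the second-stage
// call in reduce() below, where blockDim.x equals the (possibly non
// power-of-two) number of first-stage blocks, so element 2 would otherwise
// never be folded into s_data[0].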
__global__
void generate_histogram(const float * const d_logLuminance,
unsigned int * d_histogram,
float min_logLum,
float lumRange,
const size_t numBins,
const size_t length)
{
const unsigned int tIndex = threadIdx.x + blockDim.x * blockIdx.x;
if(tIndex >= length)
return;
int bin = (d_logLuminance[tIndex] - min_logLum)/lumRange * numBins;
//unsigned int bin = min(static_cast<unsigned int>(numBins-1),static_cast<unsigned int>((d_logLuminance[tIndex] - min_logLum) / lumRange * numBins));
atomicAdd(&(d_histogram[bin]),1);
}
__global__
void scan(unsigned int *histogram,
unsigned int *const d_cdf,
int n)
{
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
temp[pout*n + thid] = (thid > 0) ? histogram[thid-1] : 0;
__syncthreads();
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
__syncthreads();
}
d_cdf[thid] = temp[pout*n+thid];
}
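// Note: the kernel above is the double-buffered (Hillis-Steele style) scan;
// because each thread loads histogram[thid-1] (or 0 for thid == 0), the value
// written to d_cdf is the exclusive prefix sum. It assumes a single block of
// n <= blockDim.x threads and 2*n elements of dynamic shared memory, which
// matches the launch further down (one block, 1024 threads, 2048*sizeof(int)
// bytes) as long as numBins <= 1024.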
/**
* Use the reduce algorithm to compute min/max value
*/
float reduce(const float * const d_in,
const size_t length,
const bool isMin)
{
float retVal = 0.f;
const int maxThreadsPerBlock = 1024;
int nThreads = maxThreadsPerBlock;
int nBlocks = ceil(length / nThreads);
// Allocate GPU memory
float * d_temp; // point to an array of floats
float * d_result; // final result
const size_t nBytes = length * sizeof(float);
checkCudaErrors( hipMalloc((void**) &d_temp, nBytes));
checkCudaErrors( hipMalloc((void**) &d_result, sizeof(float)));
checkCudaErrors( hipMemcpy(d_temp, d_in, nBytes, hipMemcpyDeviceToDevice));
// Step 1: reduce to one block
hipLaunchKernelGGL(( reduceWithSharedMem), dim3(nBlocks), dim3(nThreads), nThreads * sizeof(float), 0, d_temp, d_in, length, isMin);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Step 2: reduce from one block to a single element
nThreads = nBlocks; //96
nBlocks = 1;
//nThreads = 1024;
//nBlocks = ceil(nBlocks/nThreads);
hipLaunchKernelGGL(( reduceWithSharedMem), dim3(nBlocks), dim3(nThreads), nThreads * sizeof(float), 0, d_result, d_temp, length, isMin);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Step 3: copy to the retVal
checkCudaErrors( hipMemcpy(&retVal, d_result, sizeof(float), hipMemcpyDeviceToHost));
// Release GPU memory
hipFree(d_temp);
hipFree(d_result);
return retVal;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
const size_t length = numRows * numCols;
unsigned int * d_histogram;
float lumRange;
//first step
min_logLum = reduce(d_logLuminance, length, true);
max_logLum = reduce(d_logLuminance, length, false);
std::cout<<"min_logLum = "<<min_logLum<<std::endl;
std::cout<<"max_logLum = "<<max_logLum<<std::endl;
//second step
lumRange = max_logLum - min_logLum;
const dim3 blockSize(1024,1,1);
const dim3 gridSize(length/1024,1,1);
checkCudaErrors( hipMalloc((void**) &d_histogram, numBins * sizeof(unsigned int)));
checkCudaErrors( hipMemset(d_histogram,0,numBins * sizeof(unsigned int)));
hipLaunchKernelGGL(( generate_histogram), dim3(gridSize),dim3(blockSize), 0, 0, d_logLuminance,
d_histogram,
min_logLum,
lumRange,
numBins,
length);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( scan), dim3(1),dim3(1024),2048*sizeof(int), 0, d_histogram,
d_cdf,
numBins
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/*unsigned int h_histo[numBins];
hipMemcpy(h_histo,d_histogram,sizeof(unsigned int )*numBins,hipMemcpyDeviceToHost);
unsigned int h_cdf[numBins];
h_cdf[0] = 0;
for (size_t i = 1; i < numBins; ++i)
{
h_cdf[i] = h_cdf[i - 1] + h_histo[i - 1];
}
*/
//copy host data back to device
//hipMemcpy(d_cdf,h_cdf,sizeof(unsigned int )*numBins,hipMemcpyHostToDevice);
hipFree(d_histogram);
}
|
21a9d9d60b7ca35fae89800721fe00e3c145ab45.cu
|
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
  A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
  In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "reference_calc.cpp"
#include "utils.h"
__global__
void reduceWithSharedMem(float * d_out,
const float * const d_in,
const size_t length,
const bool isMin)
{
// shared data is located in the kernel call
extern __shared__ float s_data[];
    // compute the global thread index and the per-block thread id
const unsigned int tIndex = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned int tId = threadIdx.x;
if (tIndex >= length)
return;
// copy data from the global memory to the shared memory
s_data[tId] = d_in[tIndex];
__syncthreads();
// do reduction in shared memory
// i is the distance between two elements in the same thread block
for (unsigned int i = blockDim.x/2; i > 0; i >>= 1) {
        // verify that both elements are in the range
if (tId < i) {
if (isMin) {
s_data[tId] = min(s_data[tId], s_data[tId+i]);
} else {
s_data[tId] = max(s_data[tId], s_data[tId+i]);
}
}
__syncthreads();
}
// only thread 0 writes result for this block back to global mem
if (tId == 0) {
if(isMin){
d_out[blockIdx.x] = min(s_data[0],s_data[2]);
}
else{
d_out[blockIdx.x] = max(s_data[0],s_data[2]);
}
}
}
__global__
void generate_histogram(const float * const d_logLuminance,
unsigned int * d_histogram,
float min_logLum,
float lumRange,
const size_t numBins,
const size_t length)
{
const unsigned int tIndex = threadIdx.x + blockDim.x * blockIdx.x;
if(tIndex >= length)
return;
int bin = (d_logLuminance[tIndex] - min_logLum)/lumRange * numBins;
//unsigned int bin = min(static_cast<unsigned int>(numBins-1),static_cast<unsigned int>((d_logLuminance[tIndex] - min_logLum) / lumRange * numBins));
atomicAdd(&(d_histogram[bin]),1);
}
__global__
void scan(unsigned int *histogram,
unsigned int *const d_cdf,
int n)
{
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
temp[pout*n + thid] = (thid > 0) ? histogram[thid-1] : 0;
__syncthreads();
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
__syncthreads();
}
d_cdf[thid] = temp[pout*n+thid];
}
/**
* Use the reduce algorithm to compute min/max value
*/
float reduce(const float * const d_in,
const size_t length,
const bool isMin)
{
float retVal = 0.f;
const int maxThreadsPerBlock = 1024;
int nThreads = maxThreadsPerBlock;
int nBlocks = ceil(length / nThreads);
// Allocate GPU memory
float * d_temp; // point to an array of floats
float * d_result; // final result
const size_t nBytes = length * sizeof(float);
checkCudaErrors( cudaMalloc((void**) &d_temp, nBytes));
checkCudaErrors( cudaMalloc((void**) &d_result, sizeof(float)));
checkCudaErrors( cudaMemcpy(d_temp, d_in, nBytes, cudaMemcpyDeviceToDevice));
// Step 1: reduce to one block
reduceWithSharedMem<<<nBlocks, nThreads, nThreads * sizeof(float)>>>(d_temp, d_in, length, isMin);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Step 2: reduce from one block to a single element
nThreads = nBlocks; //96
nBlocks = 1;
//nThreads = 1024;
//nBlocks = ceil(nBlocks/nThreads);
reduceWithSharedMem<<<nBlocks, nThreads, nThreads * sizeof(float)>>>(d_result, d_temp, length, isMin);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Step 3: copy to the retVal
checkCudaErrors( cudaMemcpy(&retVal, d_result, sizeof(float), cudaMemcpyDeviceToHost));
// Release GPU memory
cudaFree(d_temp);
cudaFree(d_result);
return retVal;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
const size_t length = numRows * numCols;
unsigned int * d_histogram;
float lumRange;
//first step
min_logLum = reduce(d_logLuminance, length, true);
max_logLum = reduce(d_logLuminance, length, false);
std::cout<<"min_logLum = "<<min_logLum<<std::endl;
std::cout<<"max_logLum = "<<max_logLum<<std::endl;
//second step
lumRange = max_logLum - min_logLum;
const dim3 blockSize(1024,1,1);
const dim3 gridSize(length/1024,1,1);
checkCudaErrors( cudaMalloc((void**) &d_histogram, numBins * sizeof(unsigned int)));
checkCudaErrors( cudaMemset(d_histogram,0,numBins * sizeof(unsigned int)));
generate_histogram<<<gridSize,blockSize>>>(d_logLuminance,
d_histogram,
min_logLum,
lumRange,
numBins,
length);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
scan<<<1,1024,2048*sizeof(int)>>>(d_histogram,
d_cdf,
numBins
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/*unsigned int h_histo[numBins];
cudaMemcpy(h_histo,d_histogram,sizeof(unsigned int )*numBins,cudaMemcpyDeviceToHost);
unsigned int h_cdf[numBins];
h_cdf[0] = 0;
for (size_t i = 1; i < numBins; ++i)
{
h_cdf[i] = h_cdf[i - 1] + h_histo[i - 1];
}
*/
//copy host data back to device
//cudaMemcpy(d_cdf,h_cdf,sizeof(unsigned int )*numBins,cudaMemcpyHostToDevice);
cudaFree(d_histogram);
}
|
1a7dd5fe991bcf15a3d343e5981bd32b8181b7c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef fail
#!/bin/bash
# NOTE you can chmod 0755 this file and then execute it to compile (or just copy and paste)
gcc -o hashblock hashblock.c -lssl
exit 0
#endif
//#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include "sha256.cu"
#define SHA256_DIGEST_SIZE 32
#define repeats 1000
#define NUM_BLOCKS 1024
// this is the block header, it is 80 bytes long (steal this code)
typedef struct block_header {
unsigned int version;
    // don't let the "char" fool you, this is binary data, not the human-readable version
unsigned char prev_block[32];
unsigned char merkle_root[32];
unsigned int timestamp;
unsigned int bits;
unsigned int nonce;
} block_header;
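// Size check for the "80 bytes" comment above: 4 (version) + 32 (prev_block)
// + 32 (merkle_root) + 4 (timestamp) + 4 (bits) + 4 (nonce) = 80 bytes, and
// every member is 4-byte aligned, so no padding is expected.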
double When()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
// we need a helper function to convert hex to binary, this function is unsafe and slow, but very readable (write something better)
void hex2bin(unsigned char* dest, const char* src)
{
int c, pos;
char buf[3];
pos=0;
c=0;
buf[2] = 0;
while(c < strlen(src))
{
        // read in 2 characters at a time
buf[0] = src[c++];
buf[1] = src[c++];
        // convert them to an integer and recast to a char (uint8)
dest[pos++] = (unsigned char)strtol(buf, NULL, 16);
}
}
// this function is mostly useless in a real implementation, we're only using it for demonstration purposes
__device__ void print_hash(unsigned char hash[])
{
int idx;
for (idx=0; idx < 32; idx++)
printf("%02x",hash[idx]);
printf("\n");
}
// this function swaps the byte ordering of binary data, this code is slow and bloated (write your own)
__device__ __host__ void byte_swap(unsigned char* data) {
int c;
unsigned char tmp[SHA256_DIGEST_SIZE];
c=0;
while(c<SHA256_DIGEST_SIZE)
{
tmp[c] = data[SHA256_DIGEST_SIZE-(c+1)];
c++;
}
c=0;
while(c<SHA256_DIGEST_SIZE)
{
data[c] = tmp[c];
c++;
}
}
__global__ void doCalc(unsigned char *dev_prev_block, unsigned char *dev_merkle_root, int seed) {
int i;
block_header header;
header.version = 2;
header.timestamp = 1392872245;
header.bits = 419520339;
// we are going to supply the block header with the values from the generation block 0
for(i=0;i<32;i++) {
header.prev_block[i] = dev_prev_block[i];
header.merkle_root[i] = dev_merkle_root[i];
//if(threadIdx.x == 0 && blockIdx.x == 0) printf("%u",header.prev_block[i]);
}
//if(threadIdx.x==0 && blockIdx.x == 0) printf("\n");
//we need a place to store the checksums
unsigned char hash1[32];
unsigned char hash2[32];
    // you should be able to reuse these, but openssl sha256 is slow, so you're probably not going to implement this anyway
SHA256_CTX sha256_pass1, sha256_pass2;
header.nonce = (seed * blockDim.x * NUM_BLOCKS) + blockIdx.x * blockDim.x + threadIdx.x*repeats;
//if(threadIdx.x == 0) printf("nonce: %d\n", header.nonce);
    // Use the sha256 functions from sha256.cu; the context needs to be initialized
for(i=0;i<repeats;i++) {
sha256_init(&sha256_pass1);
        // then you 'can' feed data to it in chunks, but here we're just making one pass because the data is so small
sha256_update(&sha256_pass1, (unsigned char*)&header, sizeof(block_header));
// this ends the sha256 session and writes the checksum to hash1
sha256_final(&sha256_pass1,hash1);
// to display this, we want to swap the byte order to big endian
// byte_swap(hash1, SHA256_DIGEST_LENGTH); // this is for printing
// printf("Useless First Pass Checksum: ");
// hexdump(hash1, SHA256_DIGEST_LENGTH);
// but to calculate the checksum again, we need it in little endian, so swap it back
// byte_swap(hash1, SHA256_DIGEST_LENGTH);
//same as above
sha256_init(&sha256_pass2);
sha256_update(&sha256_pass2, hash1, SHA256_DIGEST_SIZE);
sha256_final(&sha256_pass2, hash2);
if ( header.nonce == 0 || header.nonce == 3 || header.nonce == 856192328 ) {
//hexdump((unsigned char*)&header, sizeof(block_header));
printf("%u:\n", header.nonce);
byte_swap(hash2);
printf("Target Second Pass Checksum: \n");
print_hash(hash2);
}
header.nonce++;
}
}
int main() {
int i = 0;
int blocksize = 16;
int threads = 128;
long long hashes = 0;
int counter = 0;
unsigned char *dev_merkle_root, *dev_prev_block;
unsigned char prev_block[32], merkle_root[32];
hex2bin(prev_block, "000000000000000117c80378b8da0e33559b5997f2ad55e2f7d18ec1975b9717");
hex2bin(merkle_root, "871714dcbae6c8193a2bb9b2a69fe1c0440399f38d94b3a0f1b447275a29978a");
byte_swap(prev_block);
byte_swap(merkle_root);
// for(i=0; i<32;i++) {
// printf("%u",prev_block[i]);
// }
// printf("\n");
hipMalloc((void**)&dev_prev_block, 32*sizeof(unsigned char));
hipMemcpy(dev_prev_block, &(prev_block), 32 * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_merkle_root, 32*sizeof(unsigned char));
hipMemcpy(dev_merkle_root, &(merkle_root), 32 * sizeof(unsigned char), hipMemcpyHostToDevice);
double start = When();
double timer = When() - start;
while ( timer < 60.0){
//printf("before kernel call\n");
hipLaunchKernelGGL(( doCalc), dim3(blocksize), dim3(threads) , 0, 0, dev_prev_block, dev_merkle_root, counter);
hashes += blocksize*threads*repeats;
counter++;
timer = When() - start;
//printf("%d iterations\n",counter);
hipDeviceSynchronize();
}
// printf("hashes: %lld\n", hashes);
// printf("seconds: %f\n", When() - start);
printf("number of hashs per second = %lld\n",(long long) (hashes / (When() - start)) );
return 0;
}
|
1a7dd5fe991bcf15a3d343e5981bd32b8181b7c0.cu
|
#ifdef fail
#!/bin/bash
# NOTE you can chmod 0755 this file and then execute it to compile (or just copy and paste)
gcc -o hashblock hashblock.c -lssl
exit 0
#endif
//#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include "sha256.cu"
#define SHA256_DIGEST_SIZE 32
#define repeats 1000
#define NUM_BLOCKS 1024
// this is the block header, it is 80 bytes long (steal this code)
typedef struct block_header {
unsigned int version;
    // don't let the "char" fool you, this is binary data, not the human-readable version
unsigned char prev_block[32];
unsigned char merkle_root[32];
unsigned int timestamp;
unsigned int bits;
unsigned int nonce;
} block_header;
double When()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
// we need a helper function to convert hex to binary, this function is unsafe and slow, but very readable (write something better)
void hex2bin(unsigned char* dest, const char* src)
{
int c, pos;
char buf[3];
pos=0;
c=0;
buf[2] = 0;
while(c < strlen(src))
{
        // read in 2 characters at a time
buf[0] = src[c++];
buf[1] = src[c++];
        // convert them to an integer and recast to a char (uint8)
dest[pos++] = (unsigned char)strtol(buf, NULL, 16);
}
}
// this function is mostly useless in a real implementation, we're only using it for demonstration purposes
__device__ void print_hash(unsigned char hash[])
{
int idx;
for (idx=0; idx < 32; idx++)
printf("%02x",hash[idx]);
printf("\n");
}
// this function swaps the byte ordering of binary data, this code is slow and bloated (write your own)
__device__ __host__ void byte_swap(unsigned char* data) {
int c;
unsigned char tmp[SHA256_DIGEST_SIZE];
c=0;
while(c<SHA256_DIGEST_SIZE)
{
tmp[c] = data[SHA256_DIGEST_SIZE-(c+1)];
c++;
}
c=0;
while(c<SHA256_DIGEST_SIZE)
{
data[c] = tmp[c];
c++;
}
}
__global__ void doCalc(unsigned char *dev_prev_block, unsigned char *dev_merkle_root, int seed) {
int i;
block_header header;
header.version = 2;
header.timestamp = 1392872245;
header.bits = 419520339;
// we are going to supply the block header with the values from the generation block 0
for(i=0;i<32;i++) {
header.prev_block[i] = dev_prev_block[i];
header.merkle_root[i] = dev_merkle_root[i];
//if(threadIdx.x == 0 && blockIdx.x == 0) printf("%u",header.prev_block[i]);
}
//if(threadIdx.x==0 && blockIdx.x == 0) printf("\n");
//we need a place to store the checksums
unsigned char hash1[32];
unsigned char hash2[32];
    // you should be able to reuse these, but openssl sha256 is slow, so you're probably not going to implement this anyway
SHA256_CTX sha256_pass1, sha256_pass2;
header.nonce = (seed * blockDim.x * NUM_BLOCKS) + blockIdx.x * blockDim.x + threadIdx.x*repeats;
//if(threadIdx.x == 0) printf("nonce: %d\n", header.nonce);
    // Use the sha256 functions from sha256.cu; the context needs to be initialized
for(i=0;i<repeats;i++) {
sha256_init(&sha256_pass1);
        // then you 'can' feed data to it in chunks, but here we're just making one pass because the data is so small
sha256_update(&sha256_pass1, (unsigned char*)&header, sizeof(block_header));
// this ends the sha256 session and writes the checksum to hash1
sha256_final(&sha256_pass1,hash1);
// to display this, we want to swap the byte order to big endian
// byte_swap(hash1, SHA256_DIGEST_LENGTH); // this is for printing
// printf("Useless First Pass Checksum: ");
// hexdump(hash1, SHA256_DIGEST_LENGTH);
// but to calculate the checksum again, we need it in little endian, so swap it back
// byte_swap(hash1, SHA256_DIGEST_LENGTH);
//same as above
sha256_init(&sha256_pass2);
sha256_update(&sha256_pass2, hash1, SHA256_DIGEST_SIZE);
sha256_final(&sha256_pass2, hash2);
if ( header.nonce == 0 || header.nonce == 3 || header.nonce == 856192328 ) {
//hexdump((unsigned char*)&header, sizeof(block_header));
printf("%u:\n", header.nonce);
byte_swap(hash2);
printf("Target Second Pass Checksum: \n");
print_hash(hash2);
}
header.nonce++;
}
}
int main() {
int i = 0;
int blocksize = 16;
int threads = 128;
long long hashes = 0;
int counter = 0;
unsigned char *dev_merkle_root, *dev_prev_block;
unsigned char prev_block[32], merkle_root[32];
hex2bin(prev_block, "000000000000000117c80378b8da0e33559b5997f2ad55e2f7d18ec1975b9717");
hex2bin(merkle_root, "871714dcbae6c8193a2bb9b2a69fe1c0440399f38d94b3a0f1b447275a29978a");
byte_swap(prev_block);
byte_swap(merkle_root);
// for(i=0; i<32;i++) {
// printf("%u",prev_block[i]);
// }
// printf("\n");
cudaMalloc((void**)&dev_prev_block, 32*sizeof(unsigned char));
cudaMemcpy(dev_prev_block, &(prev_block), 32 * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_merkle_root, 32*sizeof(unsigned char));
cudaMemcpy(dev_merkle_root, &(merkle_root), 32 * sizeof(unsigned char), cudaMemcpyHostToDevice);
double start = When();
double timer = When() - start;
while ( timer < 60.0){
//printf("before kernel call\n");
doCalc<<< blocksize, threads >>>(dev_prev_block, dev_merkle_root, counter);
hashes += blocksize*threads*repeats;
counter++;
timer = When() - start;
//printf("%d iterations\n",counter);
cudaDeviceSynchronize();
}
// printf("hashes: %lld\n", hashes);
// printf("seconds: %f\n", When() - start);
printf("number of hashs per second = %lld\n",(long long) (hashes / (When() - start)) );
return 0;
}
|
1df6be381fcc3c0594477f2b76c940c8f4342ccd.hip
|
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#define pn(x) printf("%6.3f ", (double)x)
using namespace std;
template <typename T>
class gpuMat
{
bool blank = true;
public:
T* h_elems = nullptr;
T* d_elems = nullptr;
int rows, cols;
int2 *d_size;
gpuMat();
gpuMat(int rows, int cols);
void create(int rows, int cols);
~gpuMat();
T& operator()(int row, int col = 0);
void print(bool start = true);
void copy2Device();
void copy2Host();
void ToFile(string fileName);
private:
void SetSize();
};
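// Typical usage sketch (an assumption about intended use): storage is
// column-major, i.e. (*this)(row, col) maps to h_elems[col*rows + row].
//     gpuMat<float> m(4, 3);   // 4x3 matrix, allocated on host and device
//     m(1, 2) = 7.0f;          // fill on the host via operator()
//     m.copy2Device();         // ...launch kernels that read/write m.d_elems...
//     m.copy2Host();           // read results back through operator()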
template <typename T>
gpuMat<T>::gpuMat()
{
blank = true;
}
template <typename T>
void gpuMat<T>::SetSize()
{
// Initialize d_size
int2 h_size;
h_size.x = cols;
h_size.y = rows;
hipMalloc(&d_size, sizeof(int2));
hipMemcpy(d_size, &h_size, sizeof(int2), hipMemcpyHostToDevice);
}
template <typename T>
gpuMat<T>::gpuMat(int rows, int cols)
{
create(rows, cols);
}
template <typename T>
void gpuMat<T>::create(int rows, int cols)
{
if (!blank){
delete[] h_elems;
hipFree(d_elems);
}
blank = false;
this->rows = rows;
this->cols = cols;
h_elems = new T[rows*cols];
hipError_t err = hipMalloc(&d_elems, rows*cols*sizeof(T));
if (err != hipSuccess){
cout << "[gpuMat::ctor]Memory allocation on GPU failed." << endl;
}
SetSize();
}
template <typename T>
gpuMat<T>::~gpuMat()
{
if (!blank){
cout << "[gpuMat::dtor]Destroying gpuMat[auto]" << endl;
delete[] h_elems;
hipFree(d_elems);
}
else{
cout << "[gpuMat::dtor] object was blank" << endl;
}
hipFree(d_size);
}
template <typename T>
T& gpuMat<T>::operator()(int row, int col)
{
return h_elems[col*rows + row];
}
template <typename T>
void gpuMat<T>::copy2Device()
{
hipMemcpy(d_elems, h_elems, rows*cols*sizeof(T), hipMemcpyHostToDevice);
}
template <typename T>
void gpuMat<T>::copy2Host()
{
hipMemcpy(h_elems, d_elems, rows*cols*sizeof(T), hipMemcpyDeviceToHost);
}
template <typename T>
void gpuMat<T>::print(bool start)
{
// set start = false if you want to print the bottom-right corner
// of a large matrix
cout << endl;
if (start){
for (int i = 0; i < min(10, rows); i++)
{
for (int j = 0; j < min(10, cols); j++)
{
pn((*this)(i, j));
}
cout << endl;
}
}
else{
for (int i = max(0, rows - 10); i < rows; i++)
{
for (int j = max(0, cols - 10); j < cols; j++)
{
pn((*this)(i, j));
}
cout << endl;
}
}
}
template <typename T>
void gpuMat<T>::ToFile(string filename)
{
this->copy2Host();
FILE *fh;
fh = fopen(filename.c_str(), "w");
if (fh != NULL){
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
fprintf(fh, "%6.2f", (*this)(i, j));
//os << (*this)(i, j) << ",";
//os << j*rows + i << ",";
//os << "[" << j*rows + i << "]" << h_elems[j*rows + i] << ",";
}
//os << endl;
fprintf(fh, "\n");
}
fclose(fh);
}
else{
cout << filename << " failed to open..." << endl;
}
}
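// Illustrative usage sketch (not part of the original header). The function name and the
// 4x4 size are arbitrary; it only exercises the public API defined above and is never
// compiled unless instantiated from a host translation unit.
template <typename T>
inline void gpuMat_usage_example()
{
    gpuMat<T> A(4, 4);                 // allocates 4x4 storage on host and device
    for (int r = 0; r < 4; r++)
        for (int c = 0; c < 4; c++)
            A(r, c) = (T)(r * 4 + c);  // operator() is column-major: h_elems[c*rows + r]
    A.copy2Device();                   // push host data to d_elems
    // ... launch kernels that read/write A.d_elems, using A.d_size for bounds ...
    A.copy2Host();                     // pull results back to h_elems
    A.print();                         // inspect the top-left corner
}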
|
1df6be381fcc3c0594477f2b76c940c8f4342ccd.cu
|
#pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#define pn(x) printf("%6.3f ", (double)x)
using namespace std;
template <typename T>
class gpuMat
{
bool blank = true;
public:
T* h_elems = nullptr;
T* d_elems = nullptr;
int rows, cols;
int2 *d_size;
gpuMat();
gpuMat(int rows, int cols);
void create(int rows, int cols);
~gpuMat();
T& operator()(int row, int col = 0);
void print(bool start = true);
void copy2Device();
void copy2Host();
void ToFile(string fileName);
private:
void SetSize();
};
template <typename T>
gpuMat<T>::gpuMat()
{
blank = true;
}
template <typename T>
void gpuMat<T>::SetSize()
{
// Initialize d_size
int2 h_size;
h_size.x = cols;
h_size.y = rows;
cudaMalloc(&d_size, sizeof(int2));
cudaMemcpy(d_size, &h_size, sizeof(int2), cudaMemcpyHostToDevice);
}
template <typename T>
gpuMat<T>::gpuMat(int rows, int cols)
{
create(rows, cols);
}
template <typename T>
void gpuMat<T>::create(int rows, int cols)
{
if (!blank){
delete[] h_elems;
cudaFree(d_elems);
}
blank = false;
this->rows = rows;
this->cols = cols;
h_elems = new T[rows*cols];
cudaError_t err = cudaMalloc(&d_elems, rows*cols*sizeof(T));
if (err != cudaSuccess){
cout << "[gpuMat::ctor]Memory allocation on GPU failed." << endl;
}
SetSize();
}
template <typename T>
gpuMat<T>::~gpuMat()
{
if (!blank){
cout << "[gpuMat::dtor]Destroying gpuMat[auto]" << endl;
delete[] h_elems;
cudaFree(d_elems);
}
else{
cout << "[gpuMat::dtor] object was blank" << endl;
}
cudaFree(d_size);
}
template <typename T>
T& gpuMat<T>::operator()(int row, int col)
{
return h_elems[col*rows + row];
}
template <typename T>
void gpuMat<T>::copy2Device()
{
cudaMemcpy(d_elems, h_elems, rows*cols*sizeof(T), cudaMemcpyHostToDevice);
}
template <typename T>
void gpuMat<T>::copy2Host()
{
cudaMemcpy(h_elems, d_elems, rows*cols*sizeof(T), cudaMemcpyDeviceToHost);
}
template <typename T>
void gpuMat<T>::print(bool start)
{
// set start = false if you want to print the bottom-right corner
// of a large matrix
cout << endl;
if (start){
for (int i = 0; i < min(10, rows); i++)
{
for (int j = 0; j < min(10, cols); j++)
{
pn((*this)(i, j));
}
cout << endl;
}
}
else{
for (int i = max(0, rows - 10); i < rows; i++)
{
for (int j = max(0, cols - 10); j < cols; j++)
{
pn((*this)(i, j));
}
cout << endl;
}
}
}
template <typename T>
void gpuMat<T>::ToFile(string filename)
{
this->copy2Host();
FILE *fh;
fh = fopen(filename.c_str(), "w");
if (fh != NULL){
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
fprintf(fh, "%6.2f", (*this)(i, j));
//os << (*this)(i, j) << ",";
//os << j*rows + i << ",";
//os << "[" << j*rows + i << "]" << h_elems[j*rows + i] << ",";
}
//os << endl;
fprintf(fh, "\n");
}
fclose(fh);
}
else{
cout << filename << " failed to open..." << endl;
}
}
|
e7b555a56dfb317703361d5610f12cdf46841a97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include "device_launch_parameters.h"
// #include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 128
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width) {
for (int i = 0; i < GRID_SIZE; i++) {
for (int j = 0; j < GRID_SIZE; j++) {
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
sum += in[start + ii * width + jj] * mul[jj];
}
}
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
if (jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for (int i = 0; i < SIZE; i++) {
if (abs(ref[i] - out[i]) > 1.e-6) {
printf("Diff %f\n", abs(ref[i] - out[i]));
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= width || ty >= SIZE / width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
__syncthreads();
// perform first level of reduction,
// reading from global memory, writing to shared memory
__shared__ float preData[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float sdata[BLOCK_SIZE];
__shared__ float mulData[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
//unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
int i = threadIdx.x;
mulData[i] = mul[i];
__syncthreads();
float mySum = 0;
if (i + BLOCK_SIZE < width)
for (int j = 0; j < BLOCK_SIZE; j++) {
mySum += in[start + i + j * width];
}
mySum *= mulData[i];
sdata[tid] = mySum;
__syncthreads();
//printf("1 TID %d sum %f\n", i, mySum);
if ((BLOCK_SIZE >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((BLOCK_SIZE >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((BLOCK_SIZE >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((BLOCK_SIZE >= 2) && (tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
//printf("2 TID %d sum %f\n", i, mySum);
// write result for this block to global mem
//if (tid == 0) g_odata[blockIdx.x] = mySum
__shared__ float total;
if (tid == 0) total = mySum;
__syncthreads();
//if (tid == 0) printf("total is %f\n", total);
if (tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty] / total;
else
out[tx * width + ty] = 0.0f;
}
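/* For reference (illustrative, not part of the original): the hand-unrolled steps above
   compute the same block-wide weighted sum as this generic shared-memory reduction loop:
       for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
           if (tid < s) sdata[tid] += sdata[tid + s];
           __syncthreads();
       }
   Thread 0 then holds the per-tile sum, which is broadcast through the shared variable
   'total' and used to normalize every element written by the block. */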
int main() {
//float *hA_in = (float *)malloc(SIZE * sizeof(float));
//float *hA_out = (float *)malloc(SIZE * sizeof(float));
//float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *hA_in, *hA_out, *hB_in;
float *dA_in, *dA_out, *dB_in;
hipHostMalloc((void**)&hA_in, SIZE * sizeof(float));
hipHostMalloc((void**)&hA_out, SIZE * sizeof(float));
hipHostMalloc((void**)&hB_in, BLOCK_SIZE * sizeof(float));
srand(2016);
for (int i = 0; i < SIZE; i++) {
hA_in[i] = (float)rand() / (float)RAND_MAX;
}
for (int i = 0; i < BLOCK_SIZE; i++) {
hB_in[i] = (float)rand() / (float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
hipDeviceSynchronize();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
norm << <grid, block >> > (dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel time %fs\n", milliseconds);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
/*printf("\n");
for (int i = 0; i < SIZE; i++) {
printf("%d ", hA_out[i]);
if (i % 16 == 0) {
printf("\n");
}
}*/
}
|
e7b555a56dfb317703361d5610f12cdf46841a97.cu
|
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include "device_launch_parameters.h"
// #include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 128
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width) {
for (int i = 0; i < GRID_SIZE; i++) {
for (int j = 0; j < GRID_SIZE; j++) {
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
sum += in[start + ii * width + jj] * mul[jj];
}
}
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
if (jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for (int i = 0; i < SIZE; i++) {
if (abs(ref[i] - out[i]) > 1.e-6) {
printf("Diff %f\n", abs(ref[i] - out[i]));
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= width || ty >= SIZE / width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
__syncthreads();
// perform first level of reduction,
// reading from global memory, writing to shared memory
__shared__ float preData[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float sdata[BLOCK_SIZE];
__shared__ float mulData[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
//unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
int i = threadIdx.x;
mulData[i] = mul[i];
__syncthreads();
float mySum = 0;
if (i + BLOCK_SIZE < width)
for (int j = 0; j < BLOCK_SIZE; j++) {
mySum += in[start + i + j * width];
}
mySum *= mulData[i];
sdata[tid] = mySum;
__syncthreads();
//printf("1 TID %d sum %f\n", i, mySum);
if ((BLOCK_SIZE >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((BLOCK_SIZE >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((BLOCK_SIZE >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((BLOCK_SIZE >= 2) && (tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
//printf("2 TID %d sum %f\n", i, mySum);
// write result for this block to global mem
//if (tid == 0) g_odata[blockIdx.x] = mySum
__shared__ float total;
if (tid == 0) total = mySum;
__syncthreads();
//if (tid == 0) printf("total is %f\n", total);
if (tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty] / total;
else if (tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty] / total;
else
out[tx * width + ty] = 0.0f;
}
int main() {
//float *hA_in = (float *)malloc(SIZE * sizeof(float));
//float *hA_out = (float *)malloc(SIZE * sizeof(float));
//float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *hA_in, *hA_out, *hB_in;
float *dA_in, *dA_out, *dB_in;
cudaMallocHost((void**)&hA_in, SIZE * sizeof(float));
cudaMallocHost((void**)&hA_out, SIZE * sizeof(float));
cudaMallocHost((void**)&hB_in, BLOCK_SIZE * sizeof(float));
srand(2016);
for (int i = 0; i < SIZE; i++) {
hA_in[i] = (float)rand() / (float)RAND_MAX;
}
for (int i = 0; i < BLOCK_SIZE; i++) {
hB_in[i] = (float)rand() / (float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
cudaDeviceSynchronize();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
norm << <grid, block >> > (dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel time %fs\n", milliseconds);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
/*printf("\n");
for (int i = 0; i < SIZE; i++) {
printf("%d ", hA_out[i]);
if (i % 16 == 0) {
printf("\n");
}
}*/
}
|
2cba0a94090d68ee86d14467b9e2eccafbddf06a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <hip/hip_runtime.h>
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
template<class Type,PetscInt BS,PetscInt EQ>
__global__ static void d_Pack(PetscInt count,const PetscInt *idx,PetscInt bs,const void *unpacked,void *packed)
{
PetscInt i,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const Type *u = (const Type*)unpacked;
Type *p = (Type*)packed;
const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid<count; tid += grid_size) {
if (!idx) {for (i=0; i<MBS; i++) p[tid*MBS+i] = u[tid*MBS+i];}
else {for (i=0; i<MBS; i++) p[tid*MBS+i] = u[idx[tid]*MBS+i];}
}
}
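/* Worked example (illustrative): if <unit> is 16 PetscReals, the kernel is instantiated as
   d_Pack<PetscReal,8,0> and called with bs=16, so M = bs/BS = 2 and MBS = 16; each thread
   then moves 16 contiguous PetscReals per index. If <unit> is a single PetscReal (the common
   VecScatter case), d_Pack<PetscReal,1,1> gives M = MBS = 1 and the inner loop unrolls away. */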
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt count,const PetscInt *idx,PetscInt bs,void *unpacked,const void *packed)
{
PetscInt i,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
Type *u = (Type*)unpacked;
const Type *p = (const Type*)packed;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!idx) {for (i=0; i<MBS; i++) op(u[tid*MBS+i], p[tid*MBS+i]);}
else {for (i=0; i<MBS; i++) op(u[idx[tid]*MBS+i],p[tid*MBS+i]);}
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt count,const PetscInt *idx,PetscInt bs,void *unpacked,void *packed)
{
PetscInt i,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
Type *u = (Type*)unpacked,*p = (Type*)packed;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!idx) {for (i=0; i<MBS; i++) p[tid*MBS+i] = op(u[tid*MBS+i],p[tid*MBS+i]);}
else {for (i=0; i<MBS; i++) p[tid*MBS+i] = op(u[idx[tid]*MBS+i],p[tid*MBS+i]);}
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
template<typename Type> struct Minloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b);
return old;
}
};
template<typename Type> struct Maxloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPIU_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
With bs>1 and a unit > 64 bits, the current element-wise atomic approach can not guarantee the whole
insertion is atomic. Hope no user codes rely on that.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((unsigned long long int*)address,__double_as_longlong(val)));}
#endif
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicExch(PetscInt* address,PetscInt val) {return (PetscInt)(atomicExch((unsigned long long int*)address,(unsigned long long int)val));}
#endif
template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicAdd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAdd((unsigned long long int*)address,(unsigned long long int)val);}
#endif
template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
template<> struct AtomicAdd<double> {
__device__ double operator() (double& x,double y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x,y);
#else
double *address = &x, val = y;
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template<> struct AtomicAdd<float> {
__device__ float operator() (float& x,float y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x,y);
#else
float *address = &x, val = y;
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
template<> struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int* address,int val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val*assumed);
} while (assumed != old);
return (int)old;
}
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicMult(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val*(PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicMin(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMin(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicMax(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMax(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
atomicOr() and atomicXor are similar.
*/
#if defined(PETSC_USE_64BIT_INDICES)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 350)
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val & (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicOr(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val | (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val ^ (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#else
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAnd((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicOr (PetscInt* address,PetscInt val) {return (PetscInt)atomicOr ((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val) {return (PetscInt)atomicXor((unsigned long long int*)address,(unsigned long long int)val);}
#endif
#endif
template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without definition makes any instantiation not using given specializations erroneous at compile time,
which is what we want since we only support 32-bit and 64-bit integers.
*/
template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
template<typename Type,class Op>
struct AtomicLogical<Type,Op,4> {
__device__ Type operator()(Type& x,Type y) const {
int *address_as_int = (int*)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
template<typename Type,class Op>
struct AtomicLogical<Type,Op,8> {
__device__ Type operator()(Type& x,Type y) const {
unsigned long long int *address_as_ull = (unsigned long long int*)(&x);
unsigned long long int old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note land/lor/lxor below are different from LAND etc above. Here we pass arguments by value and return result of ops (not old value) */
template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
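/* Example (illustrative): AtomicLAND<PetscInt> instantiates
   AtomicLogical<PetscInt,land<PetscInt>,sizeof(PetscInt)>, so with 64-bit indices the 8-byte
   atomicCAS loop is selected and with 32-bit indices the 4-byte one; any other integer size
   fails to compile because the primary AtomicLogical template has no definition. */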
/*====================================================================================*/
/* Wrapper functions on cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Pack(PetscInt count,const PetscInt *idx,PetscSFPack link,PetscSFPackOpt opt,const void *unpacked,void *packed)
{
hipError_t err;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscFunctionBegin;
if (nblocks > link->MAX_CORESIDENT_THREADS/nthreads) nblocks = link->MAX_CORESIDENT_THREADS/nthreads;
hipLaunchKernelGGL(( d_Pack<Type,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, count,idx,link->bs,unpacked,packed);
err = hipGetLastError();CHKERRCUDA(err);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscInt count,const PetscInt *idx,PetscSFPack link,PetscSFPackOpt opt,void *unpacked,const void *packed)
{
hipError_t err;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscFunctionBegin;
if (nblocks > link->MAX_CORESIDENT_THREADS/nthreads) nblocks = link->MAX_CORESIDENT_THREADS/nthreads;
hipLaunchKernelGGL(( d_UnpackAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, count,idx,link->bs,unpacked,packed);
err = hipGetLastError();CHKERRCUDA(err);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscInt count,const PetscInt *idx,PetscSFPack link,PetscSFPackOpt opt,void *unpacked,void *packed)
{
hipError_t err;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscFunctionBegin;
if (nblocks > link->MAX_CORESIDENT_THREADS/nthreads) nblocks = link->MAX_CORESIDENT_THREADS/nthreads;
hipLaunchKernelGGL(( d_FetchAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, count,idx,link->bs,unpacked,packed);
err = hipGetLastError();CHKERRCUDA(err);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_RealType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_FetchAndMult = FetchAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndMin = FetchAndOp <Type,Min<Type> ,BS,EQ>;
link->d_FetchAndMax = FetchAndOp <Type,Max<Type> ,BS,EQ>;
/* Pack() is always data race free */
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndInsert = FetchAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_FetchAndMult = FetchAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_FetchAndMin = FetchAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_FetchAndMax = FetchAndOp <Type,AtomicMax<Type> ,BS,EQ>;
}
/* Have this templated class to specialize for char integers */
template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFPack link) {
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndInsert = FetchAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_FetchAndMult = FetchAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_FetchAndMin = FetchAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_FetchAndMax = FetchAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndLAND = FetchAndOp <Type,AtomicLAND<Type> ,BS,EQ>;
link->da_FetchAndLOR = FetchAndOp <Type,AtomicLOR<Type> ,BS,EQ>;
link->da_FetchAndLXOR = FetchAndOp <Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_FetchAndBAND = FetchAndOp <Type,AtomicBAND<Type> ,BS,EQ>;
link->da_FetchAndBOR = FetchAndOp <Type,AtomicBOR<Type> ,BS,EQ>;
link->da_FetchAndBXOR = FetchAndOp <Type,AtomicBXOR<Type> ,BS,EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template<typename Type,PetscInt BS,PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
static void Init(PetscSFPack link) {/* Nothing to do; leave the function pointers NULL */}
};
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_IntegerType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_FetchAndMult = FetchAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndMin = FetchAndOp <Type,Min<Type> ,BS,EQ>;
link->d_FetchAndMax = FetchAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndLAND = FetchAndOp <Type,LAND<Type> ,BS,EQ>;
link->d_FetchAndLOR = FetchAndOp <Type,LOR<Type> ,BS,EQ>;
link->d_FetchAndLXOR = FetchAndOp <Type,LXOR<Type> ,BS,EQ>;
link->d_FetchAndBAND = FetchAndOp <Type,BAND<Type> ,BS,EQ>;
link->d_FetchAndBOR = FetchAndOp <Type,BOR<Type> ,BS,EQ>;
link->d_FetchAndBXOR = FetchAndOp <Type,BXOR<Type> ,BS,EQ>;
PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_ComplexType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_FetchAndMult = FetchAndOp <Type,Mult<Type> ,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {int a; int b; } PairInt;
typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
template<typename Type>
static void PackInit_PairType(PetscSFPack link)
{
link->d_Pack = Pack<Type,1,1>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,1,1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,1,1>;
link->d_FetchAndMinloc = FetchAndOp <Type,Minloc<Type>,1,1>;
link->d_FetchAndMaxloc = FetchAndOp <Type,Maxloc<Type>,1,1>;
/* Atomics for pair types are not implemented yet */
}
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_DumbType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
/* Atomics for dumb types are not implemented yet */
}
/*====================================================================================*/
/* Main driver to init MPI datatype on device */
/*====================================================================================*/
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFPackSetUp_Device(PetscSF sf,PetscSFPack link,MPI_Datatype unit)
{
PetscErrorCode ierr;
hipError_t err;
PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
PetscBool is2Int,is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex=0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(0);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);CHKERRQ(ierr);
/* MPI_CHAR is treated below as a dumb type that does not support reduction according to MPI standard */
ierr = MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);CHKERRQ(ierr);
#endif
ierr = MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);CHKERRQ(ierr);
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
} else if (nPetscInt) {
if (nPetscInt == 8) PackInit_IntegerType<PetscInt,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<PetscInt,8,0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<PetscInt,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<PetscInt,4,0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<PetscInt,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<PetscInt,2,0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<PetscInt,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<PetscInt,1,0>(link);
#if defined(PETSC_USE_64BIT_INDICES)
} else if (nInt) {
if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
#endif
} else if (nSignedChar) {
if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
} else if (nUnsignedChar) {
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
#endif
} else {
MPI_Aint lb,nbyte;
ierr = MPI_Type_get_extent(unit,&lb,&nbyte);CHKERRQ(ierr);
if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
if (nbyte % sizeof(int)) { /* If the type size is not multiple of int */
if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
} else {
nInt = nbyte / sizeof(int);
if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
}
}
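/* Example (illustrative): unit = MPIU_REAL gives nPetscReal = 1 and selects
   PackInit_RealType<PetscReal,1,1>; a contiguous unit of 16 PetscReals selects
   PackInit_RealType<PetscReal,8,0>; MPI_2INT goes through PackInit_PairType<PairInt>. */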
if (!sf_use_default_cuda_stream) {err = hipStreamCreate(&link->stream);CHKERRCUDA(err);}
if (!sf->MAX_CORESIDENT_THREADS) {
int device;
struct hipDeviceProp_t props;
err = hipGetDevice(&device);CHKERRCUDA(err);
err = hipGetDeviceProperties(&props,device);CHKERRCUDA(err);
sf->MAX_CORESIDENT_THREADS = props.maxThreadsPerMultiProcessor;
}
link->MAX_CORESIDENT_THREADS = sf->MAX_CORESIDENT_THREADS;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(0);
}
|
2cba0a94090d68ee86d14467b9e2eccafbddf06a.cu
|
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <cuda_runtime.h>
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
template<class Type,PetscInt BS,PetscInt EQ>
__global__ static void d_Pack(PetscInt count,const PetscInt *idx,PetscInt bs,const void *unpacked,void *packed)
{
PetscInt i,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const Type *u = (const Type*)unpacked;
Type *p = (Type*)packed;
const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid<count; tid += grid_size) {
if (!idx) {for (i=0; i<MBS; i++) p[tid*MBS+i] = u[tid*MBS+i];}
else {for (i=0; i<MBS; i++) p[tid*MBS+i] = u[idx[tid]*MBS+i];}
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt count,const PetscInt *idx,PetscInt bs,void *unpacked,const void *packed)
{
PetscInt i,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
Type *u = (Type*)unpacked;
const Type *p = (const Type*)packed;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!idx) {for (i=0; i<MBS; i++) op(u[tid*MBS+i], p[tid*MBS+i]);}
else {for (i=0; i<MBS; i++) op(u[idx[tid]*MBS+i],p[tid*MBS+i]);}
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt count,const PetscInt *idx,PetscInt bs,void *unpacked,void *packed)
{
PetscInt i,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
Type *u = (Type*)unpacked,*p = (Type*)packed;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!idx) {for (i=0; i<MBS; i++) p[tid*MBS+i] = op(u[tid*MBS+i],p[tid*MBS+i]);}
else {for (i=0; i<MBS; i++) p[tid*MBS+i] = op(u[idx[tid]*MBS+i],p[tid*MBS+i]);}
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
template<typename Type> struct Minloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b);
return old;
}
};
template<typename Type> struct Maxloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPIU_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
With bs>1 and a unit > 64 bits, the current element-wise atomic approach can not guarantee the whole
insertion is atomic. Hope no user codes rely on that.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((unsigned long long int*)address,__double_as_longlong(val)));}
#endif
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicExch(PetscInt* address,PetscInt val) {return (PetscInt)(atomicExch((unsigned long long int*)address,(unsigned long long int)val));}
#endif
template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
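/* Illustrative sketch (not part of the build): how a functor such as AtomicInsert is meant to be applied
   when several packed entries may target the same root entry. The kernel and buffer names below are
   hypothetical; only the functor usage mirrors the real unpack kernels. */
#if 0
__global__ static void example_unpack_insert(PetscInt count,const PetscInt *idx,double *rootdata,const double *leafbuf)
{
  AtomicInsert<double> op;
  for (PetscInt i = blockIdx.x*blockDim.x + threadIdx.x; i < count; i += gridDim.x*blockDim.x) {
    op(rootdata[idx[i]],leafbuf[i]); /* the returned old value is discarded */
  }
}
#endif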
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicAdd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAdd((unsigned long long int*)address,(unsigned long long int)val);}
#endif
template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
template<> struct AtomicAdd<double> {
__device__ double operator() (double& x,double y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x,y);
#else
double *address = &x, val = y;
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template<> struct AtomicAdd<float> {
__device__ float operator() (float& x,float y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x,y);
#else
float *address = &x, val = y;
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
template<> struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
    return old; /* The returned value may not be atomic; it can be a mix of the two ops. The caller should discard it. */
}
};
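/* Illustrative sketch (not part of the build): many threads accumulating into a single double through
   AtomicAdd<double>. On devices of compute capability >= 6.0 this maps to the hardware atomicAdd; on
   older devices it exercises the atomicCAS emulation loop above. The kernel name and launch below are
   hypothetical. */
#if 0
__global__ static void example_atomic_sum(PetscInt count,const double *x,double *sum)
{
  AtomicAdd<double> add;
  for (PetscInt i = blockIdx.x*blockDim.x + threadIdx.x; i < count; i += gridDim.x*blockDim.x) add(*sum,x[i]);
}
/* A possible launch (error checks omitted): example_atomic_sum<<<64,256>>>(count,d_x,d_sum);
   the accumulated result is independent of thread ordering up to floating-point rounding. */
#endif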
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int* address,int val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val*assumed);
} while (assumed != old);
return (int)old;
}
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicMult(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val*(PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
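/* The emulated atomics in this file (atomicMult above, atomicMin/atomicMax and the 64-bit bitwise ops
   below) all follow the same compare-and-swap retry pattern. A generic sketch of that pattern for a
   64-bit floating-point payload, for illustration only; the hand-written versions are kept to match the
   exact types PETSc needs: */
#if 0
template<class BinaryOp>
__device__ static double exampleAtomicRMW(double *address,double val,BinaryOp op)
{
  unsigned long long int *address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old; /* snapshot the current value */
    old     = atomicCAS(address_as_ull, assumed, __double_as_longlong(op(__longlong_as_double(assumed),val)));
  } while (assumed != old); /* retry if another thread changed *address in between */
  return __longlong_as_double(old);
}
#endif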
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicMin(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMin(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicMax(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMax(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
atomicOr() and atomicXor are similar.
*/
#if defined(PETSC_USE_64BIT_INDICES)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 350)
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val & (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicOr(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val | (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val ^ (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#else
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAnd((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicOr (PetscInt* address,PetscInt val) {return (PetscInt)atomicOr ((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val) {return (PetscInt)atomicXor((unsigned long long int*)address,(unsigned long long int)val);}
#endif
#endif
template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template that is declared but not defined makes any instantiation that does not match one of the
   given specializations a compile-time error, which is what we want, since we only support 32-bit and
   64-bit integers.
 */
template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
template<typename Type,class Op>
struct AtomicLogical<Type,Op,4> {
__device__ Type operator()(Type& x,Type y) const {
int *address_as_int = (int*)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
template<typename Type,class Op>
struct AtomicLogical<Type,Op,8> {
__device__ Type operator()(Type& x,Type y) const {
unsigned long long int *address_as_ull = (unsigned long long int*)(&x);
unsigned long long int old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note land/lor/lxor below are different from LAND etc. above: here we pass arguments by value and return the result of the op (not the old value) */
template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
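/* Illustrative sketch (not compiled): because the primary AtomicLogical template is declared but never
   defined, only the 4-byte and 8-byte specializations above can be instantiated. */
#if 0
__device__ static void example_atomic_logical(int *i32,PetscInt *i64)
{
  AtomicLAND<int>      op32; op32(*i32,1); /* OK: dispatches to AtomicLogical<int,land<int>,4> */
  AtomicLAND<PetscInt> op64; op64(*i64,1); /* OK: with 64-bit indices this uses AtomicLogical<PetscInt,land<PetscInt>,8> */
  /* AtomicLAND<char> op8; would fail to compile: AtomicLogical<char,land<char>,1> has no definition */
}
#endif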
/*====================================================================================*/
/* Wrapper functions on cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Pack(PetscInt count,const PetscInt *idx,PetscSFPack link,PetscSFPackOpt opt,const void *unpacked,void *packed)
{
cudaError_t err;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscFunctionBegin;
if (nblocks > link->MAX_CORESIDENT_THREADS/nthreads) nblocks = link->MAX_CORESIDENT_THREADS/nthreads;
d_Pack<Type,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(count,idx,link->bs,unpacked,packed);
err = cudaGetLastError();CHKERRCUDA(err);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscInt count,const PetscInt *idx,PetscSFPack link,PetscSFPackOpt opt,void *unpacked,const void *packed)
{
cudaError_t err;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscFunctionBegin;
if (nblocks > link->MAX_CORESIDENT_THREADS/nthreads) nblocks = link->MAX_CORESIDENT_THREADS/nthreads;
d_UnpackAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(count,idx,link->bs,unpacked,packed);
err = cudaGetLastError();CHKERRCUDA(err);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscInt count,const PetscInt *idx,PetscSFPack link,PetscSFPackOpt opt,void *unpacked,void *packed)
{
cudaError_t err;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscFunctionBegin;
if (nblocks > link->MAX_CORESIDENT_THREADS/nthreads) nblocks = link->MAX_CORESIDENT_THREADS/nthreads;
d_FetchAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(count,idx,link->bs,unpacked,packed);
err = cudaGetLastError();CHKERRCUDA(err);
PetscFunctionReturn(0);
}
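/* Worked example for the wrappers above (illustrative): with nthreads = 256 and count = 10000,
   (count+nthreads-1)/nthreads = 40 blocks are requested; the request is then capped at
   link->MAX_CORESIDENT_THREADS/nthreads (set from the device properties in PetscSFPackSetUp_Device
   below), so large counts are presumably handled by the device kernels processing more than one
   entry per thread. */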
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_RealType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_FetchAndMult = FetchAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndMin = FetchAndOp <Type,Min<Type> ,BS,EQ>;
link->d_FetchAndMax = FetchAndOp <Type,Max<Type> ,BS,EQ>;
/* Pack() is always data race free */
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndInsert = FetchAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_FetchAndMult = FetchAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_FetchAndMin = FetchAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_FetchAndMax = FetchAndOp <Type,AtomicMax<Type> ,BS,EQ>;
}
/* Use a templated class here so that it can be specialized for 1-byte (char) integer types below */
template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFPack link) {
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndInsert = FetchAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_FetchAndMult = FetchAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_FetchAndMin = FetchAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_FetchAndMax = FetchAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndLAND = FetchAndOp <Type,AtomicLAND<Type> ,BS,EQ>;
link->da_FetchAndLOR = FetchAndOp <Type,AtomicLOR<Type> ,BS,EQ>;
link->da_FetchAndLXOR = FetchAndOp <Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_FetchAndBAND = FetchAndOp <Type,AtomicBAND<Type> ,BS,EQ>;
link->da_FetchAndBOR = FetchAndOp <Type,AtomicBOR<Type> ,BS,EQ>;
link->da_FetchAndBXOR = FetchAndOp <Type,AtomicBXOR<Type> ,BS,EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template<typename Type,PetscInt BS,PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
  static void Init(PetscSFPack link) {/* Nothing to do; the atomic function pointers are left NULL */}
};
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_IntegerType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_FetchAndMult = FetchAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndMin = FetchAndOp <Type,Min<Type> ,BS,EQ>;
link->d_FetchAndMax = FetchAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndLAND = FetchAndOp <Type,LAND<Type> ,BS,EQ>;
link->d_FetchAndLOR = FetchAndOp <Type,LOR<Type> ,BS,EQ>;
link->d_FetchAndLXOR = FetchAndOp <Type,LXOR<Type> ,BS,EQ>;
link->d_FetchAndBAND = FetchAndOp <Type,BAND<Type> ,BS,EQ>;
link->d_FetchAndBOR = FetchAndOp <Type,BOR<Type> ,BS,EQ>;
link->d_FetchAndBXOR = FetchAndOp <Type,BXOR<Type> ,BS,EQ>;
PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_ComplexType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_FetchAndMult = FetchAndOp <Type,Mult<Type> ,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {int a; int b; } PairInt;
typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
template<typename Type>
static void PackInit_PairType(PetscSFPack link)
{
link->d_Pack = Pack<Type,1,1>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,1,1>;
  link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
  link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
  link->d_FetchAndInsert  = FetchAndOp <Type,Insert<Type>,1,1>;
  link->d_FetchAndMinloc  = FetchAndOp <Type,Minloc<Type>,1,1>;
  link->d_FetchAndMaxloc  = FetchAndOp <Type,Maxloc<Type>,1,1>;
/* Atomics for pair types are not implemented yet */
}
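/* Illustrative sketch (not compiled): the MINLOC/MAXLOC semantics provided by the Minloc/Maxloc
   functors near the top of this file on the pair types above; ties resolve to the smaller index,
   matching MPI_MINLOC/MPI_MAXLOC. */
#if 0
__device__ static void example_pair_minmaxloc(void)
{
  PairInt x = {5,3}, y = {5,1};
  Minloc<PairInt> minloc; minloc(x,y); /* x becomes {5,1}: equal values, so the smaller index wins */
  PairInt a = {2,7}, b = {4,0};
  Maxloc<PairInt> maxloc; maxloc(a,b); /* a becomes {4,0}: b carries the larger value */
}
#endif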
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_DumbType(PetscSFPack link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_FetchAndInsert = FetchAndOp <Type,Insert<Type>,BS,EQ>;
/* Atomics for dumb types are not implemented yet */
}
/*====================================================================================*/
/* Main driver to init MPI datatype on device */
/*====================================================================================*/
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFPackSetUp_Device(PetscSF sf,PetscSFPack link,MPI_Datatype unit)
{
PetscErrorCode ierr;
cudaError_t err;
PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
PetscBool is2Int,is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex=0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(0);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);CHKERRQ(ierr);
  /* MPI_CHAR is treated below as a dumb type that does not support reduction, per the MPI standard */
ierr = MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);CHKERRQ(ierr);
#endif
ierr = MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);CHKERRQ(ierr);
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
} else if (nPetscInt) {
if (nPetscInt == 8) PackInit_IntegerType<PetscInt,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<PetscInt,8,0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<PetscInt,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<PetscInt,4,0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<PetscInt,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<PetscInt,2,0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<PetscInt,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<PetscInt,1,0>(link);
#if defined(PETSC_USE_64BIT_INDICES)
} else if (nInt) {
if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
#endif
} else if (nSignedChar) {
if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
} else if (nUnsignedChar) {
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
#endif
} else {
MPI_Aint lb,nbyte;
ierr = MPI_Type_get_extent(unit,&lb,&nbyte);CHKERRQ(ierr);
if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
    if (nbyte % sizeof(int)) { /* If the type size is not a multiple of sizeof(int) */
if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
} else {
nInt = nbyte / sizeof(int);
if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
}
}
if (!sf_use_default_cuda_stream) {err = cudaStreamCreate(&link->stream);CHKERRCUDA(err);}
if (!sf->MAX_CORESIDENT_THREADS) {
int device;
struct cudaDeviceProp props;
err = cudaGetDevice(&device);CHKERRCUDA(err);
err = cudaGetDeviceProperties(&props,device);CHKERRCUDA(err);
sf->MAX_CORESIDENT_THREADS = props.maxThreadsPerMultiProcessor;
}
link->MAX_CORESIDENT_THREADS = sf->MAX_CORESIDENT_THREADS;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(0);
}
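/* Worked example of the dispatch above (illustrative): a unit equivalent to 3 contiguous MPIU_REALs
   gives nPetscReal = 3, which matches none of the ==8/%8/==4/%4/==2/%2/==1 tests and falls through to
   PackInit_RealType<PetscReal,1,0>, i.e. BS = 1 with EQ = 0 so the kernels use the runtime block size
   link->bs; a unit of exactly 8 reals instead selects PackInit_RealType<PetscReal,8,1>, where EQ = 1
   presumably lets bs be treated as the compile-time constant BS. */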
|
f14f3643013f663ac990d00d0e145acb886b24bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_b;
int xdim0_update_halo_kernel5_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_b;
int ydim0_update_halo_kernel5_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_b;
int xdim1_update_halo_kernel5_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_b;
int ydim1_update_halo_kernel5_plus_2_b_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_b*(y)+xdim0_update_halo_kernel5_plus_2_b*ydim0_update_halo_kernel5_plus_2_b*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_b*(y)+xdim1_update_halo_kernel5_plus_2_b*ydim1_update_halo_kernel5_plus_2_b*(z))
//user function
__device__
inline void update_halo_kernel5_plus_2_b(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = vol_flux_z[OPS_ACC0(0,-2,0)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = mass_flux_z[OPS_ACC1(0,-2,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_b(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel5_plus_2_b + idx_z * 1 * xdim0_update_halo_kernel5_plus_2_b * ydim0_update_halo_kernel5_plus_2_b;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel5_plus_2_b + idx_z * 1 * xdim1_update_halo_kernel5_plus_2_b * ydim1_update_halo_kernel5_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_b(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_2_b(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(116,"update_halo_kernel5_plus_2_b");
OPS_kernels[116].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel5_plus_2_b_h || ydim0 != ydim0_update_halo_kernel5_plus_2_b_h || xdim1 != xdim1_update_halo_kernel5_plus_2_b_h || ydim1 != ydim1_update_halo_kernel5_plus_2_b_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_b, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_2_b_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_b, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_2_b_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_b, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_2_b_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_b, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[116].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[116].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[116].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[116].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
f14f3643013f663ac990d00d0e145acb886b24bd.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_b;
int xdim0_update_halo_kernel5_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_b;
int ydim0_update_halo_kernel5_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_b;
int xdim1_update_halo_kernel5_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_b;
int ydim1_update_halo_kernel5_plus_2_b_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_b*(y)+xdim0_update_halo_kernel5_plus_2_b*ydim0_update_halo_kernel5_plus_2_b*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_b*(y)+xdim1_update_halo_kernel5_plus_2_b*ydim1_update_halo_kernel5_plus_2_b*(z))
//user function
__device__
inline void update_halo_kernel5_plus_2_b(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = vol_flux_z[OPS_ACC0(0,-2,0)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = mass_flux_z[OPS_ACC1(0,-2,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_b(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel5_plus_2_b + idx_z * 1 * xdim0_update_halo_kernel5_plus_2_b * ydim0_update_halo_kernel5_plus_2_b;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel5_plus_2_b + idx_z * 1 * xdim1_update_halo_kernel5_plus_2_b * ydim1_update_halo_kernel5_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_b(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_2_b(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(116,"update_halo_kernel5_plus_2_b");
OPS_kernels[116].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel5_plus_2_b_h || ydim0 != ydim0_update_halo_kernel5_plus_2_b_h || xdim1 != xdim1_update_halo_kernel5_plus_2_b_h || ydim1 != ydim1_update_halo_kernel5_plus_2_b_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_b, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_2_b_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_b, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_2_b_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_b, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_2_b_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_b, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[116].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_plus_2_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[116].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[116].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[116].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
cd9385975b7ce955080618b39e073e6763f5a9db.hip
|
// !!! This is a file automatically generated by hipify!!!
/** \file "template.cu" : implements the kernel for the "template" procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_VISCO
#define BLOCK_X 64
// BLOCK_Y : in radius
#define BLOCK_Y 4
#define invdiffrmed CRadiiStuff[ igp]
#define cs2 CRadiiStuff[(nr+1)*1 + igp]
#define invrmed CRadiiStuff[(nr+1)*2 + igp]
#define invrmedm CRadiiStuff[(nr+1)*2 + igp-1]
#define invrinf CRadiiStuff[(nr+1)*3 + igp]
#define rinf CRadiiStuff[(nr+1)*4 + igp]
#define rmed CRadiiStuff[(nr+1)*6 + igp]
#define rmedm CRadiiStuff[(nr+1)*6 + igp-1]
#define rsup CRadiiStuff[(nr+1)*8 + igp]
#define invdiffrsup CRadiiStuff[(nr+1)*10+ igp]
#define visco CRadiiStuff[(nr+1)*12+ igp]
#define alphaval CRadiiStuff[(nr+1)*13+ igp]
#define omega CRadiiStuff[(nr+1)*14+ igp]
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
// [RZS-MOD]
extern double SGAccInnerEdge, SGAccOuterEdge;
//__constant__ double CRadiiStuff[8192];
__device__ double CRadiiStuff[32768];
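// Layout note (illustrative): CRadiiStuff packs 15 radial arrays of length nr+1 back to back
// (the host copies (size_t)(15*(nr+1))*sizeof(double) into it below), and the macros above select
// array k at radial index igp via CRadiiStuff[(nr+1)*k + igp]; e.g. 'visco' reads entry igp of array 12.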
// calculate the adiabatic alpha viscosity value at each cell
__global__ void kernel_calc_alpha_visco (double *dens,
double *energy,
double *viscosity,
double alpha,
double adiabatic_index,
int ns,
int nr,
int pitch) {
const int jg = threadIdx.x + blockIdx.x * blockDim.x;
const int ig = threadIdx.y + blockIdx.y * blockDim.y;
const int idg = jg+ig*pitch;
const int igp = ig;
const double csa2 = adiabatic_index*(adiabatic_index-1.0)*energy[idg]/dens[idg];
  //  viscosity[idg] = alpha * csa2 * pow(rmed, 1.5); // alpha*cs^2/Omega
viscosity[idg] = alpha * csa2 / omega;
}
// calculate the dead-zone alpha viscosity value at each cell
__global__ void kernel_calc_dze_alpha_visco (double *dens,
double *energy,
double *viscosity,
double adiabatic_index,
double viscmod,
double viscmodr1,
double viscmoddeltar1,
double viscmodr2,
double viscmoddeltar2,
int ns,
int nr,
int pitch) {
const int jg = threadIdx.x + blockIdx.x * blockDim.x;
const int ig = threadIdx.y + blockIdx.y * blockDim.y;
const int idg = jg+ig*pitch;
const int igp = ig;
const double csa2 = adiabatic_index*(adiabatic_index-1.0)*energy[idg]/dens[idg];
  //viscosity[idg] = alphaval * csa2 * pow(rmed, 1.5); // alpha*cs^2/Omega
  viscosity[idg] = alphaval * csa2 / omega;            // alpha*cs^2/Omega
}
// calculate the density-dependent (adaptive) alpha viscosity value at each cell
__global__ void kernel_calc_adaptive_alpha_visco (double *dens,
double *energy,
double *viscosity,
double alpha_active,
double alpha_dead,
double alpha_smooth,
double sigma_thresh,
double adiabatic_index,
double aspect_ratio,
int ns,
int nr,
int pitch,
bool adiabatic) {
const int jg = threadIdx.x + blockIdx.x * blockDim.x;
const int ig = threadIdx.y + blockIdx.y * blockDim.y;
const int m = jg+ig*pitch;
const int igp = ig;
if (adiabatic) {
const double rho = dens[m];
const double alpha = (1.0-tanh ((rho-sigma_thresh) / (sigma_thresh * alpha_smooth * aspect_ratio))) * alpha_active * 0.5 + alpha_dead;
const double mycs2 = adiabatic_index*(adiabatic_index-1.0)*energy[m]/rho;
    //viscosity[m] = alpha * csa2 * pow(rmed, 1.5); // alpha*cs^2/Omega
    viscosity[m] = alpha * mycs2 / omega;  // alpha*cs^2/Omega
}
else {
const double alpha = (1.0-tanh ((dens[m]-sigma_thresh) / (sigma_thresh * alpha_smooth * aspect_ratio))) * alpha_active * 0.5 + alpha_dead;
    viscosity[m] = alpha * cs2 * pow(rmed, 1.5); // alpha*cs^2/Omega
/*const double rho = dens[m];
const double sigma_dead = rho-sigma_thresh;
//const double alpha = (sigma_dead > 0 ? (alpha_active*sigma_thresh+sigma_dead*alpha_dead)/rho : alpha_active);
const double alpha = (sigma_dead > 0 ? alpha_dead+alpha_active*exp(1-rho/sigma_thresh) : alpha_active);
    viscosity[m] = alpha * cs2 / omega; // alpha*cs^2/Omega */
}
}
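// In formula form (an illustrative reading of the kernel above): the blended viscosity parameter is
//   alpha(Sigma) = alpha_dead + 0.5*alpha_active*(1 - tanh((Sigma - sigma_thresh)/(sigma_thresh*alpha_smooth*aspect_ratio)))
// so alpha -> alpha_dead + alpha_active well below the threshold surface density and alpha -> alpha_dead
// well above it; the adiabatic branch then sets nu = alpha*cs^2/Omega, while the isothermal branch uses
// cs2*pow(rmed,1.5), i.e. cs^2/Omega for a Keplerian rotation profile.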
// locally isothermal non-adaptive viscosity (density independent)
__global__ void kernel_visco2d (double *vrad,
double *vtheta,
double *vradnew,
double *vthetanew,
double *dens,
double *viscosity,
double *tau_rr,
double *tau_rp,
double *tau_pp,
int ns,
int nr,
int pitch,
double invdphi,
double dt,
double vtheta_in,
double vtheta_out,
bool viscosity2d,
bool visc_heating) {
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int jgp = jg+1;
if (jg == ns-1) jgp = 0;
int jgm = jg-1;
if (jg == 0) jgm = ns-1;
int idg = __mul24(ig, pitch) + jg;
int ids = __mul24(is, blockDim.x+2) + js;
int lim, l, lip, ils, igp;
__shared__ double Trr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Tpp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Trp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double div_v[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double rho[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vt[(BLOCK_X+2)*(BLOCK_Y+2)];
// first get viscosity
double nu;
  // We perform a coalesced read of 'rho', 'vr' and 'vtheta' into shared memory
rho[ids] = dens[idg];
vr[ids] = vrad[idg];
vt[ids] = vtheta[idg];
// Some necessary exceptions on the edges:
// EDGE 1 : "LEFT EDGE"
if ((is == 2) && (js <= blockDim.y)) {
// read by second row...
int it = ig-2+js;
int jt = jg-js;
if (jt < 0) jt += ns;
ils = js*(blockDim.x+2);
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 2: "RIGHT EDGE".
// read by third row...
if ((is ==3) && (js <= blockDim.y)) {
int it = ig-3+js;
int jt = jg-js + blockDim.x+1;
if (jt > ns-1) jt -= ns;
ils = js*(blockDim.x+2)+blockDim.x+1;
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 3: "BOTTOM EDGE". Be careful not to read anything if in first row...
if ((is == 1) && (ig > 0)) {
rho[js] = dens[idg-(int)pitch];
vr[js] = vrad[idg-(int)pitch];
vt[js] = vtheta[idg-(int)pitch];
}
// EDGE 4: "TOP EDGE". Be careful not to read anything if in last row...
if ((is == blockDim.y) && (ig < nr-1)) {
rho[ids+blockDim.x+2] = dens[idg+(int)pitch];
vr[ids+blockDim.x+2] = vrad[idg+(int)pitch];
vt[ids+blockDim.x+2] = vtheta[idg+(int)pitch];
}
if ((is == blockDim.y) && (ig == nr-1)) {
vr[ids+blockDim.x+2] = 0.0;
vt[ids+blockDim.x+2] = 0.0;
rho[ids+blockDim.x+2] = 0.0;
}
// And now some corners... "Bottom-left" first;
if ((ig > 0) && (is == 1) && (js == 1)) {
rho[0] = GET_TAB (dens, jgm, ig-1, pitch);
vr[0] = GET_TAB (vrad, jgm, ig-1, pitch);
vt[0] = GET_TAB (vtheta, jgm, ig-1, pitch);
}
// now bottom-right
if ((ig > 0) && (is == 1) && (js == blockDim.x)) {
rho[blockDim.x+1] = GET_TAB (dens, jgp, ig-1, pitch);
vr[blockDim.x+1] = GET_TAB (vrad, jgp, ig-1, pitch);
vt[blockDim.x+1] = GET_TAB (vtheta, jgp, ig-1, pitch);
}
// now "top-left"... top-right is not needed
if ((ig < nr-1) && (is == blockDim.y) && (js == 1)) {
rho[ids+blockDim.x+1] = GET_TAB (dens, jgm, ig+1, pitch);
vr[ids+blockDim.x+1] = GET_TAB (vrad, jgm, ig+1, pitch);
vt[ids+blockDim.x+1] = GET_TAB (vtheta, jgm, ig+1, pitch);
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
if (viscosity2d) {
nu = viscosity[idg];
//divergence_vel[idg] = div_v[l];
}
else
nu = visco;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
// We need Trr & Tpp in bottom row
if ((ig > 0) && (is == 1)) {
igp = ig-1;
l = ids-blockDim.x-2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (viscosity2d) {
nu = viscosity[idg-ns];
//divergence_vel[idg] = div_v[l];
}
else
nu = visco;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Tpp in left column
if (js == 1) {
igp = ig;
l = ids-1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (viscosity2d) {
nu = viscosity[idg];
//divergence_vel[idg] = div_v[l];
}
else
nu = visco;
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Trp in right column and in top row. Top row first
if ((ig < nr-1) && (is == blockDim.y)) {
igp = ig+1;
l = ids+blockDim.x+2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if (viscosity2d)
nu = viscosity[idg+ns];
else
nu = visco;
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
// And now right column
if (js == blockDim.x) {
igp = ig;
l = ids+1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if (viscosity2d)
nu = viscosity[idg];
else
nu = visco;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if ((ig > 0) && (ig < nr-1)) {
vthetanew[idg] = vt[l] + dt*invrmed*((rsup*Trp[lip]-rinf*Trp[l])*invdiffrsup+(Tpp[l]-Tpp[l-1])*invdphi+0.5*(Trp[l]+Trp[lip]))/(0.5*(rho[l]+rho[l-1]));
}
if (ig > 0) {
vradnew[idg] = vr[l] + dt*invrinf*((rmed*Trr[l]-rmedm*Trr[lim])*invdiffrmed+(Trp[l+1]-Trp[l])*invdphi-0.5*(Tpp[l]+Tpp[lim]))/(0.5*(rho[l]+rho[lim]));
}
else {
vradnew[idg] = 0.0;
}
if (ig == 0)
vthetanew[idg] = vtheta_in;
if (ig == nr-1)
vthetanew[idg] = vtheta_out;
if (visc_heating) {
    // for an adiabatic disk (viscous heating) we need to store TauRR, TauRP, and TauPP
tau_rr[idg] = Trr[l];
tau_rp[idg] = Trp[l];
tau_pp[idg] = Tpp[l];
}
}
extern "C"
void ViscousTerms_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, PolarGrid *Energy, double dt,
PolarGrid *Vrad_ret, PolarGrid *Vtheta_ret) {
int nr, ns;
// double Vtheta_In, Vtheta_Out, OmegaIn, OmegaOut;
nr = Vrad->Nrad;
ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
double *Energy_gpu_field = NULL;
if (Adiabatic) {
Energy_gpu_field = Energy->gpu_field;
}
double *Viscosity_gpu_field = NULL;
if (Adiabatic || AdaptiveViscosity)
Viscosity_gpu_field = Viscosity->gpu_field;
double *TauRR_gpu_field = NULL, *TauRP_gpu_field = NULL, *TauPP_gpu_field = NULL;
if (ViscHeating) {
TauRR_gpu_field = TauRR->gpu_field;
TauRP_gpu_field = TauRP->gpu_field;
TauPP_gpu_field = TauPP->gpu_field;
}
  // calculate viscosity
  // for constant kinematic viscosity
  // adaptive alpha viscosity
if (AdaptiveViscosity) {
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_calc_adaptive_alpha_visco) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
Energy_gpu_field,
Viscosity_gpu_field,
ALPHAVISCOSITY,
ALPHAVISCOSITYDEAD,
ALPHASMOOTH,
ALPHASIGMATHRESH,
ADIABATICINDEX,
ASPECTRATIO,
ns,
nr,
Viscosity->pitch/sizeof(double),
Adiabatic);
hipDeviceSynchronize();
getLastCudaError ("kernel_calc_adaptive_alpha_visco failed");
}
// alpha viscosity with stationary dead zone
else if (ViscosityAlpha && Adiabatic && DeadZone) {
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_calc_dze_alpha_visco) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
Energy_gpu_field,
Viscosity_gpu_field,
ADIABATICINDEX,
DEADZONEALPHA,
DEADZONERIN,
DEADZONEDELTARIN,
DEADZONEROUT,
DEADZONEDELTAROUT,
ns,
nr,
Viscosity->pitch/sizeof(double));
hipDeviceSynchronize();
getLastCudaError ("kernel_calc_dze_alpha_visco failed");
}
  // pure alpha viscosity with an adiabatic gas EOS
else if (ViscosityAlpha && Adiabatic) {
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_calc_alpha_visco) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
Energy_gpu_field,
Viscosity_gpu_field,
ALPHAVISCOSITY,
ADIABATICINDEX,
ns,
nr,
Viscosity->pitch/sizeof(double));
hipDeviceSynchronize();
getLastCudaError ("kernel_calc_alpha_visco failed");
}
  // now we can calculate the viscous terms
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_visco2d) , dim3(grid), dim3(block) , 0, 0, Vrad->gpu_field,
Vtheta->gpu_field,
tmp1->gpu_field,
tmp2->gpu_field,
Rho->gpu_field,
Viscosity_gpu_field,
TauRR_gpu_field,
TauRP_gpu_field,
TauPP_gpu_field,
Rho->Nsec,
Rho->Nrad,
Rho->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt,
GasVelThetaMed[0],
GasVelThetaMed[nr-1],
(Adiabatic || AdaptiveViscosity),
ViscHeating);
hipDeviceSynchronize();
getLastCudaError ("kernel_visco2d failed");
FARGO_SAFE(ActualiseGas_gpu (Vrad_ret, tmp1));
FARGO_SAFE(ActualiseGas_gpu (Vtheta_ret, tmp2));
// HM
//double *temp;
// temp = Vrad->gpu_field;
// Vrad_ret->gpu_field = tmp1->gpu_field;
// VradNew->gpu_field = temp;
// temp = Vtheta->gpu_field;
// Vtheta_ret->gpu_field = tmp2->gpu_field;
// VthetaNew->gpu_field = temp;
}
// locally isothermal non-adaptive viscosity (density independent)
__global__ void kernel_visco1d (double *vrad,
double *vtheta,
double *vradnew,
double *vthetanew,
double *dens,
int ns,
int nr,
int pitch,
double invdphi,
double dt,
double vtheta_in,
double vtheta_out) {
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int jgp = jg+1;
if (jg == ns-1) jgp = 0;
int jgm = jg-1;
if (jg == 0) jgm = ns-1;
int idg = __mul24(ig, pitch) + jg;
int ids = __mul24(is, blockDim.x+2) + js;
int lim, l, lip, ils, igp;
__shared__ double Trr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Tpp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Trp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double div_v[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double rho[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vt[(BLOCK_X+2)*(BLOCK_Y+2)];
// first get viscosity
double nu=1e-6;
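// note: the viscosity is hard-wired to a small constant here; this kernel is used by
// ViscousTermsDust_gpu below, which applies no alpha prescription to the dust.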
// We perform a coalesced read of 'rho', 'vr' and 'vtheta' into the shared memory;
rho[ids] = dens[idg];
vr[ids] = vrad[idg];
vt[ids] = vtheta[idg];
// Some necessary exceptions on the edges:
// EDGE 1 : "LEFT EDGE"
if ((is == 2) && (js <= blockDim.y)) {
// read by second row...
int it = ig-2+js;
int jt = jg-js;
if (jt < 0) jt += ns;
ils = js*(blockDim.x+2);
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 2: "RIGHT EDGE".
// read by third row...
if ((is ==3) && (js <= blockDim.y)) {
int it = ig-3+js;
int jt = jg-js + blockDim.x+1;
if (jt > ns-1) jt -= ns;
ils = js*(blockDim.x+2)+blockDim.x+1;
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 3: "BOTTOM EDGE". Be careful not to read anything if in first row...
if ((is == 1) && (ig > 0)) {
rho[js] = dens[idg-(int)pitch];
vr[js] = vrad[idg-(int)pitch];
vt[js] = vtheta[idg-(int)pitch];
}
// EDGE 4: "TOP EDGE". Be careful not to read anything if in last row...
if ((is == blockDim.y) && (ig < nr-1)) {
rho[ids+blockDim.x+2] = dens[idg+(int)pitch];
vr[ids+blockDim.x+2] = vrad[idg+(int)pitch];
vt[ids+blockDim.x+2] = vtheta[idg+(int)pitch];
}
if ((is == blockDim.y) && (ig == nr-1)) {
vr[ids+blockDim.x+2] = 0.0;
vt[ids+blockDim.x+2] = 0.0;
rho[ids+blockDim.x+2] = 0.0;
}
// And now some corners... "Bottom-left" first;
if ((ig > 0) && (is == 1) && (js == 1)) {
rho[0] = GET_TAB (dens, jgm, ig-1, pitch);
vr[0] = GET_TAB (vrad, jgm, ig-1, pitch);
vt[0] = GET_TAB (vtheta, jgm, ig-1, pitch);
}
// now bottom-right
if ((ig > 0) && (is == 1) && (js == blockDim.x)) {
rho[blockDim.x+1] = GET_TAB (dens, jgp, ig-1, pitch);
vr[blockDim.x+1] = GET_TAB (vrad, jgp, ig-1, pitch);
vt[blockDim.x+1] = GET_TAB (vtheta, jgp, ig-1, pitch);
}
// now "top-left"... top-right is not needed
if ((ig < nr-1) && (is == blockDim.y) && (js == 1)) {
rho[ids+blockDim.x+1] = GET_TAB (dens, jgm, ig+1, pitch);
vr[ids+blockDim.x+1] = GET_TAB (vrad, jgm, ig+1, pitch);
vt[ids+blockDim.x+1] = GET_TAB (vtheta, jgm, ig+1, pitch);
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
// We need Trr & Tpp in bottom row
if ((ig > 0) && (is == 1)) {
igp = ig-1;
l = ids-blockDim.x-2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Tpp in left column
if (js == 1) {
igp = ig;
l = ids-1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Trp in right column and in top row. Top row first
if ((ig < nr-1) && (is == blockDim.y)) {
igp = ig+1;
l = ids+blockDim.x+2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
// And now right column
if (js == blockDim.x) {
igp = ig;
l = ids+1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if ((ig > 1) && (ig < nr-3))
vthetanew[idg] = vt[l] + dt*invrmed*((rsup*Trp[lip]-rinf*Trp[l])*invdiffrsup+(Tpp[l]-Tpp[l-1])*invdphi+0.5*(Trp[l]+Trp[lip]))/(0.5*(rho[l]+rho[l-1]));
else
vthetanew[idg] = 0;
if (ig > 0)
vradnew[idg] = vr[l] + dt*invrinf*((rmed*Trr[l]-rmedm*Trr[lim])*invdiffrmed+(Trp[l+1]-Trp[l])*invdphi-0.5*(Tpp[l]+Tpp[lim]))/(0.5*(rho[l]+rho[lim]));
else
vradnew[idg] = 0.0;
}
extern "C"
void ViscousTermsDust_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, double dt,
PolarGrid *Vrad_ret, PolarGrid *Vtheta_ret) {
int nr, ns;
nr = Vrad->Nrad;
ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
// now we can calculate viscous terms
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_visco1d) , dim3(grid), dim3(block) , 0, 0, Vrad->gpu_field,
Vtheta->gpu_field,
tmp1->gpu_field,
tmp2->gpu_field,
Rho->gpu_field,
Rho->Nsec,
Rho->Nrad,
Rho->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt,
GasVelThetaMed[0],
GasVelThetaMed[nr-1]);
hipDeviceSynchronize();
getLastCudaError ("kernel_visco1d failed");
FARGO_SAFE(ActualiseGas_gpu (Vrad_ret, tmp1));
FARGO_SAFE(ActualiseGas_gpu (Vtheta_ret, tmp2));
// HM
//double *temp;
// temp = Vrad->gpu_field;
// Vrad_ret->gpu_field = tmp1->gpu_field;
// VradNew->gpu_field = temp;
// temp = Vtheta->gpu_field;
// Vtheta_ret->gpu_field = tmp2->gpu_field;
// VthetaNew->gpu_field = temp;
}
|
cd9385975b7ce955080618b39e073e6763f5a9db.cu
|
/** \file "template.cu" : implements the kernel for the "template" procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_VISCO
#define BLOCK_X 64
// BLOCK_Y : in radius
#define BLOCK_Y 4
#define invdiffrmed CRadiiStuff[ igp]
#define cs2 CRadiiStuff[(nr+1)*1 + igp]
#define invrmed CRadiiStuff[(nr+1)*2 + igp]
#define invrmedm CRadiiStuff[(nr+1)*2 + igp-1]
#define invrinf CRadiiStuff[(nr+1)*3 + igp]
#define rinf CRadiiStuff[(nr+1)*4 + igp]
#define rmed CRadiiStuff[(nr+1)*6 + igp]
#define rmedm CRadiiStuff[(nr+1)*6 + igp-1]
#define rsup CRadiiStuff[(nr+1)*8 + igp]
#define invdiffrsup CRadiiStuff[(nr+1)*10+ igp]
#define visco CRadiiStuff[(nr+1)*12+ igp]
#define alphaval CRadiiStuff[(nr+1)*13+ igp]
#define omega CRadiiStuff[(nr+1)*14+ igp]
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
// [RZS-MOD]
extern double SGAccInnerEdge, SGAccOuterEdge;
//__constant__ double CRadiiStuff[8192];
__device__ double CRadiiStuff[32768];
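// Note: CRadiiStuff packs the per-ring radial quantities contiguously ((nr+1) doubles per
// quantity) and is decoded by the index macros above; the host refreshes the first
// 15*(nr+1) entries before each launch, and the rest of the buffer is presumably headroom
// for larger radial grids.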
// calculate adiabatic alpha viscosity value at each cell
__global__ void kernel_calc_alpha_visco (double *dens,
double *energy,
double *viscosity,
double alpha,
double adiabatic_index,
int ns,
int nr,
int pitch) {
const int jg = threadIdx.x + blockIdx.x * blockDim.x;
const int ig = threadIdx.y + blockIdx.y * blockDim.y;
const int idg = jg+ig*pitch;
const int igp = ig;
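// cs^2 = gamma*(gamma-1)*e/Sigma for an ideal gas; the cell viscosity below is then the
// Shakura-Sunyaev alpha prescription nu = alpha*cs^2/Omega.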
const double csa2 = adiabatic_index*(adiabatic_index-1.0)*energy[idg]/dens[idg];
// viscosity[idg] = alpha * csa2 * pow(rmed, 1.5); // alpha*cs^2/Omega
viscosity[idg] = alpha * csa2 / omega;
}
// calculate adiabatic alpha viscosity with a stationary dead zone at each cell
__global__ void kernel_calc_dze_alpha_visco (double *dens,
double *energy,
double *viscosity,
double adiabatic_index,
double viscmod,
double viscmodr1,
double viscmoddeltar1,
double viscmodr2,
double viscmoddeltar2,
int ns,
int nr,
int pitch) {
const int jg = threadIdx.x + blockIdx.x * blockDim.x;
const int ig = threadIdx.y + blockIdx.y * blockDim.y;
const int idg = jg+ig*pitch;
const int igp = ig;
const double csa2 = adiabatic_index*(adiabatic_index-1.0)*energy[idg]/dens[idg];
//viscosity[idg] = alphaval * csa2 * pow(rmed, 1.5); // alpha*cs^2/Omega
viscosity[idg] = alphaval * csa2 / omega; // alpha*cs^2/Omega
}
// calculate density-dependent alpha viscosity value at each cell
__global__ void kernel_calc_adaptive_alpha_visco (double *dens,
double *energy,
double *viscosity,
double alpha_active,
double alpha_dead,
double alpha_smooth,
double sigma_thresh,
double adiabatic_index,
double aspect_ratio,
int ns,
int nr,
int pitch,
bool adiabatic) {
const int jg = threadIdx.x + blockIdx.x * blockDim.x;
const int ig = threadIdx.y + blockIdx.y * blockDim.y;
const int m = jg+ig*pitch;
const int igp = ig;
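// The effective alpha is blended smoothly (via tanh) between roughly alpha_active (plus the
// alpha_dead floor) below the surface-density threshold and alpha_dead above it, so the
// dead zone adapts to the local density.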
if (adiabatic) {
const double rho = dens[m];
const double alpha = (1.0-tanh ((rho-sigma_thresh) / (sigma_thresh * alpha_smooth * aspect_ratio))) * alpha_active * 0.5 + alpha_dead;
const double mycs2 = adiabatic_index*(adiabatic_index-1.0)*energy[m]/rho;
//viscosity[m] = alpha * csa2 * pow(rmed, 1.5); // alpha*cs^2/Omega
viscosity[m] = alpha * mycs2 / omega; // alpha*cs^2/Omega
}
else {
const double alpha = (1.0-tanh ((dens[m]-sigma_thresh) / (sigma_thresh * alpha_smooth * aspect_ratio))) * alpha_active * 0.5 + alpha_dead;
viscosity[m] = alpha * cs2 * pow(rmed, 1.5); // alpha*cs^2/Omega
/*const double rho = dens[m];
const double sigma_dead = rho-sigma_thresh;
//const double alpha = (sigma_dead > 0 ? (alpha_active*sigma_thresh+sigma_dead*alpha_dead)/rho : alpha_active);
const double alpha = (sigma_dead > 0 ? alpha_dead+alpha_active*exp(1-rho/sigma_thresh) : alpha_active);
viscosity[m] = alpha * cs2 / omega; // alpha*cs^2/Omega */
}
}
// viscous source terms for the gas velocities; uses the per-cell viscosity field when
// viscosity2d is set, otherwise the constant (locally isothermal, density-independent) radial profile
__global__ void kernel_visco2d (double *vrad,
double *vtheta,
double *vradnew,
double *vthetanew,
double *dens,
double *viscosity,
double *tau_rr,
double *tau_rp,
double *tau_pp,
int ns,
int nr,
int pitch,
double invdphi,
double dt,
double vtheta_in,
double vtheta_out,
bool viscosity2d,
bool visc_heating) {
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int jgp = jg+1;
if (jg == ns-1) jgp = 0;
int jgm = jg-1;
if (jg == 0) jgm = ns-1;
int idg = __mul24(ig, pitch) + jg;
int ids = __mul24(is, blockDim.x+2) + js;
int lim, l, lip, ils, igp;
__shared__ double Trr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Tpp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Trp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double div_v[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double rho[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vt[(BLOCK_X+2)*(BLOCK_Y+2)];
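// Shared-memory tile of (BLOCK_X+2) x (BLOCK_Y+2) cells: the interior is filled by the
// coalesced read below, the one-cell halo by the edge/corner branches that follow.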
// first get viscosity
double nu;
// We perform a coalesced read of 'rho', 'vr' and 'vtheta' into the shared memory;
rho[ids] = dens[idg];
vr[ids] = vrad[idg];
vt[ids] = vtheta[idg];
// Some necessary exceptions on the edges:
// EDGE 1 : "LEFT EDGE"
if ((is == 2) && (js <= blockDim.y)) {
// read by second row...
int it = ig-2+js;
int jt = jg-js;
if (jt < 0) jt += ns;
ils = js*(blockDim.x+2);
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 2: "RIGHT EDGE".
// read by third row...
if ((is ==3) && (js <= blockDim.y)) {
int it = ig-3+js;
int jt = jg-js + blockDim.x+1;
if (jt > ns-1) jt -= ns;
ils = js*(blockDim.x+2)+blockDim.x+1;
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 3: "BOTTOM EDGE". Be careful not to read anything if in first row...
if ((is == 1) && (ig > 0)) {
rho[js] = dens[idg-(int)pitch];
vr[js] = vrad[idg-(int)pitch];
vt[js] = vtheta[idg-(int)pitch];
}
// EDGE 4: "TOP EDGE". Be careful not to read anything if in last row...
if ((is == blockDim.y) && (ig < nr-1)) {
rho[ids+blockDim.x+2] = dens[idg+(int)pitch];
vr[ids+blockDim.x+2] = vrad[idg+(int)pitch];
vt[ids+blockDim.x+2] = vtheta[idg+(int)pitch];
}
if ((is == blockDim.y) && (ig == nr-1)) {
vr[ids+blockDim.x+2] = 0.0;
vt[ids+blockDim.x+2] = 0.0;
rho[ids+blockDim.x+2] = 0.0;
}
// And now some corners... "Bottom-left" first;
if ((ig > 0) && (is == 1) && (js == 1)) {
rho[0] = GET_TAB (dens, jgm, ig-1, pitch);
vr[0] = GET_TAB (vrad, jgm, ig-1, pitch);
vt[0] = GET_TAB (vtheta, jgm, ig-1, pitch);
}
// now bottom-right
if ((ig > 0) && (is == 1) && (js == blockDim.x)) {
rho[blockDim.x+1] = GET_TAB (dens, jgp, ig-1, pitch);
vr[blockDim.x+1] = GET_TAB (vrad, jgp, ig-1, pitch);
vt[blockDim.x+1] = GET_TAB (vtheta, jgp, ig-1, pitch);
}
// now "top-left"... top-right is not needed
if ((ig < nr-1) && (is == blockDim.y) && (js == 1)) {
rho[ids+blockDim.x+1] = GET_TAB (dens, jgm, ig+1, pitch);
vr[ids+blockDim.x+1] = GET_TAB (vrad, jgm, ig+1, pitch);
vt[ids+blockDim.x+1] = GET_TAB (vtheta, jgm, ig+1, pitch);
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
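// Viscous stress components in polar coordinates: Trr and Tpp are cell-centred, Trp is
// corner-centred, and the -1/3*div_v term removes the trace of the shear tensor.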
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
if (viscosity2d) {
nu = viscosity[idg];
//divergence_vel[idg] = div_v[l];
}
else
nu = visco;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
// We need Trr & Tpp in bottom row
if ((ig > 0) && (is == 1)) {
igp = ig-1;
l = ids-blockDim.x-2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (viscosity2d) {
nu = viscosity[idg-ns];
//divergence_vel[idg] = div_v[l];
}
else
nu = visco;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Tpp in left column
if (js == 1) {
igp = ig;
l = ids-1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (viscosity2d) {
nu = viscosity[idg];
//divergence_vel[idg] = div_v[l];
}
else
nu = visco;
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Trp in right column and in top row. Top row first
if ((ig < nr-1) && (is == blockDim.y)) {
igp = ig+1;
l = ids+blockDim.x+2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if (viscosity2d)
nu = viscosity[idg+ns];
else
nu = visco;
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
// And now right column
if (js == blockDim.x) {
igp = ig;
l = ids+1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if (viscosity2d)
nu = viscosity[idg];
else
nu = visco;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
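// Update the velocities with the divergence of the stress tensor divided by the
// face-centred density; the innermost/outermost rings keep the imposed vtheta_in/vtheta_out.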
if ((ig > 0) && (ig < nr-1)) {
vthetanew[idg] = vt[l] + dt*invrmed*((rsup*Trp[lip]-rinf*Trp[l])*invdiffrsup+(Tpp[l]-Tpp[l-1])*invdphi+0.5*(Trp[l]+Trp[lip]))/(0.5*(rho[l]+rho[l-1]));
}
if (ig > 0) {
vradnew[idg] = vr[l] + dt*invrinf*((rmed*Trr[l]-rmedm*Trr[lim])*invdiffrmed+(Trp[l+1]-Trp[l])*invdphi-0.5*(Tpp[l]+Tpp[lim]))/(0.5*(rho[l]+rho[lim]));
}
else {
vradnew[idg] = 0.0;
}
if (ig == 0)
vthetanew[idg] = vtheta_in;
if (ig == nr-1)
vthetanew[idg] = vtheta_out;
if (visc_heating) {
// for viscous heating in an adiabatic disk we need to store TauRR, TauRP, and TauPP
tau_rr[idg] = Trr[l];
tau_rp[idg] = Trp[l];
tau_pp[idg] = Tpp[l];
}
}
extern "C"
void ViscousTerms_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, PolarGrid *Energy, double dt,
PolarGrid *Vrad_ret, PolarGrid *Vtheta_ret) {
int nr, ns;
// double Vtheta_In, Vtheta_Out, OmegaIn, OmegaOut;
nr = Vrad->Nrad;
ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
double *Energy_gpu_field = NULL;
if (Adiabatic) {
Energy_gpu_field = Energy->gpu_field;
}
double *Viscosity_gpu_field = NULL;
if (Adiabatic || AdaptiveViscosity)
Viscosity_gpu_field = Viscosity->gpu_field;
double *TauRR_gpu_field = NULL, *TauRP_gpu_field = NULL, *TauPP_gpu_field = NULL;
if (ViscHeating) {
TauRR_gpu_field = TauRR->gpu_field;
TauRP_gpu_field = TauRP->gpu_field;
TauPP_gpu_field = TauPP->gpu_field;
}
// calculate viscosity
// for constant kinematic viscosity
// adaptive alpha viscosity
if (AdaptiveViscosity) {
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, cudaMemcpyHostToDevice));
kernel_calc_adaptive_alpha_visco <<< grid, block >>> (Rho->gpu_field,
Energy_gpu_field,
Viscosity_gpu_field,
ALPHAVISCOSITY,
ALPHAVISCOSITYDEAD,
ALPHASMOOTH,
ALPHASIGMATHRESH,
ADIABATICINDEX,
ASPECTRATIO,
ns,
nr,
Viscosity->pitch/sizeof(double),
Adiabatic);
cudaThreadSynchronize();
getLastCudaError ("kernel_calc_adaptive_alpha_visco failed");
}
// alpha viscosity with stationary dead zone
else if (ViscosityAlpha && Adiabatic && DeadZone) {
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, cudaMemcpyHostToDevice));
kernel_calc_dze_alpha_visco <<< grid, block >>> (Rho->gpu_field,
Energy_gpu_field,
Viscosity_gpu_field,
ADIABATICINDEX,
DEADZONEALPHA,
DEADZONERIN,
DEADZONEDELTARIN,
DEADZONEROUT,
DEADZONEDELTAROUT,
ns,
nr,
Viscosity->pitch/sizeof(double));
cudaThreadSynchronize();
getLastCudaError ("kernel_calc_dze_alpha_visco failed");
}
// pure alpha viscosity with adiabatic gas eos
else if (ViscosityAlpha && Adiabatic) {
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, cudaMemcpyHostToDevice));
kernel_calc_alpha_visco <<< grid, block >>> (Rho->gpu_field,
Energy_gpu_field,
Viscosity_gpu_field,
ALPHAVISCOSITY,
ADIABATICINDEX,
ns,
nr,
Viscosity->pitch/sizeof(double));
cudaThreadSynchronize();
getLastCudaError ("kernel_calc_alpha_visco failed");
}
// now we can calculate viscous terms
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, cudaMemcpyHostToDevice));
kernel_visco2d <<< grid, block >>> (Vrad->gpu_field,
Vtheta->gpu_field,
tmp1->gpu_field,
tmp2->gpu_field,
Rho->gpu_field,
Viscosity_gpu_field,
TauRR_gpu_field,
TauRP_gpu_field,
TauPP_gpu_field,
Rho->Nsec,
Rho->Nrad,
Rho->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt,
GasVelThetaMed[0],
GasVelThetaMed[nr-1],
(Adiabatic || AdaptiveViscosity),
ViscHeating);
cudaThreadSynchronize();
getLastCudaError ("kernel_visco2d failed");
FARGO_SAFE(ActualiseGas_gpu (Vrad_ret, tmp1));
FARGO_SAFE(ActualiseGas_gpu (Vtheta_ret, tmp2));
// HM
//double *temp;
// temp = Vrad->gpu_field;
// Vrad_ret->gpu_field = tmp1->gpu_field;
// VradNew->gpu_field = temp;
// temp = Vtheta->gpu_field;
// Vtheta_ret->gpu_field = tmp2->gpu_field;
// VthetaNew->gpu_field = temp;
}
// locally isothermal non-adaptive viscosity (density independent)
__global__ void kernel_visco1d (double *vrad,
double *vtheta,
double *vradnew,
double *vthetanew,
double *dens,
int ns,
int nr,
int pitch,
double invdphi,
double dt,
double vtheta_in,
double vtheta_out) {
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int jgp = jg+1;
if (jg == ns-1) jgp = 0;
int jgm = jg-1;
if (jg == 0) jgm = ns-1;
int idg = __mul24(ig, pitch) + jg;
int ids = __mul24(is, blockDim.x+2) + js;
int lim, l, lip, ils, igp;
__shared__ double Trr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Tpp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double Trp[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double div_v[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double rho[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vr[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double vt[(BLOCK_X+2)*(BLOCK_Y+2)];
// first get viscosity
double nu=1e-6;
// We perform a coalesced read of 'rho', 'vr' and 'vtheta' into the shared memory;
rho[ids] = dens[idg];
vr[ids] = vrad[idg];
vt[ids] = vtheta[idg];
// Some necessary exceptions on the edges:
// EDGE 1 : "LEFT EDGE"
if ((is == 2) && (js <= blockDim.y)) {
// read by second row...
int it = ig-2+js;
int jt = jg-js;
if (jt < 0) jt += ns;
ils = js*(blockDim.x+2);
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 2: "RIGHT EDGE".
// read by third row...
if ((is ==3) && (js <= blockDim.y)) {
int it = ig-3+js;
int jt = jg-js + blockDim.x+1;
if (jt > ns-1) jt -= ns;
ils = js*(blockDim.x+2)+blockDim.x+1;
jt = jt+__mul24(it,pitch);
rho[ils] = dens[jt];
vr[ils] = vrad[jt];
vt[ils] = vtheta[jt];
}
// EDGE 3: "BOTTOM EDGE". Be careful not to read anything if in first row...
if ((is == 1) && (ig > 0)) {
rho[js] = dens[idg-(int)pitch];
vr[js] = vrad[idg-(int)pitch];
vt[js] = vtheta[idg-(int)pitch];
}
// EDGE 4: "TOP EDGE". Be careful not to read anything if in last row...
if ((is == blockDim.y) && (ig < nr-1)) {
rho[ids+blockDim.x+2] = dens[idg+(int)pitch];
vr[ids+blockDim.x+2] = vrad[idg+(int)pitch];
vt[ids+blockDim.x+2] = vtheta[idg+(int)pitch];
}
if ((is == blockDim.y) && (ig == nr-1)) {
vr[ids+blockDim.x+2] = 0.0;
vt[ids+blockDim.x+2] = 0.0;
rho[ids+blockDim.x+2] = 0.0;
}
// And now some corners... "Bottom-left" first;
if ((ig > 0) && (is == 1) && (js == 1)) {
rho[0] = GET_TAB (dens, jgm, ig-1, pitch);
vr[0] = GET_TAB (vrad, jgm, ig-1, pitch);
vt[0] = GET_TAB (vtheta, jgm, ig-1, pitch);
}
// now bottom-right
if ((ig > 0) && (is == 1) && (js == blockDim.x)) {
rho[blockDim.x+1] = GET_TAB (dens, jgp, ig-1, pitch);
vr[blockDim.x+1] = GET_TAB (vrad, jgp, ig-1, pitch);
vt[blockDim.x+1] = GET_TAB (vtheta, jgp, ig-1, pitch);
}
// now "top-left"... top-right is not needed
if ((ig < nr-1) && (is == blockDim.y) && (js == 1)) {
rho[ids+blockDim.x+1] = GET_TAB (dens, jgm, ig+1, pitch);
vr[ids+blockDim.x+1] = GET_TAB (vrad, jgm, ig+1, pitch);
vt[ids+blockDim.x+1] = GET_TAB (vtheta, jgm, ig+1, pitch);
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
// We need Trr & Tpp in bottom row
if ((ig > 0) && (is == 1)) {
igp = ig-1;
l = ids-blockDim.x-2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Trr[l] = (vr[lip]-vr[l])*invdiffrsup;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
Trr[l] = 2.0*rho[l]*nu*(Trr[l]-(1.0/3.0)*div_v[l]);
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Tpp in left column
if (js == 1) {
igp = ig;
l = ids-1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Tpp[l] = ((vt[l+1]-vt[l])*invdphi+0.5*(vr[lip]+vr[l]))*invrmed;
div_v[l] = (vr[lip]*rsup-vr[l]*rinf)*invdiffrsup;
div_v[l] += (vt[l+1]-vt[l])*invdphi;
div_v[l] *= invrmed;
Tpp[l] = 2.0*rho[l]*nu*(Tpp[l]-(1.0/3.0)*div_v[l]);
}
// We need Trp in right column and in top row. Top row first
if ((ig < nr-1) && (is == blockDim.y)) {
igp = ig+1;
l = ids+blockDim.x+2;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
// And now right column
if (js == blockDim.x) {
igp = ig;
l = ids+1;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
//nu = 1e-7;
if (ig > 0)
Trp[l] = 0.5*(rinf*((vt[l]+1.0/sqrt(rmed))*invrmed-(vt[lim]+1.0/sqrt(rmedm))*invrmedm)*invdiffrmed+(vr[l]-vr[l-1])*invdphi*invrinf);
else
Trp[l] = 0.0;
Trp[l] = 0.5*(rho[l]+rho[l-1]+rho[lim]+rho[lim-1])*nu*Trp[l];
}
__syncthreads ();
igp = ig;
l = ids;
lip = l + blockDim.x+2;
lim = l - blockDim.x-2;
if ((ig > 1) && (ig < nr-3))
vthetanew[idg] = vt[l] + dt*invrmed*((rsup*Trp[lip]-rinf*Trp[l])*invdiffrsup+(Tpp[l]-Tpp[l-1])*invdphi+0.5*(Trp[l]+Trp[lip]))/(0.5*(rho[l]+rho[l-1]));
else
vthetanew[idg] = 0;
if (ig > 0)
vradnew[idg] = vr[l] + dt*invrinf*((rmed*Trr[l]-rmedm*Trr[lim])*invdiffrmed+(Trp[l+1]-Trp[l])*invdphi-0.5*(Tpp[l]+Tpp[lim]))/(0.5*(rho[l]+rho[lim]));
else
vradnew[idg] = 0.0;
}
extern "C"
void ViscousTermsDust_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, double dt,
PolarGrid *Vrad_ret, PolarGrid *Vtheta_ret) {
int nr, ns;
nr = Vrad->Nrad;
ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
// now we can calculate viscous terms
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(15*(nr+1))*sizeof(double),0, cudaMemcpyHostToDevice));
kernel_visco1d <<< grid, block >>> (Vrad->gpu_field,
Vtheta->gpu_field,
tmp1->gpu_field,
tmp2->gpu_field,
Rho->gpu_field,
Rho->Nsec,
Rho->Nrad,
Rho->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt,
GasVelThetaMed[0],
GasVelThetaMed[nr-1]);
cudaThreadSynchronize();
getLastCudaError ("kernel_visco1d failed");
FARGO_SAFE(ActualiseGas_gpu (Vrad_ret, tmp1));
FARGO_SAFE(ActualiseGas_gpu (Vtheta_ret, tmp2));
// HM
//double *temp;
// temp = Vrad->gpu_field;
// Vrad_ret->gpu_field = tmp1->gpu_field;
// VradNew->gpu_field = temp;
// temp = Vtheta->gpu_field;
// Vtheta_ret->gpu_field = tmp2->gpu_field;
// VthetaNew->gpu_field = temp;
}
|
8b9200738e5f1edf3d6e69598a004c4849562904.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
Dtype momentum, Dtype delta, Dtype local_rate) {
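// AdaDelta: h accumulates a decaying average of squared gradients, h2 of squared updates;
// the raw gradient is rescaled by RMS(update)/RMS(gradient) before the learning-rate multiply.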
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
gi = gi * sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
g[i] = local_rate * gi;
}
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
Dtype delta, Dtype local_rate) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(AdaDeltaUpdate<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, g, h, h2, momentum, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
double, double, double);
} // namespace caffe
|
8b9200738e5f1edf3d6e69598a004c4849562904.cu
|
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
Dtype momentum, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
gi = gi * sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
g[i] = local_rate * gi;
}
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
Dtype delta, Dtype local_rate) {
AdaDeltaUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, h2, momentum, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
double, double, double);
} // namespace caffe
|
9c1233d4e05610bd286fb95b590283baf90506cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <windows.h>
void checkResult(float *A, float *B, const int nx, const int ny)
{
int i = 0;
int j = 0;
int cnt = 0;
double err = 1.0E-6;
for (j = 0; j < ny; j++)
{
for (i = 0; i < nx; i++)
{
if (fabs(A[cnt] - B[cnt]) > err)
{
printf("Do not match...\n");
return;
}
cnt++;
}
}
printf("matched!\n");
}
void initialData(float *a, int nx, int ny)
{
int i = nx;
int j = ny;
int cnt = 0;
for (j = 0; j < ny; j++)
{
for (i = 0; i < nx; i++)
{
a[cnt] = cnt;
cnt++;
}
}
}
// summary matrix on CPU
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
int i = 0;
int j = 0;
int cnt = 0;
for (j = 0; j < ny; j++)
{
for (i = 0; i < nx; i++)
{
C[cnt] = A[cnt] + B[cnt];
cnt++;
}
}
}
void PrintMatrix(float *a)
{
int i;
for (i = 1024*1024-10; i < 1024*1024; i++)
{
printf("%f ", a[i]);
}
printf("\n");
}
// summary matrix on GPU
__global__ void sumMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int idx = y*nx + 2*x;
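// each thread sums two consecutive elements in x (hence the nx/2-wide grid in main)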
if (x < nx && y < ny)
{
for (int j = 0; j < 2; j++)
{
C[idx + j] = A[idx + j] + B[idx + j];
}
}
}
__global__ void Book(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int nxthreads = gridDim.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ix2 = ix + nxthreads;
unsigned int idx = iy * nx + ix;
unsigned int idx2 = iy * nx + ix2;
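// book variant: each thread also handles a second element offset by the total thread
// count in x (ix2), covering the row with half as many blocks in x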
if (iy < ny)
{
if (ix < nx)
MatC[idx] = MatA[idx] + MatB[idx];
if (ix2 < nx)
MatC[idx2] = MatA[idx2] + MatB[idx2];
}
}
__global__ void test()
{
printf("hello\n");
}
int main(int argc, char *argv[])
{
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
hipSetDevice(dev);
int nx = 1 << 10; // the GPU is the limiting factor here: the book uses nx = ny = 1<<14 (so nx*ny = 1<<28), but my card cannot handle that
int ny = 1 << 10;
int nxy = nx * ny;
int nBytes = sizeof(float)*nxy;
printf("Matrix size: nx:%d, ny:%d\n", nx, ny);
float *h_A, *h_B, *h_C, *gpuRef;
float *d_A, *d_B, *d_C;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
memset(gpuRef, 0, nBytes);
hipMalloc((void **)&d_A, nBytes);
hipMalloc((void **)&d_B, nBytes);
hipMalloc((void **)&d_C, nBytes);
// initialize the data
initialData(h_A, nx, ny);
initialData(h_B, nx, ny);
// copy the data from CPU to GPU
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// call the summary function
sumMatrixOnHost(h_A, h_B, h_C, nx, ny);
dim3 block(32);
dim3 grid((nx/2 + block.x - 1) / block.x, ny);
LARGE_INTEGER begin_me, begin_book;
LARGE_INTEGER end_me, end_book;
LARGE_INTEGER freq_me, freq_book;
printf("sumMatrixOnGPU <<<(%d, %d), (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
QueryPerformanceCounter(&freq_me);
QueryPerformanceCounter(&begin_me);
hipLaunchKernelGGL(sumMatrixOnGPU, grid, block, 0, 0, d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
QueryPerformanceCounter(&end_me);
printf("GPU time consumption:%f ms\n", 1000 * (float)(end_me.QuadPart - begin_me.QuadPart) / (float)freq_me.QuadPart);
// copy the data from GPU to CPU
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check the result
checkResult(h_C, gpuRef, nx, ny);
/////////////////////////////////////////////////////////
// book kernel function //
/////////////////////////////////////////////////////////
QueryPerformanceCounter(&freq_book);
QueryPerformanceCounter(&begin_book);
hipLaunchKernelGGL(Book, grid, block, 0, 0, d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
QueryPerformanceCounter(&end_book);
printf("GPU time consumption:%f ms\n", 1000 * (float)(end_book.QuadPart - begin_book.QuadPart) / (float)freq_book.QuadPart);
// copy the data from GPU to CPU
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check the result
checkResult(h_C, gpuRef, nx, ny);
// free the memory
free(h_A);
free(h_B);
free(h_C);
free(gpuRef);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipDeviceReset();
return 0;
}
|
9c1233d4e05610bd286fb95b590283baf90506cd.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <windows.h>
void checkResult(float *A, float *B, const int nx, const int ny)
{
int i = 0;
int j = 0;
int cnt = 0;
double err = 1.0E-6;
for (j = 0; j < ny; j++)
{
for (i = 0; i < nx; i++)
{
if (fabs(A[cnt] - B[cnt]) > err)
{
printf("Do not match...\n");
return;
}
cnt++;
}
}
printf("matched!\n");
}
void initialData(float *a, int nx, int ny)
{
int i = nx;
int j = ny;
int cnt = 0;
for (j = 0; j < ny; j++)
{
for (i = 0; i < nx; i++)
{
a[cnt] = cnt;
cnt++;
}
}
}
// summary matrix on CPU
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
int i = 0;
int j = 0;
int cnt = 0;
for (j = 0; j < ny; j++)
{
for (i = 0; i < nx; i++)
{
C[cnt] = A[cnt] + B[cnt];
cnt++;
}
}
}
void PrintMatrix(float *a)
{
int i;
for (i = 1024*1024-10; i < 1024*1024; i++)
{
printf("%f ", a[i]);
}
printf("\n");
}
// summary matrix on GPU
__global__ void sumMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int idx = y*nx + 2*x;
if (x < nx && y < ny)
{
for (int j = 0; j < 2; j++)
{
C[idx + j] = A[idx + j] + B[idx + j];
}
}
}
__global__ void Book(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int nxthreads = gridDim.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ix2 = ix + nxthreads;
unsigned int idx = iy * nx + ix;
unsigned int idx2 = iy * nx + ix2;
if (iy < ny)
{
if (ix < nx)
MatC[idx] = MatA[idx] + MatB[idx];
if (ix2 < nx)
MatC[idx2] = MatA[idx2] + MatB[idx2];
}
}
__global__ void test()
{
printf("hello\n");
}
int main(int argc, char *argv[])
{
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
cudaSetDevice(dev);
int nx = 1 << 10; // the GPU is the limiting factor here: the book uses nx = ny = 1<<14 (so nx*ny = 1<<28), but my card cannot handle that
int ny = 1 << 10;
int nxy = nx * ny;
int nBytes = sizeof(float)*nxy;
printf("Matrix size: nx:%d, ny:%d\n", nx, ny);
float *h_A, *h_B, *h_C, *gpuRef;
float *d_A, *d_B, *d_C;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
memset(gpuRef, 0, nBytes);
cudaMalloc((void **)&d_A, nBytes);
cudaMalloc((void **)&d_B, nBytes);
cudaMalloc((void **)&d_C, nBytes);
// initialize the data
initialData(h_A, nx, ny);
initialData(h_B, nx, ny);
// copy the data from CPU to GPU
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// call the summary function
sumMatrixOnHost(h_A, h_B, h_C, nx, ny);
dim3 block(32);
dim3 grid((nx/2 + block.x - 1) / block.x, ny);
LARGE_INTEGER begin_me, begin_book;
LARGE_INTEGER end_me, end_book;
LARGE_INTEGER freq_me, freq_book;
printf("sumMatrixOnGPU <<<(%d, %d), (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
QueryPerformanceCounter(&freq_me);
QueryPerformanceCounter(&begin_me);
sumMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
QueryPerformanceCounter(&end_me);
printf("GPU time consumption:%f ms\n", 1000 * (float)(end_me.QuadPart - begin_me.QuadPart) / (float)freq_me.QuadPart);
// copy the data from GPU to CPU
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// check the result
checkResult(h_C, gpuRef, nx, ny);
/////////////////////////////////////////////////////////
// book kernel function //
/////////////////////////////////////////////////////////
QueryPerformanceCounter(&freq_book);
QueryPerformanceCounter(&begin_book);
Book << <grid, block >> >(d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
QueryPerformanceCounter(&end_book);
printf("GPU time consumption:%f ms\n", 1000 * (float)(end_book.QuadPart - begin_book.QuadPart) / (float)freq_book.QuadPart);
// copy the data from GPU to CPU
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// check the result
checkResult(h_C, gpuRef, nx, ny);
// free the memory
free(h_A);
free(h_B);
free(h_C);
free(gpuRef);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
return 0;
}
|
3923027a99f01e15458865c66ac330c412ae444b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
#define BLOCK_SIZE 32
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float result = 0;
int2 act_pos;
float filterValue = 0;
unsigned char inputData = 0;
const int filterOffsetSize = filterWidth / 2;
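// weighted sum over the filter window centred on this pixel; out-of-range neighbours
// are clamped to the nearest edge pixel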
for(int filter_pos_x = 0; filter_pos_x < filterWidth; filter_pos_x++) {
for(int filter_pos_y = 0; filter_pos_y < filterWidth; filter_pos_y++) {
act_pos.x = thread_2D_pos.x - filterOffsetSize + filter_pos_x;
act_pos.y = thread_2D_pos.y - filterOffsetSize + filter_pos_y;
if(act_pos.x < 0)
act_pos.x = 0;
if(act_pos.x >= numCols)
act_pos.x = numCols - 1;
if(act_pos.y < 0)
act_pos.y = 0;
if(act_pos.y >= numRows)
act_pos.y = numRows - 1;
inputData = inputChannel[act_pos.y * numCols + act_pos.x];
filterValue = filter[filter_pos_y * filterWidth + filter_pos_x];
result += filterValue * inputData;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
size_t filterSize = sizeof(float) * filterWidth * filterWidth;
//Allocate memory for the filter on the GPU
checkCudaErrors(hipMalloc(&d_filter, filterSize));
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU.
checkCudaErrors(hipMemcpy(d_filter, h_filter, filterSize, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize( ( numCols + blockSize.x - 1 ) / blockSize.x, ( numRows + blockSize.y - 1 ) / blockSize.y);
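// pipeline: split RGBA into per-channel planes, blur each plane with the same filter,
// then recombine into RGBA (alpha forced to 255)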
//Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//Call convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
3923027a99f01e15458865c66ac330c412ae444b.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
#define BLOCK_SIZE 32
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float result = 0;
int2 act_pos;
float filterValue = 0;
unsigned char inputData = 0;
const int filterOffsetSize = filterWidth / 2;
for(int filter_pos_x = 0; filter_pos_x < filterWidth; filter_pos_x++) {
for(int filter_pos_y = 0; filter_pos_y < filterWidth; filter_pos_y++) {
act_pos.x = thread_2D_pos.x - filterOffsetSize + filter_pos_x;
act_pos.y = thread_2D_pos.y - filterOffsetSize + filter_pos_y;
if(act_pos.x < 0)
act_pos.x = 0;
if(act_pos.x >= numCols)
act_pos.x = numCols - 1;
if(act_pos.y < 0)
act_pos.y = 0;
if(act_pos.y >= numRows)
act_pos.y = numRows - 1;
inputData = inputChannel[act_pos.y * numCols + act_pos.x];
filterValue = filter[filter_pos_y * filterWidth + filter_pos_x];
result += filterValue * inputData;
}
}
outputChannel[thread_1D_pos] = result;
}
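//****************************************************************************
// NOTE: the kernel below is NOT part of the original assignment code. It is a
// minimal, untested sketch of the shared-memory optimization suggested in the
// header comments: each block stages its BLOCK_SIZE x BLOCK_SIZE output tile
// plus a halo of filterWidth/2 pixels into shared memory before convolving.
// It assumes blockDim.x == blockDim.y == BLOCK_SIZE and an odd filterWidth no
// larger than MAX_FILTER_WIDTH (a cap introduced here only so the shared tile
// can be statically sized). The name gaussian_blur_shared is hypothetical.
//****************************************************************************
#define MAX_FILTER_WIDTH 15
__global__
void gaussian_blur_shared(const unsigned char* const inputChannel,
                          unsigned char* const outputChannel,
                          int numRows, int numCols,
                          const float* const filter, const int filterWidth)
{
  __shared__ unsigned char tile[BLOCK_SIZE + MAX_FILTER_WIDTH - 1]
                               [BLOCK_SIZE + MAX_FILTER_WIDTH - 1];
  const int halo = filterWidth / 2;
  const int tileDim = BLOCK_SIZE + 2 * halo;
  // Cooperatively load the tile (interior plus halo), clamping reads to the image border.
  for (int ty = threadIdx.y; ty < tileDim; ty += blockDim.y) {
    for (int tx = threadIdx.x; tx < tileDim; tx += blockDim.x) {
      int srcX = (int)(blockIdx.x * blockDim.x) + tx - halo;
      int srcY = (int)(blockIdx.y * blockDim.y) + ty - halo;
      srcX = min(max(srcX, 0), numCols - 1);
      srcY = min(max(srcY, 0), numRows - 1);
      tile[ty][tx] = inputChannel[srcY * numCols + srcX];
    }
  }
  __syncthreads();
  const int outX = blockIdx.x * blockDim.x + threadIdx.x;
  const int outY = blockIdx.y * blockDim.y + threadIdx.y;
  if (outX >= numCols || outY >= numRows)
    return;
  // Convolve entirely out of shared memory; indexing mirrors the global-memory kernel above.
  float result = 0.0f;
  for (int fy = 0; fy < filterWidth; ++fy)
    for (int fx = 0; fx < filterWidth; ++fx)
      result += filter[fy * filterWidth + fx] * tile[threadIdx.y + fy][threadIdx.x + fx];
  outputChannel[outY * numCols + outX] = result;
}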
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
size_t filterSize = sizeof(float) * filterWidth * filterWidth;
//Allocate memory for the filter on the GPU
checkCudaErrors(cudaMalloc(&d_filter, filterSize));
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU.
checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterSize, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize( ( numCols + blockSize.x - 1 ) / blockSize.x, ( numRows + blockSize.y - 1 ) / blockSize.y);
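//For example, a 1000x600 image with 32x32 blocks gives gridSize = ((1000+31)/32, (600+31)/32) = (32, 19).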
//Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//Call convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
2f65de0a751868779c01263c6c080b3e2de3aff7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//----------------------------------------------------------------------
/*!\file gpu_algorithms/templateRegistration.cu
*
* \author Felix Laufer
*
*
* CUDA: Fast rotation-invariant template registration on large 2D matrices
*
*/
//----------------------------------------------------------------------
#include <hipfft.h>
#include "gpu_algorithms/phaseCorrelation.cu"
#include "gpu_algorithms/debugPrint.cu"
#include <stdio.h>
namespace gpu_algorithms
{
namespace cuda
{
namespace template_registration
{
//----------------------------------------------------------------------
// Kernel functions
//----------------------------------------------------------------------
// Multiplication of a complex signal a's magnitude with another unchanged complex signal b. Optionally shift the output.
template<bool param_inverse_shift>
static __global__ void ComplexPointwiseMagnitudeMulAndScale(const Complex* a, const Complex* b, Complex* out, const unsigned int stream_size, const unsigned int matrix_size, const float normalization_factor, const bool allow_highpass)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < stream_size; i += numThreads)
{
Complex magnitude = (Complex) {sqrtf(a[i].x * a[i].x + a[i].y * a[i].y), 0.0f};
Complex product = (allow_highpass) ? ComplexMul(magnitude, b[i]) : magnitude;
unsigned int index = i;
if (param_inverse_shift)
{
int y = i / matrix_size;
int x = i - y * matrix_size;
index = SequentialIndex2DInverseFFTShift(x, y, matrix_size);
}
out[index] = (Complex) {product.x / normalization_factor, product.y / normalization_factor};
}
}
// Calculate a data stream of complex point-wise mean squared errors of the given input streams
static __global__ void ComplexPointwiseMeanSquaredError(const Complex *a, const Complex *b, Complex *out, const unsigned int stream_size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < stream_size; i += numThreads)
{
Complex difference = (Complex) {a[i].x - b[i].x, a[i].y - b[i].y};
Complex difference_squared = ComplexMul(difference, difference);
out[i] = difference_squared;
}
}
// Transformation of a complex cartesian matrix to polar space. Optionally zero-pad and shift the output.
static __global__ void Cartesian2PolarTransform(const Complex *idata, Complex *odata, const unsigned int rho_theta_matrix_stream_size, const unsigned int matrix_size, const unsigned int rho_size, const unsigned int theta_size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int radius = (matrix_size - 1) / 2 + 1;
const float step_rho = (float) radius / rho_size;
//const float step_rho = sqrtf(2.0f * radius * radius) / rho_size; // TODO: Which one is better?
const float step_theta = 1.0f * M_PI / theta_size;
for (unsigned int i = threadID; i < rho_theta_matrix_stream_size; i += numThreads)
{
const unsigned int theta_n = i / rho_size;
const unsigned int rho_n = i - theta_n * rho_size;
Real data;
if (rho_n >= rho_size || theta_n >= theta_size)
{
data = 0.0f;
}
else
{
const float rho = rho_n * step_rho;
const float theta = theta_n * step_theta;
float x = rho * cos(theta) + (matrix_size - 1) / 2;
float y = rho * sin(theta) + (matrix_size - 1) / 2;
y = (float)matrix_size - 1.0f - y;
data = BilinearInterpolation(x, y, idata, matrix_size);
}
odata[i].x = data;
odata[i].y = 0.0f;
}
}
// Real to Complex with optional circular shift and optional weighting
template<bool param_shift, bool param_weighted>
static __global__ void Real2ComplexPadAndShift(const Real *idata, Complex *odata, const unsigned int size, const unsigned int matrix_size, const unsigned int matrix_size_expanded, const Real *weights)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int o_i_block_offset = (matrix_size_expanded - 1) / 2 - (matrix_size - 1) / 2;
for (unsigned int i = threadID; i < size; i += numThreads)
{
int o_block_y = i / matrix_size_expanded;
int o_block_x = i - o_block_y * matrix_size_expanded;
const int i_block_x = o_block_x - o_i_block_offset;
const int i_block_y = o_block_y - o_i_block_offset;
Real data;
if(!(0 <= i_block_x && i_block_x < matrix_size && 0 <= i_block_y && i_block_y < matrix_size))
{
data = 0.0f;
}
else
{
const int i_matrix_x = i_block_x;
const int i_matrix_y = i_block_y;
Real weight = param_weighted ? weights[o_block_y * matrix_size_expanded + o_block_x] : 1.0f;
const bool is_valid_coordinate = (0 <= i_matrix_x && i_matrix_x < matrix_size && 0 <= i_matrix_y && i_matrix_y < matrix_size);
data = is_valid_coordinate ? idata[i_matrix_y * matrix_size + i_matrix_x] * weight: 0.0f;
}
unsigned int index = i;
if (param_shift)
{
index = SequentialIndex2DFFTShift(o_block_x, o_block_y, matrix_size_expanded);
}
odata[index].x = data;
odata[index].y = 0.0f;
}
}
// Generate a high pass kernel in time domain
template<bool param_shift>
static __global__ void HighPassKernel(Complex *odata, const unsigned int size, const unsigned int matrix_size, const float sigma)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int offset = (matrix_size - 1) / 2;
for (unsigned int i = threadID; i < size; i += numThreads)
{
const unsigned int y = i / matrix_size;
const unsigned int x = i - y * matrix_size;
const int x_o = x - offset;
const int y_o = y - offset;
float s = 2 * sigma * sigma;
float gaussian_lowpass = 1.0f / (M_PI * s) * (expf(-(x_o * x_o + y_o * y_o) / s));
float gaussian_highpass = (x_o == 0 && y_o == 0) ? 2.0f - gaussian_lowpass : -gaussian_lowpass;
unsigned int index = i;
if (param_shift)
{
index = SequentialIndex2DFFTShift(x, y, matrix_size);
}
odata[index] = (Complex) {gaussian_highpass, 0.0f};
}
}
//----------------------------------------------------------------------
// Host functions
//----------------------------------------------------------------------
static __host__ float MeanSquaredError(const Complex *a, const Complex *b, const unsigned int nx, const unsigned int ny)
{
const unsigned int max_stream_threads_per_block = 256;
const unsigned int frame_stream_size = nx * ny;
const unsigned int stream_threads_per_block = min(max_stream_threads_per_block, frame_stream_size);
const unsigned int stream_blocks = ceil(frame_stream_size / (float) stream_threads_per_block);
Complex *errors;
hipMalloc((void**)&errors, frame_stream_size * sizeof(Complex));
const dim3 grid(ceil(frame_stream_size / (float) stream_threads_per_block));
const dim3 block(stream_threads_per_block);
// Calculate point-wise errors
hipLaunchKernelGGL(( ComplexPointwiseMeanSquaredError)
, dim3(grid), dim3(block), 0, 0,
a, b, errors, frame_stream_size
);
// Sum up point-wise errors
Complex squared_mean_error = (Complex) {0.0f, 0.0f};
SumReduce(errors, &squared_mean_error, frame_stream_size);
hipFree(errors);
return squared_mean_error.x;
}
template <bool param_rotation_allowed>
static __host__ void TanslationRotationEstimation(const Real *iframe_a_data, const Real *iframe_b_data, const unsigned int frame_a_nx, const unsigned int frame_b_nx, const bool allow_highpass_filtering = true, const unsigned int max_degree_resolution = 180)
{
// Computation threads per block for 1d data streams
const unsigned int stream_threads_per_block = 256;
const unsigned int frame_a_stream_size = frame_a_nx * frame_a_nx;
const unsigned int frame_b_stream_size = frame_b_nx * frame_b_nx;
const unsigned int nx = frame_a_nx;
const unsigned int frame_stream_size = frame_a_stream_size;
Real *frame_a_data,
*frame_b_data;
Complex *frame_a_complex,
*frame_b_complex;
hipMalloc((void**)&frame_a_data, (frame_a_stream_size + frame_b_stream_size) * sizeof(Complex));
hipMalloc((void**)&frame_a_complex, frame_a_stream_size * sizeof(Complex) * 2);
frame_b_data = &frame_a_data[frame_a_stream_size];
frame_b_complex = &frame_a_complex[frame_a_stream_size];
// Prepare grid, block and shared memory configuration for block matrix extraction
const dim3 k0_grid(ceil(frame_stream_size / (float) stream_threads_per_block));
const dim3 k0_block(stream_threads_per_block);
// Transfer input data to device memory
hipMemcpy(frame_a_data, iframe_a_data, frame_a_stream_size * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy(frame_b_data, iframe_b_data, frame_b_stream_size * sizeof(Real), hipMemcpyHostToDevice);
// Expand and pad frame a
hipLaunchKernelGGL(( Real2ComplexPadAndShift<false, false>)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_a_data, frame_a_complex,
frame_a_stream_size,
frame_a_nx, frame_a_nx,
NULL
);
// Expand and pad shift frame b
hipLaunchKernelGGL(( Real2ComplexPadAndShift<false, false>)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_b_data, frame_b_complex,
frame_a_stream_size,
frame_b_nx, frame_a_nx,
NULL
);
float rotation_angle = 0.0f;
float corrected_mean_squared_error = 0.0f;
Vec2f translation_vector = (Vec2f) {0.0f, 0.0f};
if (!param_rotation_allowed)
{
translation_vector = phase_correlation::TranslationVector<false>(frame_a_complex, frame_b_complex, nx);
corrected_mean_squared_error = MeanSquaredError(frame_b_complex, frame_a_complex, nx, nx);
}
else
{
const unsigned int frame_polar_matrix_size_rho = (sqrt(2 * ((nx - 1) / 2 + 1) * ((nx - 1) / 2 + 1)));
const unsigned int frame_polar_matrix_size_theta = min(((2 * nx) / 4 * 4) , max_degree_resolution);
const unsigned int frame_stream_size = nx * nx;
const unsigned int frame_stream_size_polar = frame_polar_matrix_size_rho * frame_polar_matrix_size_theta;
Complex *frame_a_data_complex,
*frame_b_data_complex,
*frame_a_data_complex_filtered,
*frame_b_data_complex_filtered,
*highpass_kernel_complex,
*frame_a_data_polar_complex,
*frame_b_data_polar_complex;
hipMalloc((void**)&frame_a_data_complex, (5 * frame_stream_size + 2 * frame_stream_size_polar) * sizeof(Complex));
frame_b_data_complex = &frame_a_data_complex[frame_stream_size];
frame_a_data_complex_filtered = &frame_b_data_complex[frame_stream_size];
frame_b_data_complex_filtered = &frame_a_data_complex_filtered[frame_stream_size];
highpass_kernel_complex = &frame_b_data_complex_filtered[frame_stream_size];
frame_a_data_polar_complex = &highpass_kernel_complex[frame_stream_size_polar];
frame_b_data_polar_complex = &frame_a_data_polar_complex[frame_stream_size_polar];
// Prepare 1D FFT C2C batched plans
hipfftHandle plan_1d_complex_row, plan_1d_complex_col;
{
int n_row[] = {nx};
int n_col[] = {nx};
int inembed_row[] = {nx};
int onembed_row[] = {nx};
int inembed_col[] = {1};
int onembed_col[] = {1};
hipfftPlanMany(&plan_1d_complex_row, 1, n_row, inembed_row, 1, nx, onembed_row, 1, nx, HIPFFT_C2C, nx);
hipfftPlanMany(&plan_1d_complex_col, 1, n_col, inembed_col, nx, 1, onembed_col, nx, 1, HIPFFT_C2C, nx);
}
// Prepare grid and block configuration for polar transformations
const dim3 k1_grid(ceil(frame_stream_size_polar / (float) stream_threads_per_block));
const dim3 k1_block(stream_threads_per_block);
// Generate gaussian high pass filter kernel
hipLaunchKernelGGL(( HighPassKernel<true>)
, dim3(k0_grid), dim3(k0_block), 0, 0,
highpass_kernel_complex, frame_stream_size, nx, 0.3f
);
// FFT both frames first row-wise then column-wise
hipfftExecC2C(plan_1d_complex_row, frame_a_complex, frame_a_data_complex, HIPFFT_FORWARD);
hipfftExecC2C(plan_1d_complex_col, frame_a_data_complex, frame_a_data_complex, HIPFFT_FORWARD);
hipfftExecC2C(plan_1d_complex_row, frame_b_complex, frame_b_data_complex, HIPFFT_FORWARD);
hipfftExecC2C(plan_1d_complex_col, frame_b_data_complex, frame_b_data_complex, HIPFFT_FORWARD);
hipfftExecC2C(plan_1d_complex_row, highpass_kernel_complex, highpass_kernel_complex, HIPFFT_FORWARD);
hipfftExecC2C(plan_1d_complex_col, highpass_kernel_complex, highpass_kernel_complex, HIPFFT_FORWARD);
hipfftDestroy(plan_1d_complex_row);
hipfftDestroy(plan_1d_complex_col);
// High pass filter both frames' magnitudes
hipLaunchKernelGGL(( ComplexPointwiseMagnitudeMulAndScale<true>)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_a_data_complex, highpass_kernel_complex, frame_a_data_complex_filtered,
frame_stream_size, nx, frame_stream_size,
allow_highpass_filtering
);
hipLaunchKernelGGL(( ComplexPointwiseMagnitudeMulAndScale<true>)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_b_data_complex, highpass_kernel_complex, frame_b_data_complex_filtered,
frame_stream_size, nx, frame_stream_size,
allow_highpass_filtering
);
// Transform both frames' FFT coefficients to polar space
hipLaunchKernelGGL(( Cartesian2PolarTransform)
, dim3(k1_grid), dim3(k1_block), 0, 0,
frame_a_data_complex_filtered, frame_a_data_polar_complex,
frame_stream_size_polar, nx,
frame_polar_matrix_size_rho,
frame_polar_matrix_size_theta
);
hipLaunchKernelGGL(( Cartesian2PolarTransform)
, dim3(k1_grid), dim3(k1_block), 0, 0,
frame_b_data_complex_filtered, frame_b_data_polar_complex,
frame_stream_size_polar, nx,
frame_polar_matrix_size_rho,
frame_polar_matrix_size_theta
);
// Correlate polar frames and calculate estimated rotation
// Note: Phase correlation cannot distinguish between an angle and the same angle + 180 degrees => try both and measure errors
const unsigned int peak_index_rotation = phase_correlation::PeakIndex<true, false>(frame_a_data_polar_complex, frame_b_data_polar_complex, frame_polar_matrix_size_rho, frame_polar_matrix_size_theta);
float base_rotation = M_PI * ((float) peak_index_rotation / frame_polar_matrix_size_rho) / frame_polar_matrix_size_theta;
float rotation_angle_1 = base_rotation;
hipLaunchKernelGGL(( Rotate)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_b_complex, frame_a_data_complex_filtered,
frame_stream_size,
nx, rotation_angle_1
);
float rotation_angle_2 = base_rotation + M_PI;
hipLaunchKernelGGL(( Rotate)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_b_complex, frame_b_data_complex_filtered,
frame_stream_size,
nx, rotation_angle_2
);
Vec2f translation_vector_1 = phase_correlation::TranslationVector<false>(frame_a_complex, frame_a_data_complex_filtered, nx);
hipLaunchKernelGGL(( Translate)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_a_data_complex_filtered, frame_b_complex,
frame_stream_size,
nx,
(int) round(translation_vector_1.x), (int) round(translation_vector_1.y)
);
const float mean_squared_error_1 = MeanSquaredError(frame_b_complex, frame_a_complex, nx, nx);
Vec2f translation_vector_2 = phase_correlation::TranslationVector<false>(frame_a_complex, frame_b_data_complex_filtered, nx);
hipLaunchKernelGGL(( Translate)
, dim3(k0_grid), dim3(k0_block), 0, 0,
frame_b_data_complex_filtered, frame_b_complex,
frame_stream_size,
nx,
(int) round(translation_vector_2.x), (int) round(translation_vector_2.y)
);
const float mean_squared_error_2 = MeanSquaredError(frame_b_complex, frame_a_complex, nx, nx);
if (mean_squared_error_1 < mean_squared_error_2)
{
rotation_angle = rotation_angle_1;
translation_vector = translation_vector_1;
corrected_mean_squared_error = mean_squared_error_1;
}
else
{
rotation_angle = rotation_angle_2;
translation_vector = translation_vector_2;
corrected_mean_squared_error = mean_squared_error_2;
}
}
printf("Rotation: %4.2f° \n", rotation_angle * 180.0f / M_PI);
printf("Translation: (%0.0f, %0.0f) \n", translation_vector.x, translation_vector.y);
printf("Remaining error: %4.4f \n", corrected_mean_squared_error);
}
__host__ void TemplateRegistration(const float* iframe_a_data, const float* iframe_b_data, float* result_frame, const unsigned int frame_a_matrix_size, const unsigned int frame_b_matrix_size, bool weighting_window, bool rotation_allowed)
{
TanslationRotationEstimation<true>(iframe_a_data, iframe_b_data, frame_a_matrix_size, frame_b_matrix_size);
}
}
}
}
|
2f65de0a751868779c01263c6c080b3e2de3aff7.cu
|
//----------------------------------------------------------------------
/*!\file gpu_algorithms/templateRegistration.cu
*
* \author Felix Laufer
*
*
* CUDA: Fast rotation-invariant template registration on large 2D matrices
*
*/
//----------------------------------------------------------------------
#include <cufft.h>
#include "gpu_algorithms/phaseCorrelation.cu"
#include "gpu_algorithms/debugPrint.cu"
#include <stdio.h>
namespace gpu_algorithms
{
namespace cuda
{
namespace template_registration
{
//----------------------------------------------------------------------
// Kernel functions
//----------------------------------------------------------------------
// Multiplication of a complex signal a's magnitude with another unchanged complex signal b. Optionally shift the output.
template<bool param_inverse_shift>
static __global__ void ComplexPointwiseMagnitudeMulAndScale(const Complex* a, const Complex* b, Complex* out, const unsigned int stream_size, const unsigned int matrix_size, const float normalization_factor, const bool allow_highpass)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < stream_size; i += numThreads)
{
Complex magnitude = (Complex) {sqrtf(a[i].x * a[i].x + a[i].y * a[i].y), 0.0f};
Complex product = (allow_highpass) ? ComplexMul(magnitude, b[i]) : magnitude;
unsigned int index = i;
if (param_inverse_shift)
{
int y = i / matrix_size;
int x = i - y * matrix_size;
index = SequentialIndex2DInverseFFTShift(x, y, matrix_size);
}
out[index] = (Complex) {product.x / normalization_factor, product.y / normalization_factor};
}
}
// Calculate a data stream of complex point-wise mean squared errors of the given input streams
static __global__ void ComplexPointwiseMeanSquaredError(const Complex *a, const Complex *b, Complex *out, const unsigned int stream_size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < stream_size; i += numThreads)
{
Complex difference = (Complex) {a[i].x - b[i].x, a[i].y - b[i].y};
Complex difference_squared = ComplexMul(difference, difference);
out[i] = difference_squared;
}
}
// Transformation of a complex cartesian matrix to polar space. Optionally zero-pad and shift the output.
static __global__ void Cartesian2PolarTransform(const Complex *idata, Complex *odata, const unsigned int rho_theta_matrix_stream_size, const unsigned int matrix_size, const unsigned int rho_size, const unsigned int theta_size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int radius = (matrix_size - 1) / 2 + 1;
const float step_rho = (float) radius / rho_size;
//const float step_rho = sqrtf(2.0f * radius * radius) / rho_size; // TODO: Which one is better?
const float step_theta = 1.0f * M_PI / theta_size;
for (unsigned int i = threadID; i < rho_theta_matrix_stream_size; i += numThreads)
{
const unsigned int theta_n = i / rho_size;
const unsigned int rho_n = i - theta_n * rho_size;
Real data;
if (rho_n >= rho_size || theta_n >= theta_size)
{
data = 0.0f;
}
else
{
const float rho = rho_n * step_rho;
const float theta = theta_n * step_theta;
float x = rho * cos(theta) + (matrix_size - 1) / 2;
float y = rho * sin(theta) + (matrix_size - 1) / 2;
y = (float)matrix_size - 1.0f - y;
data = BilinearInterpolation(x, y, idata, matrix_size);
}
odata[i].x = data;
odata[i].y = 0.0f;
}
}
// Real to Complex with optional circular shift and optional weighting
template<bool param_shift, bool param_weighted>
static __global__ void Real2ComplexPadAndShift(const Real *idata, Complex *odata, const unsigned int size, const unsigned int matrix_size, const unsigned int matrix_size_expanded, const Real *weights)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int o_i_block_offset = (matrix_size_expanded - 1) / 2 - (matrix_size - 1) / 2;
for (unsigned int i = threadID; i < size; i += numThreads)
{
int o_block_y = i / matrix_size_expanded;
int o_block_x = i - o_block_y * matrix_size_expanded;
const int i_block_x = o_block_x - o_i_block_offset;
const int i_block_y = o_block_y - o_i_block_offset;
Real data;
if(!(0 <= i_block_x && i_block_x < matrix_size && 0 <= i_block_y && i_block_y < matrix_size))
{
data = 0.0f;
}
else
{
const int i_matrix_x = i_block_x;
const int i_matrix_y = i_block_y;
Real weight = param_weighted ? weights[o_block_y * matrix_size_expanded + o_block_x] : 1.0f;
const bool is_valid_coordinate = (0 <= i_matrix_x && i_matrix_x < matrix_size && 0 <= i_matrix_y && i_matrix_y < matrix_size);
data = is_valid_coordinate ? idata[i_matrix_y * matrix_size + i_matrix_x] * weight: 0.0f;
}
unsigned int index = i;
if (param_shift)
{
index = SequentialIndex2DFFTShift(o_block_x, o_block_y, matrix_size_expanded);
}
odata[index].x = data;
odata[index].y = 0.0f;
}
}
// Generate a high pass kernel in time domain
template<bool param_shift>
static __global__ void HighPassKernel(Complex *odata, const unsigned int size, const unsigned int matrix_size, const float sigma)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int offset = (matrix_size - 1) / 2;
for (unsigned int i = threadID; i < size; i += numThreads)
{
const unsigned int y = i / matrix_size;
const unsigned int x = i - y * matrix_size;
const int x_o = x - offset;
const int y_o = y - offset;
float s = 2 * sigma * sigma;
float gaussian_lowpass = 1.0f / (M_PI * s) * (expf(-(x_o * x_o + y_o * y_o) / s));
float gaussian_highpass = (x_o == 0 && y_o == 0) ? 2.0f - gaussian_lowpass : -gaussian_lowpass;
unsigned int index = i;
if (param_shift)
{
index = SequentialIndex2DFFTShift(x, y, matrix_size);
}
odata[index] = (Complex) {gaussian_highpass, 0.0f};
}
}
//----------------------------------------------------------------------
// Host functions
//----------------------------------------------------------------------
static __host__ float MeanSquaredError(const Complex *a, const Complex *b, const unsigned int nx, const unsigned int ny)
{
const unsigned int max_stream_threads_per_block = 256;
const unsigned int frame_stream_size = nx * ny;
const unsigned int stream_threads_per_block = min(max_stream_threads_per_block, frame_stream_size);
const unsigned int stream_blocks = ceil(frame_stream_size / (float) stream_threads_per_block);
Complex *errors;
cudaMalloc((void**)&errors, frame_stream_size * sizeof(Complex));
const dim3 grid(ceil(frame_stream_size / (float) stream_threads_per_block));
const dim3 block(stream_threads_per_block);
// Calculate point-wise errors
ComplexPointwiseMeanSquaredError
<<<grid, block>>>
(
a, b, errors, frame_stream_size
);
// Sum up point-wise errors
Complex squared_mean_error = (Complex) {0.0f, 0.0f};
SumReduce(errors, &squared_mean_error, frame_stream_size);
cudaFree(errors);
return squared_mean_error.x;
}
template <bool param_rotation_allowed>
static __host__ void TanslationRotationEstimation(const Real *iframe_a_data, const Real *iframe_b_data, const unsigned int frame_a_nx, const unsigned int frame_b_nx, const bool allow_highpass_filtering = true, const unsigned int max_degree_resolution = 180)
{
// Computation threads per block for 1d data streams
const unsigned int stream_threads_per_block = 256;
const unsigned int frame_a_stream_size = frame_a_nx * frame_a_nx;
const unsigned int frame_b_stream_size = frame_b_nx * frame_b_nx;
const unsigned int nx = frame_a_nx;
const unsigned int frame_stream_size = frame_a_stream_size;
Real *frame_a_data,
*frame_b_data;
Complex *frame_a_complex,
*frame_b_complex;
cudaMalloc((void**)&frame_a_data, (frame_a_stream_size + frame_b_stream_size) * sizeof(Complex));
cudaMalloc((void**)&frame_a_complex, frame_a_stream_size * sizeof(Complex) * 2);
frame_b_data = &frame_a_data[frame_a_stream_size];
frame_b_complex = &frame_a_complex[frame_a_stream_size];
// Prepare grid, block and shared memory configuration for block matrix extraction
const dim3 k0_grid(ceil(frame_stream_size / (float) stream_threads_per_block));
const dim3 k0_block(stream_threads_per_block);
// Transfer input data to device memory
cudaMemcpy(frame_a_data, iframe_a_data, frame_a_stream_size * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy(frame_b_data, iframe_b_data, frame_b_stream_size * sizeof(Real), cudaMemcpyHostToDevice);
// Expand and pad frame a
Real2ComplexPadAndShift<false, false>
<<<k0_grid, k0_block>>>
(
frame_a_data, frame_a_complex,
frame_a_stream_size,
frame_a_nx, frame_a_nx,
NULL
);
// Expand and pad shift frame b
Real2ComplexPadAndShift<false, false>
<<<k0_grid, k0_block>>>
(
frame_b_data, frame_b_complex,
frame_a_stream_size,
frame_b_nx, frame_a_nx,
NULL
);
float rotation_angle = 0.0f;
float corrected_mean_squared_error = 0.0f;
Vec2f translation_vector = (Vec2f) {0.0f, 0.0f};
if (!param_rotation_allowed)
{
translation_vector = phase_correlation::TranslationVector<false>(frame_a_complex, frame_b_complex, nx);
corrected_mean_squared_error = MeanSquaredError(frame_b_complex, frame_a_complex, nx, nx);
}
else
{
const unsigned int frame_polar_matrix_size_rho = (sqrt(2 * ((nx - 1) / 2 + 1) * ((nx - 1) / 2 + 1)));
const unsigned int frame_polar_matrix_size_theta = min(((2 * nx) / 4 * 4) , max_degree_resolution);
const unsigned int frame_stream_size = nx * nx;
const unsigned int frame_stream_size_polar = frame_polar_matrix_size_rho * frame_polar_matrix_size_theta;
Complex *frame_a_data_complex,
*frame_b_data_complex,
*frame_a_data_complex_filtered,
*frame_b_data_complex_filtered,
*highpass_kernel_complex,
*frame_a_data_polar_complex,
*frame_b_data_polar_complex;
cudaMalloc((void**)&frame_a_data_complex, (5 * frame_stream_size + 2 * frame_stream_size_polar) * sizeof(Complex));
frame_b_data_complex = &frame_a_data_complex[frame_stream_size];
frame_a_data_complex_filtered = &frame_b_data_complex[frame_stream_size];
frame_b_data_complex_filtered = &frame_a_data_complex_filtered[frame_stream_size];
highpass_kernel_complex = &frame_b_data_complex_filtered[frame_stream_size];
frame_a_data_polar_complex = &highpass_kernel_complex[frame_stream_size_polar];
frame_b_data_polar_complex = &frame_a_data_polar_complex[frame_stream_size_polar];
// Prepare 1D FFT C2C batched plans
cufftHandle plan_1d_complex_row, plan_1d_complex_col;
{
int n_row[] = {nx};
int n_col[] = {nx};
int inembed_row[] = {nx};
int onembed_row[] = {nx};
int inembed_col[] = {1};
int onembed_col[] = {1};
cufftPlanMany(&plan_1d_complex_row, 1, n_row, inembed_row, 1, nx, onembed_row, 1, nx, CUFFT_C2C, nx);
cufftPlanMany(&plan_1d_complex_col, 1, n_col, inembed_col, nx, 1, onembed_col, nx, 1, CUFFT_C2C, nx);
}
// Prepare grid and block configuration for polar transformations
const dim3 k1_grid(ceil(frame_stream_size_polar / (float) stream_threads_per_block));
const dim3 k1_block(stream_threads_per_block);
// Generate gaussian high pass filter kernel
HighPassKernel<true>
<<<k0_grid, k0_block>>>
(
highpass_kernel_complex, frame_stream_size, nx, 0.3f
);
// FFT both frames first row-wise then column-wise
cufftExecC2C(plan_1d_complex_row, frame_a_complex, frame_a_data_complex, CUFFT_FORWARD);
cufftExecC2C(plan_1d_complex_col, frame_a_data_complex, frame_a_data_complex, CUFFT_FORWARD);
cufftExecC2C(plan_1d_complex_row, frame_b_complex, frame_b_data_complex, CUFFT_FORWARD);
cufftExecC2C(plan_1d_complex_col, frame_b_data_complex, frame_b_data_complex, CUFFT_FORWARD);
cufftExecC2C(plan_1d_complex_row, highpass_kernel_complex, highpass_kernel_complex, CUFFT_FORWARD);
cufftExecC2C(plan_1d_complex_col, highpass_kernel_complex, highpass_kernel_complex, CUFFT_FORWARD);
cufftDestroy(plan_1d_complex_row);
cufftDestroy(plan_1d_complex_col);
// High pass filter both frames' magnitudes
ComplexPointwiseMagnitudeMulAndScale<true>
<<<k0_grid, k0_block>>>
(
frame_a_data_complex, highpass_kernel_complex, frame_a_data_complex_filtered,
frame_stream_size, nx, frame_stream_size,
allow_highpass_filtering
);
ComplexPointwiseMagnitudeMulAndScale<true>
<<<k0_grid, k0_block>>>
(
frame_b_data_complex, highpass_kernel_complex, frame_b_data_complex_filtered,
frame_stream_size, nx, frame_stream_size,
allow_highpass_filtering
);
// Transform both frames' FFT coefficients to polar space
Cartesian2PolarTransform
<<<k1_grid, k1_block>>>
(
frame_a_data_complex_filtered, frame_a_data_polar_complex,
frame_stream_size_polar, nx,
frame_polar_matrix_size_rho,
frame_polar_matrix_size_theta
);
Cartesian2PolarTransform
<<<k1_grid, k1_block>>>
(
frame_b_data_complex_filtered, frame_b_data_polar_complex,
frame_stream_size_polar, nx,
frame_polar_matrix_size_rho,
frame_polar_matrix_size_theta
);
// Correlate polar frames and calculate estimated rotation
// Note: Phase correlation cannot distinguish between an angle and the same angle + 180 degrees => try both and measure errors
const unsigned int peak_index_rotation = phase_correlation::PeakIndex<true, false>(frame_a_data_polar_complex, frame_b_data_polar_complex, frame_polar_matrix_size_rho, frame_polar_matrix_size_theta);
float base_rotation = M_PI * ((float) peak_index_rotation / frame_polar_matrix_size_rho) / frame_polar_matrix_size_theta;
float rotation_angle_1 = base_rotation;
Rotate
<<<k0_grid, k0_block>>>
(
frame_b_complex, frame_a_data_complex_filtered,
frame_stream_size,
nx, rotation_angle_1
);
float rotation_angle_2 = base_rotation + M_PI;
Rotate
<<<k0_grid, k0_block>>>
(
frame_b_complex, frame_b_data_complex_filtered,
frame_stream_size,
nx, rotation_angle_2
);
Vec2f translation_vector_1 = phase_correlation::TranslationVector<false>(frame_a_complex, frame_a_data_complex_filtered, nx);
Translate
<<<k0_grid, k0_block>>>
(
frame_a_data_complex_filtered, frame_b_complex,
frame_stream_size,
nx,
(int) round(translation_vector_1.x), (int) round(translation_vector_1.y)
);
const float mean_squared_error_1 = MeanSquaredError(frame_b_complex, frame_a_complex, nx, nx);
Vec2f translation_vector_2 = phase_correlation::TranslationVector<false>(frame_a_complex, frame_b_data_complex_filtered, nx);
Translate
<<<k0_grid, k0_block>>>
(
frame_b_data_complex_filtered, frame_b_complex,
frame_stream_size,
nx,
(int) round(translation_vector_2.x), (int) round(translation_vector_2.y)
);
const float mean_squared_error_2 = MeanSquaredError(frame_b_complex, frame_a_complex, nx, nx);
if (mean_squared_error_1 < mean_squared_error_2)
{
rotation_angle = rotation_angle_1;
translation_vector = translation_vector_1;
corrected_mean_squared_error = mean_squared_error_1;
}
else
{
rotation_angle = rotation_angle_2;
translation_vector = translation_vector_2;
corrected_mean_squared_error = mean_squared_error_2;
}
}
printf("Rotation: %4.2f° \n", rotation_angle * 180.0f / M_PI);
printf("Translation: (%0.0f, %0.0f) \n", translation_vector.x, translation_vector.y);
printf("Remaining error: %4.4f \n", corrected_mean_squared_error);
}
__host__ void TemplateRegistration(const float* iframe_a_data, const float* iframe_b_data, float* result_frame, const unsigned int frame_a_matrix_size, const unsigned int frame_b_matrix_size, bool weighting_window, bool rotation_allowed)
{
TanslationRotationEstimation<true>(iframe_a_data, iframe_b_data, frame_a_matrix_size, frame_b_matrix_size);
}
}
}
}
|
40eb8d749f009658c864c734dd0cbf08bbf59ba5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "rocblas.h"
#define M 5
#define N 2
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
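// IDX2C maps (row i, col j) of a column-major matrix with leading dimension ld to a
// flat index; e.g. with ld = M = 5, element (i = 2, j = 1) sits at index 1*5 + 2 = 7.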
void printMat(float* mat, int m, int n){
for(int row = 0; row < m; row++){
for(int col = 0; col < n; col++){
printf("%.f ",mat[IDX2C(row, col, m)]);
}
printf("\n");
}
printf("\n");
}
int main(void){
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
int row, col;
float* h_A;
float* h_B;
float* h_C;
float* d_A = 0;
float* d_B = 0;
float* d_C = 0;
const float alpha = 1.0f;
const float beta = 0.0f;
// Allocating host memory
h_A = (float*)malloc(M * N * sizeof(float));
h_B = (float*)malloc(N * M * sizeof(float));
h_C = (float*)malloc(M * M * sizeof(float));
if (!h_A || !h_B || !h_C){
printf("host memory allocation failed\n");
return EXIT_FAILURE;
}
// Initialize host memory
for (col = 0; col < N; col++){
for(row = 0; row < M; row++){
h_A[IDX2C(row, col, M)] = (float)(row*N + col + 1);
h_B[IDX2C(col, row, N)] = (float)(col*M + row + 1);
}
}
for (int i = 0; i < M*N ; i++){
printf("%.f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < M*N ; i++){
printf("%.f ", h_B[i]);
}
printf("\n");
printf("host A(%dx%d) = \n", M, N);
printMat(h_A, M, N);
printf("host B(%dx%d) = \n", N, M);
printMat(h_B, N, M);
for (col = 0; col < M; col++){
for(row = 0; row < M; row++){
h_C[IDX2C(row, col, M)] = (float)(row*M + col + 1);
}
}
// printf("host C(%dx%d) = \n", M, M);
// printMat(h_C, M, M);
// Allocating device memory
cudaStat = hipMalloc((void**)&d_A, M*N*sizeof(float));
cudaStat = hipMalloc((void**)&d_B, N*M*sizeof(float));
cudaStat = hipMalloc((void**)&d_C, M*M*sizeof(float));
if (cudaStat != hipSuccess){
printf("device memory allocation failed\n");
return EXIT_FAILURE;
}
// Initialize CUBLAS & Create Handle
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS){
printf("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
// Copying matrix values to device memory
stat = hipblasSetMatrix(M, N, sizeof(float), h_A, M, d_A, M);
stat = hipblasSetMatrix(N, M, sizeof(float), h_B, N, d_B, N);
stat = hipblasSetMatrix(M, M, sizeof(float), h_C, M, d_C, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
// Run cuBLAS single-precision (float) GEMM
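// Computes C (MxM) = alpha * A (MxN) * B (NxM) + beta * C in column-major storage;
// the leading dimensions passed below are M, N and M respectively.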
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, M, N, &alpha, d_A, M, d_B, N, &beta, d_C, M);
printf("%d",stat);
if (stat != HIPBLAS_STATUS_SUCCESS) {
// Occurs problem in Changmo's Device
// hipblasSgemm() returns error code 13 (HIPBLAS_STATUS_EXECUTION_FAILED),
// while not showing problem in calculation result.
printf ("cublas gemm error\n");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
// copy result from device to host
stat = hipblasGetMatrix(M, M, sizeof(float), d_C, M, h_C, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipblasDestroy(handle);
// Print output
printf("host C(%dx%d) = \n", M, M);
printMat(h_C, M, M);
free(h_A);
free(h_B);
free(h_C);
return EXIT_SUCCESS;
}
|
40eb8d749f009658c864c734dd0cbf08bbf59ba5.cu
|
#include "stdio.h"
#include "cublas_v2.h"
#define M 5
#define N 2
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
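// IDX2C maps (row i, col j) of a column-major matrix with leading dimension ld to a
// flat index; e.g. with ld = M = 5, element (i = 2, j = 1) sits at index 1*5 + 2 = 7.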
void printMat(float* mat, int m, int n){
for(int row = 0; row < m; row++){
for(int col = 0; col < n; col++){
printf("%.f ",mat[IDX2C(row, col, m)]);
}
printf("\n");
}
printf("\n");
}
int main(void){
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
int row, col;
float* h_A;
float* h_B;
float* h_C;
float* d_A = 0;
float* d_B = 0;
float* d_C = 0;
const float alpha = 1.0f;
const float beta = 0.0f;
// Allocating host memory
h_A = (float*)malloc(M * N * sizeof(float));
h_B = (float*)malloc(N * M * sizeof(float));
h_C = (float*)malloc(M * M * sizeof(float));
if (!h_A || !h_B || !h_C){
printf("host memory allocation failed\n");
return EXIT_FAILURE;
}
// Initialize host memory
for (col = 0; col < N; col++){
for(row = 0; row < M; row++){
h_A[IDX2C(row, col, M)] = (float)(row*N + col + 1);
h_B[IDX2C(col, row, N)] = (float)(col*M + row + 1);
}
}
for (int i = 0; i < M*N ; i++){
printf("%.f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < M*N ; i++){
printf("%.f ", h_B[i]);
}
printf("\n");
printf("host A(%dx%d) = \n", M, N);
printMat(h_A, M, N);
printf("host B(%dx%d) = \n", N, M);
printMat(h_B, N, M);
for (col = 0; col < M; col++){
for(row = 0; row < M; row++){
h_C[IDX2C(row, col, M)] = (float)(row*M + col + 1);
}
}
// printf("host C(%dx%d) = \n", M, M);
// printMat(h_C, M, M);
// Allocating device memory
cudaStat = cudaMalloc((void**)&d_A, M*N*sizeof(float));
cudaStat = cudaMalloc((void**)&d_B, N*M*sizeof(float));
cudaStat = cudaMalloc((void**)&d_C, M*M*sizeof(float));
if (cudaStat != cudaSuccess){
printf("device memory allocation failed\n");
return EXIT_FAILURE;
}
// Initialize CUBLAS & Create Handle
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS){
printf("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
// Copying matrix values to device memory
stat = cublasSetMatrix(M, N, sizeof(float), h_A, M, d_A, M);
stat = cublasSetMatrix(N, M, sizeof(float), h_B, N, d_B, N);
stat = cublasSetMatrix(M, M, sizeof(float), h_C, M, d_C, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cublasDestroy(handle);
return EXIT_FAILURE;
}
// Run cuBLAS single-precision (float) GEMM
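// Computes C (MxM) = alpha * A (MxN) * B (NxM) + beta * C in column-major storage;
// the leading dimensions passed below are M, N and M respectively.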
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, M, N, &alpha, d_A, M, d_B, N, &beta, d_C, M);
printf("%d",stat);
if (stat != CUBLAS_STATUS_SUCCESS) {
// Occurs problem in Changmo's Device
// cublasSgemm() returns error code 13 (CUBLAS_STATUS_EXECUTION_FAILED),
// while not showing problem in calculation result.
printf ("cublas gemm error\n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cublasDestroy(handle);
return EXIT_FAILURE;
}
// copy result from device to host
stat = cublasGetMatrix(M, M, sizeof(float), d_C, M, h_C, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cublasDestroy(handle);
// Print output
printf("host C(%dx%d) = \n", M, M);
printMat(h_C, M, M);
free(h_A);
free(h_B);
free(h_C);
return EXIT_SUCCESS;
}
|
f078abe034a63283e7e1a285abb36256655728d2.hip
|
// !!! This is a file automatically generated by hipify!!!
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <[email protected]>
// parasim
// (c) 1998-2019 all rights reserved
//
// configuration
#include <portinfo>
// pyre
#include <pyre/journal.h>
// cuda
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
// pull the declarations
#include "kernels.h"
// the correlation kernel
template <typename pixel_t = hipComplex>
__global__
void
_migrate(const pixel_t * coarse,
std::size_t cellsPerPair, std::size_t cellsPerRefinedPair,
std::size_t refCells, std::size_t tgtCells,
std::size_t refRefinedCells, std::size_t tgtRefinedCells,
std::size_t rdim, std::size_t tdim, std::size_t edim, std::size_t trdim,
const int * locations,
pixel_t * refined);
// implementation
void
ampcor::cuda::kernels::
migrate(const std::complex<float> * coarse,
std::size_t pairs,
std::size_t refDim, std::size_t tgtDim, std::size_t expDim,
std::size_t refRefinedDim, std::size_t tgtRefinedDim,
const int * locations,
std::complex<float> * refined)
{
// make a channel
pyre::journal::debug_t channel("ampcor.cuda");
// figure out the job layout and launch the calculation on the device
// each thread block takes care of one tile pair, so we need as many blocks as there are pairs
auto B = pairs;
// the number of threads per block is determined by the shape of the expanded maxcor tile;
// we round up to the next warp
auto T = 32 * (expDim / 32 + (expDim % 32 ? 1 : 0));
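// e.g. expDim = 40 rounds up to T = 32 * (40/32 + 1) = 64 threads, i.e. two full warps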
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T << " threads each"
<< "to migrate the expanded maxcor tiles to the refinement arena"
<< pyre::journal::endl;
// shape calculations
auto refCells = refDim * refDim;
auto tgtCells = tgtDim * tgtDim;
auto refRefinedCells = refRefinedDim * refRefinedDim;
auto tgtRefinedCells = tgtRefinedDim * tgtRefinedDim;
// so i can skip over work others are doing
auto cellsPerPair = refCells + tgtCells;
auto cellsPerRefinedPair = refRefinedCells + tgtRefinedCells;
// launch
hipLaunchKernelGGL(( _migrate) , dim3(B),dim3(T), 0, 0, reinterpret_cast<const hipComplex *>(coarse),
cellsPerPair, cellsPerRefinedPair,
refCells, tgtCells, refRefinedCells, tgtRefinedCells,
refDim, tgtDim, expDim, tgtRefinedDim,
locations,
reinterpret_cast<hipComplex *>(refined));
// wait for the device to finish
auto status = hipDeviceSynchronize();
// check
if (status != hipSuccess) {
// get the error description
std::string description = hipGetErrorName(status);
// make a channel
pyre::journal::error_t error("ampcor.cuda");
// complain
error
<< pyre::journal::at(__HERE__)
<< "while migrating the maxcor tiles to the refinement arena: "
<< description << " (" << status << ")"
<< pyre::journal::endl;
// and bail
throw std::runtime_error(description);
}
// all done
return;
}
// the correlation kernel
template <typename pixel_t>
__global__
void
_migrate(const pixel_t * coarse,
std::size_t cellsPerPair, std::size_t cellsPerRefinedPair,
std::size_t refCells, std::size_t tgtCells,
std::size_t refRefinedCells, std::size_t tgtRefinedCells,
std::size_t rdim, std::size_t tdim, std::size_t edim, std::size_t trdim,
const int * locations,
pixel_t * refined)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
// std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
// std::size_t w = b*T + t; // my worker id
// each thread transfers column {t} of the expanded maxcor tile from pair {b} to the
// refinement area
// if there is no work for me
if (t >= edim) {
// bail
return;
}
// unpack the location of the ULHC of my maxcor tile
auto row = locations[2*b];
auto col = locations[2*b + 1];
// the source: (row, col) of the target tile of pair {b} in the coarse arena
const pixel_t * src = coarse + b*cellsPerPair + refCells + row*tdim + col + t;
// the destination: the target tile of pair {b} in the refined arena
pixel_t * dest = refined + b*cellsPerRefinedPair + refRefinedCells + t;
// printf("thread [b=%lu,t=%lu]: loc=(%d,%d)\n", b, t, row, col);
// go down the columns in tandem
for (auto jdx = 0; jdx < edim; ++jdx) {
// move the data
*dest = *src;
// update the pointers
// source moves by a whole row in the target tile
src += tdim;
// destination moves by a whole row in the refined target tile
dest += trdim;
}
// all done
return;
}
// end of file
|
f078abe034a63283e7e1a285abb36256655728d2.cu
|
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <[email protected]>
// parasim
// (c) 1998-2019 all rights reserved
//
// configuration
#include <portinfo>
// pyre
#include <pyre/journal.h>
// cuda
#include <cuda_runtime.h>
#include <cooperative_groups.h>
// pull the declarations
#include "kernels.h"
// the correlation kernel
template <typename pixel_t = cuComplex>
__global__
void
_migrate(const pixel_t * coarse,
std::size_t cellsPerPair, std::size_t cellsPerRefinedPair,
std::size_t refCells, std::size_t tgtCells,
std::size_t refRefinedCells, std::size_t tgtRefinedCells,
std::size_t rdim, std::size_t tdim, std::size_t edim, std::size_t trdim,
const int * locations,
pixel_t * refined);
// implementation
void
ampcor::cuda::kernels::
migrate(const std::complex<float> * coarse,
std::size_t pairs,
std::size_t refDim, std::size_t tgtDim, std::size_t expDim,
std::size_t refRefinedDim, std::size_t tgtRefinedDim,
const int * locations,
std::complex<float> * refined)
{
// make a channel
pyre::journal::debug_t channel("ampcor.cuda");
// figure out the job layout and launch the calculation on the device
// each thread block takes care of one tile pair, so we need as many blocks as there are pairs
auto B = pairs;
// the number of threads per block is determined by the shape of the expanded maxcor tile;
// we round up to the next warp
auto T = 32 * (expDim / 32 + (expDim % 32 ? 1 : 0));
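// e.g. expDim = 40 rounds up to T = 32 * (40/32 + 1) = 64 threads, i.e. two full warps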
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T << " threads each"
<< "to migrate the expanded maxcor tiles to the refinement arena"
<< pyre::journal::endl;
// shape calculations
auto refCells = refDim * refDim;
auto tgtCells = tgtDim * tgtDim;
auto refRefinedCells = refRefinedDim * refRefinedDim;
auto tgtRefinedCells = tgtRefinedDim * tgtRefinedDim;
// so i can skip over work others are doing
auto cellsPerPair = refCells + tgtCells;
auto cellsPerRefinedPair = refRefinedCells + tgtRefinedCells;
// launch
_migrate <<<B,T>>> (reinterpret_cast<const cuComplex *>(coarse),
cellsPerPair, cellsPerRefinedPair,
refCells, tgtCells, refRefinedCells, tgtRefinedCells,
refDim, tgtDim, expDim, tgtRefinedDim,
locations,
reinterpret_cast<cuComplex *>(refined));
// wait for the device to finish
auto status = cudaDeviceSynchronize();
// check
if (status != cudaSuccess) {
// get the error description
std::string description = cudaGetErrorName(status);
// make a channel
pyre::journal::error_t error("ampcor.cuda");
// complain
error
<< pyre::journal::at(__HERE__)
<< "while migrating the maxcor tiles to the refinement arena: "
<< description << " (" << status << ")"
<< pyre::journal::endl;
// and bail
throw std::runtime_error(description);
}
// all done
return;
}
// the correlation kernel
template <typename pixel_t>
__global__
void
_migrate(const pixel_t * coarse,
std::size_t cellsPerPair, std::size_t cellsPerRefinedPair,
std::size_t refCells, std::size_t tgtCells,
std::size_t refRefinedCells, std::size_t tgtRefinedCells,
std::size_t rdim, std::size_t tdim, std::size_t edim, std::size_t trdim,
const int * locations,
pixel_t * refined)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
// std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
// std::size_t w = b*T + t; // my worker id
// each thread transfers column {t} of the expanded maxcor tile from pair {b} to the
// refinement area
// if there is no work for me
if (t >= edim) {
// bail
return;
}
// unpack the location of the ULHC of my maxcor tile
auto row = locations[2*b];
auto col = locations[2*b + 1];
// the source: (row, col) of the target tile of pair {b} in the coarse arena
const pixel_t * src = coarse + b*cellsPerPair + refCells + row*tdim + col + t;
// the destination: the target tile of pair {b} in the refined arena
pixel_t * dest = refined + b*cellsPerRefinedPair + refRefinedCells + t;
// printf("thread [b=%lu,t=%lu]: loc=(%d,%d)\n", b, t, row, col);
// go down the columns in tandem
for (auto jdx = 0; jdx < edim; ++jdx) {
// move the data
*dest = *src;
// update the pointers
// source moves by a whole row in the target tile
src += tdim;
// destination moves by a whole row in the refined target tile
dest += trdim;
}
// all done
return;
}
// end of file
|
6ba172980dbacf05076f01c4a4b323ccc26ebc08.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
for (int i=0; i < var_1; ++i) {
comp = +1.1512E35f + +1.1885E35f;
float tmp_1 = -1.6908E-36f / var_3;
var_2[i] = var_4 + var_5;
comp += var_2[i] - tmp_1 * +0.0f * var_6 + -1.8646E-36f + (var_7 / -1.8247E-41f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
hipDeviceSynchronize();
return 0;
}
|
6ba172980dbacf05076f01c4a4b323ccc26ebc08.cu
|
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
for (int i=0; i < var_1; ++i) {
comp = +1.1512E35f + +1.1885E35f;
float tmp_1 = -1.6908E-36f / var_3;
var_2[i] = var_4 + var_5;
comp += var_2[i] - tmp_1 * +0.0f * var_6 + -1.8646E-36f + (var_7 / -1.8247E-41f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
cudaDeviceSynchronize();
return 0;
}
|
6bd3d8330e23e39add278f98dcf872587a946020.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
// Sobel operator edge detection kernel function
__global__ void sobelInCuda(unsigned char* dataIn, unsigned char* dataOut, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int index = yIndex * imgWidth + xIndex;
int Gx = 0;
int Gy = 0;
if (xIndex > 0 && xIndex < imgWidth - 1 && yIndex > 0 && yIndex < imgHeight - 1)
{
Gx = dataIn[(yIndex - 1) * imgWidth + xIndex + 1] + 2 * dataIn[yIndex * imgWidth + xIndex + 1] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[yIndex * imgWidth + xIndex - 1] + dataIn[(yIndex + 1) * imgWidth + xIndex - 1]);
Gy = dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex - 1) * imgWidth + xIndex] + dataIn[(yIndex - 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex + 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex + 1) * imgWidth + xIndex] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]);
dataOut[index] = (abs(Gx) + abs(Gy)) / 2;
}
}
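// The 3x3 Sobel masks applied above (centered on each interior pixel) are
// Gx = [-1 0 +1; -2 0 +2; -1 0 +1] and Gy = [+1 +2 +1; 0 0 0; -1 -2 -1];
// the output is the average of the absolute responses, (|Gx| + |Gy|) / 2.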
// Sobel operator edge detection CPU function
void sobel(Mat srcImg, Mat dstImg, int imgHeight, int imgWidth)
{
int Gx = 0;
int Gy = 0;
for (int i = 1; i < imgHeight - 1; i++)
{
uchar* dataUp = srcImg.ptr<uchar>(i - 1);
uchar* data = srcImg.ptr<uchar>(i);
uchar* dataDown = srcImg.ptr<uchar>(i + 1);
uchar* out = dstImg.ptr<uchar>(i);
for (int j = 1; j < imgWidth - 1; j++)
{
Gx = (dataUp[j + 1] + 2 * data[j + 1] + dataDown[j + 1]) - (dataUp[j - 1] + 2 * data[j - 1] + dataDown[j - 1]);
Gy = (dataUp[j - 1] + 2 * dataUp[j] + dataUp[j + 1]) - (dataDown[j - 1] + 2 * dataDown[j] + dataDown[j + 1]);
out[j] = (abs(Gx) + abs(Gy)) / 2;
}
}
}
int main()
{
Mat grayImg = imread("D:/project/image_segment_with_cuda/test.jpg", 0);
int imgHeight = grayImg.rows;
int imgWidth = grayImg.cols;
Mat gaussImg;
// Gaussian blur
GaussianBlur(grayImg, gaussImg, Size(3, 3), 0, 0, BORDER_DEFAULT);
double time1 = static_cast<double>(getTickCount());
// Sobel operator CPU implementation
Mat dst(imgHeight, imgWidth, CV_8UC1, Scalar(0));
sobel(gaussImg, dst, imgHeight, imgWidth);
// Stop the timer
time1 = ((double)getTickCount() - time1) / getTickFrequency();
// Print the run time
cout << "The Run Time is :" << time1<< "s" << endl;
//CUDA
Mat dstImg(imgHeight, imgWidth, CV_8UC1, Scalar(0));
//GPU
unsigned char* d_in;
unsigned char* d_out;
hipMalloc((void**)&d_in, imgHeight * imgWidth * sizeof(unsigned char));
hipMalloc((void**)&d_out, imgHeight * imgWidth * sizeof(unsigned char));
    // copy the Gaussian-blurred image from host (CPU) to device (GPU)
hipMemcpy(d_in, gaussImg.data, imgHeight * imgWidth * sizeof(unsigned char), hipMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
dim3 blocksPerGrid((imgWidth + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgHeight + threadsPerBlock.y - 1) / threadsPerBlock.y);
    // start timer
    double time0 = static_cast<double>(getTickCount());
    // launch the kernel
    hipLaunchKernelGGL(sobelInCuda, blocksPerGrid, threadsPerBlock, 0, 0, d_in, d_out, imgHeight, imgWidth);
    //sobelInCuda << <1,512 >> > (d_in, d_out, imgHeight, imgWidth);
    // stop timer
    time0 = ((double)getTickCount() - time0) / getTickFrequency();
    // print run time
    cout << "The Run Time is :" << time0 << "s" << endl;
    // copy the result back from the GPU
    hipMemcpy(dstImg.data, d_out, imgHeight * imgWidth * sizeof(unsigned char), hipMemcpyDeviceToHost);
    // free GPU memory
hipFree(d_in);
hipFree(d_out);
return 0;
}
|
6bd3d8330e23e39add278f98dcf872587a946020.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
// Sobel edge-detection kernel
__global__ void sobelInCuda(unsigned char* dataIn, unsigned char* dataOut, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int index = yIndex * imgWidth + xIndex;
int Gx = 0;
int Gy = 0;
if (xIndex > 0 && xIndex < imgWidth - 1 && yIndex > 0 && yIndex < imgHeight - 1)
{
Gx = dataIn[(yIndex - 1) * imgWidth + xIndex + 1] + 2 * dataIn[yIndex * imgWidth + xIndex + 1] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[yIndex * imgWidth + xIndex - 1] + dataIn[(yIndex + 1) * imgWidth + xIndex - 1]);
Gy = dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex - 1) * imgWidth + xIndex] + dataIn[(yIndex - 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex + 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex + 1) * imgWidth + xIndex] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]);
dataOut[index] = (abs(Gx) + abs(Gy)) / 2;
}
}
// Sobel edge detection, CPU implementation
void sobel(Mat srcImg, Mat dstImg, int imgHeight, int imgWidth)
{
int Gx = 0;
int Gy = 0;
for (int i = 1; i < imgHeight - 1; i++)
{
uchar* dataUp = srcImg.ptr<uchar>(i - 1);
uchar* data = srcImg.ptr<uchar>(i);
uchar* dataDown = srcImg.ptr<uchar>(i + 1);
uchar* out = dstImg.ptr<uchar>(i);
for (int j = 1; j < imgWidth - 1; j++)
{
Gx = (dataUp[j + 1] + 2 * data[j + 1] + dataDown[j + 1]) - (dataUp[j - 1] + 2 * data[j - 1] + dataDown[j - 1]);
Gy = (dataUp[j - 1] + 2 * dataUp[j] + dataUp[j + 1]) - (dataDown[j - 1] + 2 * dataDown[j] + dataDown[j + 1]);
out[j] = (abs(Gx) + abs(Gy)) / 2;
}
}
}
int main()
{
Mat grayImg = imread("D:/project/image_segment_with_cuda/test.jpg", 0);
int imgHeight = grayImg.rows;
int imgWidth = grayImg.cols;
Mat gaussImg;
    // Gaussian blur
GaussianBlur(grayImg, gaussImg, Size(3, 3), 0, 0, BORDER_DEFAULT);
double time1 = static_cast<double>(getTickCount());
    // Sobel CPU implementation
Mat dst(imgHeight, imgWidth, CV_8UC1, Scalar(0));
sobel(gaussImg, dst, imgHeight, imgWidth);
    // stop timer
    time1 = ((double)getTickCount() - time1) / getTickFrequency();
    // print run time
    cout << "The Run Time is :" << time1 << "s" << endl;
    // output image returned by the CUDA implementation
    Mat dstImg(imgHeight, imgWidth, CV_8UC1, Scalar(0));
    // allocate GPU memory
unsigned char* d_in;
unsigned char* d_out;
cudaMalloc((void**)&d_in, imgHeight * imgWidth * sizeof(unsigned char));
cudaMalloc((void**)&d_out, imgHeight * imgWidth * sizeof(unsigned char));
    // copy the Gaussian-blurred image from host (CPU) to device (GPU)
cudaMemcpy(d_in, gaussImg.data, imgHeight * imgWidth * sizeof(unsigned char), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
dim3 blocksPerGrid((imgWidth + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgHeight + threadsPerBlock.y - 1) / threadsPerBlock.y);
    // start timer
    double time0 = static_cast<double>(getTickCount());
    // launch the kernel
    sobelInCuda<<<blocksPerGrid, threadsPerBlock>>>(d_in, d_out, imgHeight, imgWidth);
    //sobelInCuda << <1,512 >> > (d_in, d_out, imgHeight, imgWidth);
    // stop timer
    time0 = ((double)getTickCount() - time0) / getTickFrequency();
    // print run time
    cout << "The Run Time is :" << time0 << "s" << endl;
    // copy the result image back from the GPU
    cudaMemcpy(dstImg.data, d_out, imgHeight * imgWidth * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    // free GPU memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
bd672bcba83ff5944cbe4a24640e596f4dd87dce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL((Slice<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::cuda_stream(),
      nthreads, bottom_data, kForward, num_slices_, slice_size_,
      bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0] || top.size() == 1) { return; }
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL((Slice<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::cuda_stream(),
      nthreads, top_diff, kForward, num_slices_, slice_size_,
      bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
|
bd672bcba83ff5944cbe4a24640e596f4dd87dce.cu
|
#include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0] || top.size() == 1) { return; }
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
|
96a5568ffb16ed51a3648df6fcd4f453d580741f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "blurKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
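// For each requested image size and every block shape, warm up and then time 1000 launches of blurKernel, printing the elapsed microseconds per configuration.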
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *img_device = NULL;
hipMalloc(&img_device, XSIZE*YSIZE);
unsigned char *img_device2 = NULL;
hipMalloc(&img_device2, XSIZE*YSIZE);
uint32_t width_image = XSIZE;
uint32_t height_image = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(blurKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, img_device, img_device2, width_image, height_image);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(blurKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, img_device, img_device2, width_image, height_image);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(blurKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, img_device, img_device2, width_image, height_image);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
96a5568ffb16ed51a3648df6fcd4f453d580741f.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "blurKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
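// For each requested image size and every block shape, warm up and then time 1000 launches of blurKernel, printing the elapsed microseconds per configuration.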
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *img_device = NULL;
cudaMalloc(&img_device, XSIZE*YSIZE);
unsigned char *img_device2 = NULL;
cudaMalloc(&img_device2, XSIZE*YSIZE);
uint32_t width_image = XSIZE;
uint32_t height_image = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
blurKernel<<<gridBlock,threadBlock>>>(img_device,img_device2,width_image,height_image);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
blurKernel<<<gridBlock,threadBlock>>>(img_device,img_device2,width_image,height_image);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
blurKernel<<<gridBlock,threadBlock>>>(img_device,img_device2,width_image,height_image);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
43f622c41dfd9a7d38e60b26f1a44405430ae20c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 2
#define TW 4
#define TC 16
#define C 64
#define N 64
#define H 56
#define W 56
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
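// TVM-generated direct 3x3 convolution kernel: stages padded input and filter tiles in shared memory and accumulates a register tile of output pixels for two output channels per thread.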
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[56];
__shared__ float pad_temp_shared[960];
__shared__ float kernel_shared[384];
float pad_temp_shared_local[28];
float kernel_shared_local[2];
#pragma unroll
for (int xx_c_init = 0; xx_c_init < 2; ++xx_c_init) {
compute_local[(xx_c_init)] = 0.000000e+00f;
compute_local[((xx_c_init + 28))] = 0.000000e+00f;
compute_local[((xx_c_init + 2))] = 0.000000e+00f;
compute_local[((xx_c_init + 30))] = 0.000000e+00f;
compute_local[((xx_c_init + 4))] = 0.000000e+00f;
compute_local[((xx_c_init + 32))] = 0.000000e+00f;
compute_local[((xx_c_init + 6))] = 0.000000e+00f;
compute_local[((xx_c_init + 34))] = 0.000000e+00f;
compute_local[((xx_c_init + 8))] = 0.000000e+00f;
compute_local[((xx_c_init + 36))] = 0.000000e+00f;
compute_local[((xx_c_init + 10))] = 0.000000e+00f;
compute_local[((xx_c_init + 38))] = 0.000000e+00f;
compute_local[((xx_c_init + 12))] = 0.000000e+00f;
compute_local[((xx_c_init + 40))] = 0.000000e+00f;
compute_local[((xx_c_init + 14))] = 0.000000e+00f;
compute_local[((xx_c_init + 42))] = 0.000000e+00f;
compute_local[((xx_c_init + 16))] = 0.000000e+00f;
compute_local[((xx_c_init + 44))] = 0.000000e+00f;
compute_local[((xx_c_init + 18))] = 0.000000e+00f;
compute_local[((xx_c_init + 46))] = 0.000000e+00f;
compute_local[((xx_c_init + 20))] = 0.000000e+00f;
compute_local[((xx_c_init + 48))] = 0.000000e+00f;
compute_local[((xx_c_init + 22))] = 0.000000e+00f;
compute_local[((xx_c_init + 50))] = 0.000000e+00f;
compute_local[((xx_c_init + 24))] = 0.000000e+00f;
compute_local[((xx_c_init + 52))] = 0.000000e+00f;
compute_local[((xx_c_init + 26))] = 0.000000e+00f;
compute_local[((xx_c_init + 54))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 120; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 240) + (((int)threadIdx.y) * 120)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (((((int)blockIdx.y) * 2) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 60) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 2) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 60) / 30)) + ry_outer) < 57)) && (1 <= ((((int)blockIdx.x) * 28) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 30)))) && (((((int)blockIdx.x) * 28) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 30)) < 57)) ? data[(((((((((((rc_outer * 50176) + (((int)threadIdx.z) * 12544)) + (((int)threadIdx.y) * 6272)) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner / 60) * 3136)) + (((int)blockIdx.y) * 112)) + (((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 60) / 30) * 56)) + (ry_outer * 56)) + (((int)blockIdx.x) * 28)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 30)) - 57))] : 0.000000e+00f);
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 48; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
kernel_shared[((((((int)threadIdx.z) * 96) + (((int)threadIdx.y) * 48)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((((int)blockIdx.z) * 4608) + (((int)threadIdx.z) * 1152)) + (((int)threadIdx.y) * 576)) + (rc_outer * 144)) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3) * 9)) + (ry_outer * 3)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 % 3)))];
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) {
#pragma unroll
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
#pragma unroll
for (int ax3 = 0; ax3 < 2; ++ax3) {
pad_temp_shared_local[(ax3)] = pad_temp_shared[(((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer))];
pad_temp_shared_local[((ax3 + 2))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 2))];
pad_temp_shared_local[((ax3 + 4))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 4))];
pad_temp_shared_local[((ax3 + 6))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 6))];
pad_temp_shared_local[((ax3 + 8))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 8))];
pad_temp_shared_local[((ax3 + 10))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 10))];
pad_temp_shared_local[((ax3 + 12))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 12))];
pad_temp_shared_local[((ax3 + 14))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 14))];
pad_temp_shared_local[((ax3 + 16))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 16))];
pad_temp_shared_local[((ax3 + 18))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 18))];
pad_temp_shared_local[((ax3 + 20))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 20))];
pad_temp_shared_local[((ax3 + 22))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 22))];
pad_temp_shared_local[((ax3 + 24))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 24))];
pad_temp_shared_local[((ax3 + 26))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 26))];
}
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 3)) + rx_inner_outer))];
kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 48) + (rc_inner_outer * 3)) + rx_inner_outer) + 192))];
#pragma unroll
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute_local[(xx_c)] = (compute_local[(xx_c)] + (pad_temp_shared_local[(xx_c)] * kernel_shared_local[(0)]));
compute_local[((xx_c + 28))] = (compute_local[((xx_c + 28))] + (pad_temp_shared_local[(xx_c)] * kernel_shared_local[(1)]));
compute_local[((xx_c + 2))] = (compute_local[((xx_c + 2))] + (pad_temp_shared_local[((xx_c + 2))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 30))] = (compute_local[((xx_c + 30))] + (pad_temp_shared_local[((xx_c + 2))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 4))] = (compute_local[((xx_c + 4))] + (pad_temp_shared_local[((xx_c + 4))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 32))] = (compute_local[((xx_c + 32))] + (pad_temp_shared_local[((xx_c + 4))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 6))] = (compute_local[((xx_c + 6))] + (pad_temp_shared_local[((xx_c + 6))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 34))] = (compute_local[((xx_c + 34))] + (pad_temp_shared_local[((xx_c + 6))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 8))] = (compute_local[((xx_c + 8))] + (pad_temp_shared_local[((xx_c + 8))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 36))] = (compute_local[((xx_c + 36))] + (pad_temp_shared_local[((xx_c + 8))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 10))] = (compute_local[((xx_c + 10))] + (pad_temp_shared_local[((xx_c + 10))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 38))] = (compute_local[((xx_c + 38))] + (pad_temp_shared_local[((xx_c + 10))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 12))] = (compute_local[((xx_c + 12))] + (pad_temp_shared_local[((xx_c + 12))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 40))] = (compute_local[((xx_c + 40))] + (pad_temp_shared_local[((xx_c + 12))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 14))] = (compute_local[((xx_c + 14))] + (pad_temp_shared_local[((xx_c + 14))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 42))] = (compute_local[((xx_c + 42))] + (pad_temp_shared_local[((xx_c + 14))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 16))] = (compute_local[((xx_c + 16))] + (pad_temp_shared_local[((xx_c + 16))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 44))] = (compute_local[((xx_c + 44))] + (pad_temp_shared_local[((xx_c + 16))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 18))] = (compute_local[((xx_c + 18))] + (pad_temp_shared_local[((xx_c + 18))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 46))] = (compute_local[((xx_c + 46))] + (pad_temp_shared_local[((xx_c + 18))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 20))] = (compute_local[((xx_c + 20))] + (pad_temp_shared_local[((xx_c + 20))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 48))] = (compute_local[((xx_c + 48))] + (pad_temp_shared_local[((xx_c + 20))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 22))] = (compute_local[((xx_c + 22))] + (pad_temp_shared_local[((xx_c + 22))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 50))] = (compute_local[((xx_c + 50))] + (pad_temp_shared_local[((xx_c + 22))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 24))] = (compute_local[((xx_c + 24))] + (pad_temp_shared_local[((xx_c + 24))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 52))] = (compute_local[((xx_c + 52))] + (pad_temp_shared_local[((xx_c + 24))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 26))] = (compute_local[((xx_c + 26))] + (pad_temp_shared_local[((xx_c + 26))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 54))] = (compute_local[((xx_c + 54))] + (pad_temp_shared_local[((xx_c + 26))] * kernel_shared_local[(1)]));
}
}
}
}
}
#pragma unroll
for (int xx_inner_inner_inner = 0; xx_inner_inner_inner < 2; ++xx_inner_inner_inner) {
compute[(((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner))] = compute_local[(xx_inner_inner_inner)];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12544))] = compute_local[((xx_inner_inner_inner + 28))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 2))] = compute_local[((xx_inner_inner_inner + 2))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12546))] = compute_local[((xx_inner_inner_inner + 30))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 4))] = compute_local[((xx_inner_inner_inner + 4))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12548))] = compute_local[((xx_inner_inner_inner + 32))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 6))] = compute_local[((xx_inner_inner_inner + 6))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12550))] = compute_local[((xx_inner_inner_inner + 34))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 8))] = compute_local[((xx_inner_inner_inner + 8))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12552))] = compute_local[((xx_inner_inner_inner + 36))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 10))] = compute_local[((xx_inner_inner_inner + 10))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12554))] = compute_local[((xx_inner_inner_inner + 38))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12))] = compute_local[((xx_inner_inner_inner + 12))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12556))] = compute_local[((xx_inner_inner_inner + 40))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 14))] = compute_local[((xx_inner_inner_inner + 14))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12558))] = compute_local[((xx_inner_inner_inner + 42))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 16))] = compute_local[((xx_inner_inner_inner + 16))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12560))] = compute_local[((xx_inner_inner_inner + 44))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 18))] = compute_local[((xx_inner_inner_inner + 18))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12562))] = compute_local[((xx_inner_inner_inner + 46))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 20))] = compute_local[((xx_inner_inner_inner + 20))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12564))] = compute_local[((xx_inner_inner_inner + 48))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 22))] = compute_local[((xx_inner_inner_inner + 22))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12566))] = compute_local[((xx_inner_inner_inner + 50))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 24))] = compute_local[((xx_inner_inner_inner + 24))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12568))] = compute_local[((xx_inner_inner_inner + 52))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 26))] = compute_local[((xx_inner_inner_inner + 26))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12570))] = compute_local[((xx_inner_inner_inner + 54))];
}
}
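// cuDNN baselines: each wrapper below pins one forward algorithm (implicit GEMM, non-fused Winograd, FFT) and uses an all-ones 3x3 filter.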
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
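// Stages a (TH+2)-row, width-padded tile of the input feature map into shared memory; h_offset shifts the copy down by one row when the tile sits at the top image border so the zero-filled halo row stays in place.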
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
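// Accumulates the per-thread partial results into global memory with atomicAdd, clipping the TH x TW output tile at the bottom/right image borders.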
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
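// Tiled direct convolution: each block covers one (input-channel tile, output-row tile) pair; each thread owns one output channel and one TW-wide column strip, and partial sums from different channel tiles are combined through the atomicAdd write-back above.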
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
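// Benchmarks the TVM kernel and the tiled conv2d kernel against the three cuDNN baselines, computes the element-wise difference from the cuDNN result, and appends timings and speedups to a CSV file.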
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(2,28,8);
dim3 block(1,2,4);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
    chkerr(hipFuncSetAttribute((const void*)conv2d, hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
43f622c41dfd9a7d38e60b26f1a44405430ae20c.cu
|
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 2
#define TW 4
#define TC 16
#define C 64
#define N 64
#define H 56
#define W 56
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
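// TVM-generated direct 3x3 convolution kernel: stages padded input and filter tiles in shared memory and accumulates a register tile of output pixels for two output channels per thread.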
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[56];
__shared__ float pad_temp_shared[960];
__shared__ float kernel_shared[384];
float pad_temp_shared_local[28];
float kernel_shared_local[2];
#pragma unroll
for (int xx_c_init = 0; xx_c_init < 2; ++xx_c_init) {
compute_local[(xx_c_init)] = 0.000000e+00f;
compute_local[((xx_c_init + 28))] = 0.000000e+00f;
compute_local[((xx_c_init + 2))] = 0.000000e+00f;
compute_local[((xx_c_init + 30))] = 0.000000e+00f;
compute_local[((xx_c_init + 4))] = 0.000000e+00f;
compute_local[((xx_c_init + 32))] = 0.000000e+00f;
compute_local[((xx_c_init + 6))] = 0.000000e+00f;
compute_local[((xx_c_init + 34))] = 0.000000e+00f;
compute_local[((xx_c_init + 8))] = 0.000000e+00f;
compute_local[((xx_c_init + 36))] = 0.000000e+00f;
compute_local[((xx_c_init + 10))] = 0.000000e+00f;
compute_local[((xx_c_init + 38))] = 0.000000e+00f;
compute_local[((xx_c_init + 12))] = 0.000000e+00f;
compute_local[((xx_c_init + 40))] = 0.000000e+00f;
compute_local[((xx_c_init + 14))] = 0.000000e+00f;
compute_local[((xx_c_init + 42))] = 0.000000e+00f;
compute_local[((xx_c_init + 16))] = 0.000000e+00f;
compute_local[((xx_c_init + 44))] = 0.000000e+00f;
compute_local[((xx_c_init + 18))] = 0.000000e+00f;
compute_local[((xx_c_init + 46))] = 0.000000e+00f;
compute_local[((xx_c_init + 20))] = 0.000000e+00f;
compute_local[((xx_c_init + 48))] = 0.000000e+00f;
compute_local[((xx_c_init + 22))] = 0.000000e+00f;
compute_local[((xx_c_init + 50))] = 0.000000e+00f;
compute_local[((xx_c_init + 24))] = 0.000000e+00f;
compute_local[((xx_c_init + 52))] = 0.000000e+00f;
compute_local[((xx_c_init + 26))] = 0.000000e+00f;
compute_local[((xx_c_init + 54))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 120; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 240) + (((int)threadIdx.y) * 120)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (((((int)blockIdx.y) * 2) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 60) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 2) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 60) / 30)) + ry_outer) < 57)) && (1 <= ((((int)blockIdx.x) * 28) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 30)))) && (((((int)blockIdx.x) * 28) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 30)) < 57)) ? data[(((((((((((rc_outer * 50176) + (((int)threadIdx.z) * 12544)) + (((int)threadIdx.y) * 6272)) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner / 60) * 3136)) + (((int)blockIdx.y) * 112)) + (((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 60) / 30) * 56)) + (ry_outer * 56)) + (((int)blockIdx.x) * 28)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner % 30)) - 57))] : 0.000000e+00f);
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 48; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
kernel_shared[((((((int)threadIdx.z) * 96) + (((int)threadIdx.y) * 48)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((((int)blockIdx.z) * 4608) + (((int)threadIdx.z) * 1152)) + (((int)threadIdx.y) * 576)) + (rc_outer * 144)) + ((ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3) * 9)) + (ry_outer * 3)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 % 3)))];
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) {
#pragma unroll
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
#pragma unroll
for (int ax3 = 0; ax3 < 2; ++ax3) {
pad_temp_shared_local[(ax3)] = pad_temp_shared[(((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer))];
pad_temp_shared_local[((ax3 + 2))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 2))];
pad_temp_shared_local[((ax3 + 4))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 4))];
pad_temp_shared_local[((ax3 + 6))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 6))];
pad_temp_shared_local[((ax3 + 8))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 8))];
pad_temp_shared_local[((ax3 + 10))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 10))];
pad_temp_shared_local[((ax3 + 12))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 12))];
pad_temp_shared_local[((ax3 + 14))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 14))];
pad_temp_shared_local[((ax3 + 16))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 16))];
pad_temp_shared_local[((ax3 + 18))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 18))];
pad_temp_shared_local[((ax3 + 20))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 20))];
pad_temp_shared_local[((ax3 + 22))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 22))];
pad_temp_shared_local[((ax3 + 24))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 24))];
pad_temp_shared_local[((ax3 + 26))] = pad_temp_shared[((((((rc_inner_outer * 60) + (((int)threadIdx.y) * 30)) + ax3) + rx_inner_outer) + 26))];
}
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 3)) + rx_inner_outer))];
kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 48) + (rc_inner_outer * 3)) + rx_inner_outer) + 192))];
#pragma unroll
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute_local[(xx_c)] = (compute_local[(xx_c)] + (pad_temp_shared_local[(xx_c)] * kernel_shared_local[(0)]));
compute_local[((xx_c + 28))] = (compute_local[((xx_c + 28))] + (pad_temp_shared_local[(xx_c)] * kernel_shared_local[(1)]));
compute_local[((xx_c + 2))] = (compute_local[((xx_c + 2))] + (pad_temp_shared_local[((xx_c + 2))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 30))] = (compute_local[((xx_c + 30))] + (pad_temp_shared_local[((xx_c + 2))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 4))] = (compute_local[((xx_c + 4))] + (pad_temp_shared_local[((xx_c + 4))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 32))] = (compute_local[((xx_c + 32))] + (pad_temp_shared_local[((xx_c + 4))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 6))] = (compute_local[((xx_c + 6))] + (pad_temp_shared_local[((xx_c + 6))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 34))] = (compute_local[((xx_c + 34))] + (pad_temp_shared_local[((xx_c + 6))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 8))] = (compute_local[((xx_c + 8))] + (pad_temp_shared_local[((xx_c + 8))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 36))] = (compute_local[((xx_c + 36))] + (pad_temp_shared_local[((xx_c + 8))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 10))] = (compute_local[((xx_c + 10))] + (pad_temp_shared_local[((xx_c + 10))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 38))] = (compute_local[((xx_c + 38))] + (pad_temp_shared_local[((xx_c + 10))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 12))] = (compute_local[((xx_c + 12))] + (pad_temp_shared_local[((xx_c + 12))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 40))] = (compute_local[((xx_c + 40))] + (pad_temp_shared_local[((xx_c + 12))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 14))] = (compute_local[((xx_c + 14))] + (pad_temp_shared_local[((xx_c + 14))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 42))] = (compute_local[((xx_c + 42))] + (pad_temp_shared_local[((xx_c + 14))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 16))] = (compute_local[((xx_c + 16))] + (pad_temp_shared_local[((xx_c + 16))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 44))] = (compute_local[((xx_c + 44))] + (pad_temp_shared_local[((xx_c + 16))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 18))] = (compute_local[((xx_c + 18))] + (pad_temp_shared_local[((xx_c + 18))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 46))] = (compute_local[((xx_c + 46))] + (pad_temp_shared_local[((xx_c + 18))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 20))] = (compute_local[((xx_c + 20))] + (pad_temp_shared_local[((xx_c + 20))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 48))] = (compute_local[((xx_c + 48))] + (pad_temp_shared_local[((xx_c + 20))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 22))] = (compute_local[((xx_c + 22))] + (pad_temp_shared_local[((xx_c + 22))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 50))] = (compute_local[((xx_c + 50))] + (pad_temp_shared_local[((xx_c + 22))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 24))] = (compute_local[((xx_c + 24))] + (pad_temp_shared_local[((xx_c + 24))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 52))] = (compute_local[((xx_c + 52))] + (pad_temp_shared_local[((xx_c + 24))] * kernel_shared_local[(1)]));
compute_local[((xx_c + 26))] = (compute_local[((xx_c + 26))] + (pad_temp_shared_local[((xx_c + 26))] * kernel_shared_local[(0)]));
compute_local[((xx_c + 54))] = (compute_local[((xx_c + 54))] + (pad_temp_shared_local[((xx_c + 26))] * kernel_shared_local[(1)]));
}
}
}
}
}
#pragma unroll
for (int xx_inner_inner_inner = 0; xx_inner_inner_inner < 2; ++xx_inner_inner_inner) {
compute[(((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner))] = compute_local[(xx_inner_inner_inner)];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12544))] = compute_local[((xx_inner_inner_inner + 28))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 2))] = compute_local[((xx_inner_inner_inner + 2))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12546))] = compute_local[((xx_inner_inner_inner + 30))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 4))] = compute_local[((xx_inner_inner_inner + 4))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12548))] = compute_local[((xx_inner_inner_inner + 32))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 6))] = compute_local[((xx_inner_inner_inner + 6))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12550))] = compute_local[((xx_inner_inner_inner + 34))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 8))] = compute_local[((xx_inner_inner_inner + 8))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12552))] = compute_local[((xx_inner_inner_inner + 36))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 10))] = compute_local[((xx_inner_inner_inner + 10))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12554))] = compute_local[((xx_inner_inner_inner + 38))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12))] = compute_local[((xx_inner_inner_inner + 12))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12556))] = compute_local[((xx_inner_inner_inner + 40))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 14))] = compute_local[((xx_inner_inner_inner + 14))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12558))] = compute_local[((xx_inner_inner_inner + 42))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 16))] = compute_local[((xx_inner_inner_inner + 16))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12560))] = compute_local[((xx_inner_inner_inner + 44))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 18))] = compute_local[((xx_inner_inner_inner + 18))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12562))] = compute_local[((xx_inner_inner_inner + 46))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 20))] = compute_local[((xx_inner_inner_inner + 20))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12564))] = compute_local[((xx_inner_inner_inner + 48))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 22))] = compute_local[((xx_inner_inner_inner + 22))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12566))] = compute_local[((xx_inner_inner_inner + 50))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 24))] = compute_local[((xx_inner_inner_inner + 24))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12568))] = compute_local[((xx_inner_inner_inner + 52))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 26))] = compute_local[((xx_inner_inner_inner + 26))];
compute[((((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 28)) + xx_inner_inner_inner) + 12570))] = compute_local[((xx_inner_inner_inner + 54))];
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (N*C*R*S)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (N*C*R*S)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (N*C*R*S)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
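// Cooperatively stages a (TH+2) x WPAD input tile for TC consecutive channels into shared memory.
// h_offset distinguishes tiles at the top of the image (h_offset == 1), whose first shared row is
// left as zero padding, from interior tiles (h_offset == 0), which load the upper halo row directly;
// the +1 column shift keeps the leftmost shared column as zero padding.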
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
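// Writes a thread's TH x TW register tile back to global memory, restricted to the write_h x write_w
// entries that fall inside the image so partial boundary tiles stay in range. atomicAdd is needed
// because blocks working on different input-channel tiles accumulate into the same output elements.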
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
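// Thread mapping for conv2d: blockIdx.x encodes (input-channel tile, output-row tile); within a
// block, threadIdx.x / N selects the output-column tile (also used as the warp_id for the
// cooperative load) and threadIdx.x % N selects the output channel. Each thread accumulates a
// TH x TW output tile for one output channel across the TC input channels resident in shared memory.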
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
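// check_diff returns the summed absolute (L1) difference between two buffers; main() below times
// cuDNN (FFT, non-fused Winograd, implicit GEMM), the generated kernel (time_tvm) and the tiled
// conv2d kernel (time_tdc) on the same random input, appends one CSV row and prints the speedups.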
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(2,28,8);
dim3 block(1,2,4);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
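// With the sizes above, conv2d runs with TCS*THS = 4*28 = 112 blocks of N*TWS = 64*14 = 896 threads
// and TC*(TH+2)*WPAD*4 = 16*4*58*4 = 14848 bytes of dynamic shared memory per block, the amount
// the cudaFuncSetAttribute call above opts the kernel in to.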
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
2548141466d3e21224db9ebfbccc225e5f64542c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
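// Each 64-thread block computes one 1024-point inverse FFT: every thread keeps 16 points in
// registers, and data is reshuffled through the padded shared-memory buffer between the IFFT16,
// IFFT4x4 and IFFT16 butterfly stages, with inverse twiddle factors applied in between.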
__global__ void IFFT1024_device( float2 *dst, float2 *src )
{
int tid = threadIdx.x;
int iblock = blockIdx.y * gridDim.x + blockIdx.x;
int index = iblock * 1024 + tid;
src += index;
dst += index;
int hi4 = tid>>4;
int lo4 = tid&15;
int hi2 = tid>>4;
int mi2 = (tid>>2)&3;
int lo2 = tid&3;
float2 a[16];
__shared__ float smem[69*16];
load<16>( a, src, 64 );
IFFT16( a );
itwiddle<16>( a, tid, 1024 );
int il[] = {0,1,2,3, 16,17,18,19, 32,33,34,35, 48,49,50,51};
transpose_br<16>( a, &smem[lo4*65+hi4], 4, &smem[lo4*65+hi4*4], il );
IFFT4x4( a );
itwiddle4x4( a, lo4 );
transpose4x4( a, &smem[hi2*17 + mi2*4 + lo2], 69, &smem[mi2*69*4 + hi2*69 + lo2*17 ], 1, 0xE );
IFFT16( a );
store<16>( a, dst, 64 );
}
extern "C" void IFFT1024( float2 *work, int batch )
{
hipLaunchKernelGGL(( IFFT1024_device), dim3(grid2D(batch)), dim3(64) , 0, 0, work, work );
}
|
2548141466d3e21224db9ebfbccc225e5f64542c.cu
|
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void IFFT1024_device( float2 *dst, float2 *src )
{
int tid = threadIdx.x;
int iblock = blockIdx.y * gridDim.x + blockIdx.x;
int index = iblock * 1024 + tid;
src += index;
dst += index;
int hi4 = tid>>4;
int lo4 = tid&15;
int hi2 = tid>>4;
int mi2 = (tid>>2)&3;
int lo2 = tid&3;
float2 a[16];
__shared__ float smem[69*16];
load<16>( a, src, 64 );
IFFT16( a );
itwiddle<16>( a, tid, 1024 );
int il[] = {0,1,2,3, 16,17,18,19, 32,33,34,35, 48,49,50,51};
transpose_br<16>( a, &smem[lo4*65+hi4], 4, &smem[lo4*65+hi4*4], il );
IFFT4x4( a );
itwiddle4x4( a, lo4 );
transpose4x4( a, &smem[hi2*17 + mi2*4 + lo2], 69, &smem[mi2*69*4 + hi2*69 + lo2*17 ], 1, 0xE );
IFFT16( a );
store<16>( a, dst, 64 );
}
extern "C" void IFFT1024( float2 *work, int batch )
{
IFFT1024_device<<< grid2D(batch), 64 >>>( work, work );
}
|
680cb79581c5c9a5509b14ad8a88673f236676d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
static TexFloat arrStencil_0;
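// The stencil weights below form the binomial filter [1, 4, 6, 4, 1] / 256 applied along the sh1
// axis; the __all() vote lets a warp take the unclamped fast path only when all of its points are
// interior, otherwise indices are clamped at the array borders.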
extern "C" __global__ void stencil(const Int64 shOut_1, const Int64 shOut_0, float* __restrict__ arrOut_0, const Int64 shStencil_1, const Int64 shStencil_0)
{
const int shapeSize = shOut_1 * shOut_0;
const int gridSize = __umul24(blockDim.x, gridDim.x);
int ix;
for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
if (__all(sh1 >= 2 && sh1 < shOut_1 - 2 && (sh0 >= 1 && sh0 < shOut_0 - 1))) {
const Int64 w1 = (sh1 + -2) * shStencil_0 + (sh0 + 0);
const Int64 w4 = (sh1 + -1) * shStencil_0 + (sh0 + 0);
const Int64 w9 = (sh1 + 1) * shStencil_0 + (sh0 + 0);
const Int64 w12 = (sh1 + 2) * shStencil_0 + (sh0 + 0);
const float x13 = indexArray(arrStencil_0, w1);
const float x10 = indexArray(arrStencil_0, w4);
const float x7 = indexArray(arrStencil_0, ix);
const float x4 = indexArray(arrStencil_0, w9);
const float x1 = indexArray(arrStencil_0, w12);
arrOut_0[ix] = 3.90625e-3f * x13 + 1.5625e-2f * x10 + 2.34375e-2f * x7 + 1.5625e-2f * x4 + 3.90625e-3f * x1;
} else {
const Int64 w1 = max((Int64) 0, min(sh1 + -2, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const Int64 w4 = max((Int64) 0, min(sh1 + -1, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const Int64 w9 = max((Int64) 0, min(sh1 + 1, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const Int64 w12 = max((Int64) 0, min(sh1 + 2, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const float x13 = indexArray(arrStencil_0, w1);
const float x10 = indexArray(arrStencil_0, w4);
const float x7 = indexArray(arrStencil_0, ix);
const float x4 = indexArray(arrStencil_0, w9);
const float x1 = indexArray(arrStencil_0, w12);
arrOut_0[ix] = 3.90625e-3f * x13 + 1.5625e-2f * x10 + 2.34375e-2f * x7 + 1.5625e-2f * x4 + 3.90625e-3f * x1;
}
}
}
|
680cb79581c5c9a5509b14ad8a88673f236676d8.cu
|
#include <accelerate_cuda.h>
static TexFloat arrStencil_0;
extern "C" __global__ void stencil(const Int64 shOut_1, const Int64 shOut_0, float* __restrict__ arrOut_0, const Int64 shStencil_1, const Int64 shStencil_0)
{
const int shapeSize = shOut_1 * shOut_0;
const int gridSize = __umul24(blockDim.x, gridDim.x);
int ix;
for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
if (__all(sh1 >= 2 && sh1 < shOut_1 - 2 && (sh0 >= 1 && sh0 < shOut_0 - 1))) {
const Int64 w1 = (sh1 + -2) * shStencil_0 + (sh0 + 0);
const Int64 w4 = (sh1 + -1) * shStencil_0 + (sh0 + 0);
const Int64 w9 = (sh1 + 1) * shStencil_0 + (sh0 + 0);
const Int64 w12 = (sh1 + 2) * shStencil_0 + (sh0 + 0);
const float x13 = indexArray(arrStencil_0, w1);
const float x10 = indexArray(arrStencil_0, w4);
const float x7 = indexArray(arrStencil_0, ix);
const float x4 = indexArray(arrStencil_0, w9);
const float x1 = indexArray(arrStencil_0, w12);
arrOut_0[ix] = 3.90625e-3f * x13 + 1.5625e-2f * x10 + 2.34375e-2f * x7 + 1.5625e-2f * x4 + 3.90625e-3f * x1;
} else {
const Int64 w1 = max((Int64) 0, min(sh1 + -2, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const Int64 w4 = max((Int64) 0, min(sh1 + -1, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const Int64 w9 = max((Int64) 0, min(sh1 + 1, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const Int64 w12 = max((Int64) 0, min(sh1 + 2, shStencil_1 - 1)) * shStencil_0 + max((Int64) 0, min(sh0 + 0, shStencil_0 - 1));
const float x13 = indexArray(arrStencil_0, w1);
const float x10 = indexArray(arrStencil_0, w4);
const float x7 = indexArray(arrStencil_0, ix);
const float x4 = indexArray(arrStencil_0, w9);
const float x1 = indexArray(arrStencil_0, w12);
arrOut_0[ix] = 3.90625e-3f * x13 + 1.5625e-2f * x10 + 2.34375e-2f * x7 + 1.5625e-2f * x4 + 3.90625e-3f * x1;
}
}
}
|
b0b2c3fe2ed1184c87359ee7ac22b5e7eaec1b94.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelGenerateTriangles(int *voronoiPtr, short2 *patternPtr, int3 *ctriangles, int *offset, int width, int min, int max) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < min || x >= max)
return ;
int xwidth = x * width;
short2 pattern = patternPtr[xwidth + min];
int i0, i1, i2, i3;
int3 *pT = &ctriangles[offset[x-1]];
// Jump through all voronoi vertices in a texture row
while (pattern.y > 0 && pattern.y < max) {
i0 = voronoiPtr[xwidth + pattern.y];
i1 = voronoiPtr[xwidth + pattern.y + 1];
i2 = voronoiPtr[xwidth + width + pattern.y + 1];
i3 = voronoiPtr[xwidth + width + pattern.y];
if (pattern.x == 0) *pT = make_int3(i3, i1, i2);
if (pattern.x == 1) *pT = make_int3(i0, i2, i3);
if (pattern.x == 2) *pT = make_int3(i1, i3, i0);
if (pattern.x == 3) *pT = make_int3(i2, i0, i1);
if (pattern.x == 4) {
// Generate 2 triangles.
// Since the hole is convex, no need to do CCW test
*pT = make_int3(i2, i0, i1); pT++;
*pT = make_int3(i3, i0, i2);
}
pattern = patternPtr[xwidth + pattern.y + 1];
pT++;
}
}
|
b0b2c3fe2ed1184c87359ee7ac22b5e7eaec1b94.cu
|
#include "includes.h"
__global__ void kernelGenerateTriangles(int *voronoiPtr, short2 *patternPtr, int3 *ctriangles, int *offset, int width, int min, int max) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < min || x >= max)
return ;
int xwidth = x * width;
short2 pattern = patternPtr[xwidth + min];
int i0, i1, i2, i3;
int3 *pT = &ctriangles[offset[x-1]];
// Jump through all voronoi vertices in a texture row
while (pattern.y > 0 && pattern.y < max) {
i0 = voronoiPtr[xwidth + pattern.y];
i1 = voronoiPtr[xwidth + pattern.y + 1];
i2 = voronoiPtr[xwidth + width + pattern.y + 1];
i3 = voronoiPtr[xwidth + width + pattern.y];
if (pattern.x == 0) *pT = make_int3(i3, i1, i2);
if (pattern.x == 1) *pT = make_int3(i0, i2, i3);
if (pattern.x == 2) *pT = make_int3(i1, i3, i0);
if (pattern.x == 3) *pT = make_int3(i2, i0, i1);
if (pattern.x == 4) {
// Generate 2 triangles.
// Since the hole is convex, no need to do CCW test
*pT = make_int3(i2, i0, i1); pT++;
*pT = make_int3(i3, i0, i2);
}
pattern = patternPtr[xwidth + pattern.y + 1];
pT++;
}
}
|
9e34a7ae23ca9bd8bb726c55fcf4483090ad5729.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "build_hll.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
unsigned int *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(unsigned int));
unsigned int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(unsigned int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(build_hll, dim3(gridBlock), dim3(threadBlock), 0, 0, n, in, out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(build_hll, dim3(gridBlock), dim3(threadBlock), 0, 0, n, in, out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(build_hll, dim3(gridBlock), dim3(threadBlock), 0, 0, n, in, out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9e34a7ae23ca9bd8bb726c55fcf4483090ad5729.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "build_hll.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
unsigned int *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(unsigned int));
unsigned int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(unsigned int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
build_hll<<<gridBlock,threadBlock>>>(n,in,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
build_hll<<<gridBlock,threadBlock>>>(n,in,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
build_hll<<<gridBlock,threadBlock>>>(n,in,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d1a9c431b92c856fd3dc93eb1fd1048d7d95f9c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// The difference to zmergedbicgstab2 is that the SpMV is not merged into the
// kernels. This results in higher flexibility at the price of lower performance.
/* -------------------------------------------------------------------------- */
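// As read by the kernels below, the scalar workspace skp holds alpha in skp[0], beta in skp[1]
// and omega in skp[2]; the remaining slots carry auxiliary scalars that magma_zbicgmerge4
// combines on the device to refresh alpha, beta and omega.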
__global__ void
magma_zbicgmerge1_kernel(
int n,
magmaDoubleComplex * skp,
magmaDoubleComplex * v,
magmaDoubleComplex * r,
magmaDoubleComplex * p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaDoubleComplex beta=skp[1];
magmaDoubleComplex omega=skp[2];
if ( i<n ) {
p[i] = r[i] + beta * ( p[i] - omega * v[i] );
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
p = beta*p
p = p-omega*beta*v
p = p+r
-> p = r + beta * ( p - omega * v )
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDoubleComplex_ptr
set of scalar parameters
@param[in]
v magmaDoubleComplex_ptr
input vector v
@param[in]
r magmaDoubleComplex_ptr
input vector r
@param[in,out]
p magmaDoubleComplex_ptr
input/output vector p
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge1(
magma_int_t n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgmerge1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, v, r, p );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge2_kernel(
int n,
magmaDoubleComplex * skp,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaDoubleComplex alpha=skp[0];
if ( i < n ) {
s[i] = r[i] - alpha * v[i];
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
s=r
s=s-alpha*v
-> s = r - alpha * v
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDoubleComplex_ptr
set of scalar parameters
@param[in]
r magmaDoubleComplex_ptr
input vector r
@param[in]
v magmaDoubleComplex_ptr
input vector v
@param[out]
s magmaDoubleComplex_ptr
output vector s
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge2(
magma_int_t n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgmerge2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, r, v, s );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge3_kernel(
int n,
magmaDoubleComplex * skp,
magmaDoubleComplex * p,
magmaDoubleComplex * se,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * r
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if ( i<n ) {
magmaDoubleComplex s;
s = se[i];
x[i] = x[i] + alpha * p[i] + omega * s;
r[i] = s - omega * t[i];
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x=x+alpha*p
x=x+omega*s
r=s
r=r-omega*t
-> x = x + alpha * p + omega * s
-> r = s - omega * t
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDoubleComplex_ptr
set of scalar parameters
@param[in]
p magmaDoubleComplex_ptr
input p
@param[in]
s magmaDoubleComplex_ptr
input s
@param[in]
t magmaDoubleComplex_ptr
input t
@param[in,out]
x magmaDoubleComplex_ptr
input/output x
@param[in,out]
r magmaDoubleComplex_ptr
input/output r
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge3(
magma_int_t n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgmerge3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, p, s, t, x, r );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge4_kernel_1(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
__global__ void
magma_zbicgmerge4_kernel_2(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
__global__ void
magma_zbicgmerge4_kernel_3(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
//skp[1] = skp[4]/skp[3] * skp[0] / skp[2];
}
}
/**
Purpose
-------
Performs some parameter operations for the BiCGSTAB with scalars on GPU.
Arguments
---------
@param[in]
type int
kernel type
@param[in,out]
skp magmaDoubleComplex_ptr
vector with parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge4(
magma_int_t type,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
dim3 Bs( 1 );
dim3 Gs( 1 );
if ( type == 1 )
hipLaunchKernelGGL(( magma_zbicgmerge4_kernel_1), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else if ( type == 2 )
hipLaunchKernelGGL(( magma_zbicgmerge4_kernel_2), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else if ( type == 3 )
hipLaunchKernelGGL(( magma_zbicgmerge4_kernel_3), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else
printf("error: no kernel called\n");
return MAGMA_SUCCESS;
}
|
d1a9c431b92c856fd3dc93eb1fd1048d7d95f9c1.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// The difference to zmergedbicgstab2 is that the SpMV is not merged into the
// kernels. This results in higher flexibility at the price of lower performance.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge1_kernel(
int n,
magmaDoubleComplex * skp,
magmaDoubleComplex * v,
magmaDoubleComplex * r,
magmaDoubleComplex * p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaDoubleComplex beta=skp[1];
magmaDoubleComplex omega=skp[2];
if ( i<n ) {
p[i] = r[i] + beta * ( p[i] - omega * v[i] );
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = beta*p
p = p-omega*beta*v
p = p+r
-> p = r + beta * ( p - omega * v )
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDoubleComplex_ptr
set of scalar parameters
@param[in]
v magmaDoubleComplex_ptr
input vector v
@param[in]
r magmaDoubleComplex_ptr
input vector r
@param[in,out]
p magmaDoubleComplex_ptr
input/output vector p
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge1(
magma_int_t n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_zbicgmerge1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, v, r, p );
return MAGMA_SUCCESS;
}
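/* The fused update p = r + beta * ( p - omega * v ) performs the three
operations listed in the Purpose section above in a single pass over memory:
r, p and v are each read once and p is written once, instead of p being read
and written by three separate BLAS-1 calls, which is presumably where the
merged kernel saves global-memory traffic. */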
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge2_kernel(
int n,
magmaDoubleComplex * skp,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaDoubleComplex alpha=skp[0];
if ( i < n ) {
s[i] = r[i] - alpha * v[i];
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
s=r
s=s-alpha*v
-> s = r - alpha * v
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDoubleComplex_ptr
set of scalar parameters
@param[in]
r magmaDoubleComplex_ptr
input vector r
@param[in]
v magmaDoubleComplex_ptr
input vector v
@param[out]
s magmaDoubleComplex_ptr
output vector s
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge2(
magma_int_t n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_zbicgmerge2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, r, v, s );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge3_kernel(
int n,
magmaDoubleComplex * skp,
magmaDoubleComplex * p,
magmaDoubleComplex * se,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * r
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if ( i<n ) {
magmaDoubleComplex s;
s = se[i];
x[i] = x[i] + alpha * p[i] + omega * s;
r[i] = s - omega * t[i];
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x=x+alpha*p
x=x+omega*s
r=s
r=r-omega*t
-> x = x + alpha * p + omega * s
-> r = s - omega * t
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDoubleComplex_ptr
set of scalar parameters
@param[in]
p magmaDoubleComplex_ptr
input p
@param[in]
s magmaDoubleComplex_ptr
input s
@param[in]
t magmaDoubleComplex_ptr
input t
@param[in,out]
x magmaDoubleComplex_ptr
input/output x
@param[in,out]
r magmaDoubleComplex_ptr
input/output r
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge3(
magma_int_t n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_zbicgmerge3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, p, s, t, x, r );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge4_kernel_1(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
__global__ void
magma_zbicgmerge4_kernel_2(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
__global__ void
magma_zbicgmerge4_kernel_3(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
//skp[1] = skp[4]/skp[3] * skp[0] / skp[2];
}
}
/**
Purpose
-------
    Performs scalar parameter operations for the merged BiCGSTAB on the GPU.
Arguments
---------
@param[in]
type int
kernel type
@param[in,out]
skp magmaDoubleComplex_ptr
vector with parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge4(
magma_int_t type,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
dim3 Bs( 1 );
dim3 Gs( 1 );
if ( type == 1 )
magma_zbicgmerge4_kernel_1<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else if ( type == 2 )
magma_zbicgmerge4_kernel_2<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else if ( type == 3 )
magma_zbicgmerge4_kernel_3<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else
printf("error: no kernel called\n");
return MAGMA_SUCCESS;
}
|
36579f7733c368a125a39ae0df24907991dfc764.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTableV2(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
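// Work distribution: threadIdx.x strides across the embedding width D
// (step BlockDimX), while each (blockIdx.x, threadIdx.y) pair walks the ids
// with stride BlockDimY * GridDimX. With the <T, 256, 4, 80> instantiation
// used later in this file this matches the threads(256, 4) / grids(80, 1)
// launch configuration.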
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableV2Grad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
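// Note on InputTypeCovert below: the kernel body does not use threadIdx or
// blockIdx, so every launched thread runs the full serial loop over K and
// writes identical values. The result is correct, but only one thread's work
// is actually needed; the grid/block shape of the launch is irrelevant here.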
template <typename T>
__global__ void InputTypeCovert(const T *in_ids, const int64_t K,
int64_t *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = (int64_t)(in_ids[i]);
}
}
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.InputNames("Ids").front();
auto out_name = context.OutputNames("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
dim3 threads(256, 4);
dim3 grids(80, 1);
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> ids;
ids.resize(K);
const int64_t *ids_p = nullptr;
if (ids_t->type() == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( InputTypeCovert<
int>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
ids_t->data<int>(), K, ids.MutableData(context.GetPlace()));
ids_p = ids.MutableData(context.GetPlace());
} else {
ids_p = ids_t->data<int64_t>();
}
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
if (padding_idx == -1)
hipLaunchKernelGGL(( LookupTableV2<
T, 256, 4, 80,
false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids_p, N, K, D, padding_idx);
else
hipLaunchKernelGGL(( LookupTableV2<
T, 256, 4, 80,
true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids_p, N, K, D, padding_idx);
}
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
if (ids->type() == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( InputTypeCovert<
int>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
ids->data<int>(), ids_num,
new_rows.MutableData(context.GetPlace()));
} else {
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
}
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> ids;
ids.resize(K);
const int64_t *ids_p = nullptr;
if (ids_t->type() == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( InputTypeCovert<
int>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
ids_t->data<int>(), K, ids.MutableData(context.GetPlace()));
ids_p = ids.MutableData(context.GetPlace());
} else {
ids_p = ids_t->data<int64_t>();
}
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
hipLaunchKernelGGL(( LookupTableV2Grad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids_p, N, K, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table_v2, ops::LookupTableV2CUDAKernel<float>,
ops::LookupTableV2CUDAKernel<double>,
ops::LookupTableV2CUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(lookup_table_v2_grad,
ops::LookupTableV2GradCUDAKernel<float>,
ops::LookupTableV2GradCUDAKernel<double>,
ops::LookupTableV2GradCUDAKernel<plat::float16>);
|
36579f7733c368a125a39ae0df24907991dfc764.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTableV2(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableV2Grad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
__global__ void InputTypeCovert(const T *in_ids, const int64_t K,
int64_t *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = (int64_t)(in_ids[i]);
}
}
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.InputNames("Ids").front();
auto out_name = context.OutputNames("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
dim3 threads(256, 4);
dim3 grids(80, 1);
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> ids;
ids.resize(K);
const int64_t *ids_p = nullptr;
if (ids_t->type() == framework::proto::VarType::INT32) {
InputTypeCovert<
int><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
ids_t->data<int>(), K, ids.MutableData(context.GetPlace()));
ids_p = ids.MutableData(context.GetPlace());
} else {
ids_p = ids_t->data<int64_t>();
}
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
if (padding_idx == -1)
LookupTableV2<
T, 256, 4, 80,
false><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids_p, N, K, D, padding_idx);
else
LookupTableV2<
T, 256, 4, 80,
true><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids_p, N, K, D, padding_idx);
}
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
if (ids->type() == framework::proto::VarType::INT32) {
InputTypeCovert<
int><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
ids->data<int>(), ids_num,
new_rows.MutableData(context.GetPlace()));
} else {
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
}
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> ids;
ids.resize(K);
const int64_t *ids_p = nullptr;
if (ids_t->type() == framework::proto::VarType::INT32) {
InputTypeCovert<
int><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
ids_t->data<int>(), K, ids.MutableData(context.GetPlace()));
ids_p = ids.MutableData(context.GetPlace());
} else {
ids_p = ids_t->data<int64_t>();
}
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
LookupTableV2Grad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids_p, N, K, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table_v2, ops::LookupTableV2CUDAKernel<float>,
ops::LookupTableV2CUDAKernel<double>,
ops::LookupTableV2CUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(lookup_table_v2_grad,
ops::LookupTableV2GradCUDAKernel<float>,
ops::LookupTableV2GradCUDAKernel<double>,
ops::LookupTableV2GradCUDAKernel<plat::float16>);
|
aa86d21b4d4f64e63875eee713c875293cbce8b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/operators/optimizers/adagrad_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows,
T* grad_merge, const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
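// Thread 0 of the block linearly searches grad_merge_rows for the merged-row
// index that corresponds to this block's source row (blockIdx.y) and publishes
// it through shared memory; the __syncthreads() below makes it visible to the
// other threads before they accumulate into that merged row.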
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows,
const T* learning_rate, T* param,
T* moment, int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since indices in the rows of SelectedRows can be duplicated, we have to use
// an atomic operation to avoid concurrent write errors.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
} // namespace
template <typename T>
struct SparseAdagradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& grad,
const framework::Tensor& learning_rate, T epsilon,
framework::Tensor* moment, framework::Tensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
auto grad_square =
SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge);
math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
hipLaunchKernelGGL(( SparseAdagradFunctorKernel<
T, 256>), dim3(grid2), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr,
param_data, moment_data, grad_width, epsilon);
}
};
template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>;
template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
aa86d21b4d4f64e63875eee713c875293cbce8b8.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/operators/optimizers/adagrad_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows,
T* grad_merge, const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows,
const T* learning_rate, T* param,
T* moment, int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since indices in the rows of SelectedRows can be duplicated, we have to use
// an atomic operation to avoid concurrent write errors.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
} // namespace
template <typename T>
struct SparseAdagradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& grad,
const framework::Tensor& learning_rate, T epsilon,
framework::Tensor* moment, framework::Tensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
auto grad_square =
SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge);
math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
SparseAdagradFunctorKernel<
T, 256><<<grid2, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr,
param_data, moment_data, grad_width, epsilon);
}
};
template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>;
template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
7ab409d4d936b558fdd6ab200aefdfe9798d53cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pfbFilterShared.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *filtered = NULL;
hipMalloc(&filtered, XSIZE*YSIZE);
float *unfiltered = NULL;
hipMalloc(&unfiltered, XSIZE*YSIZE);
float *taps = NULL;
hipMalloc(&taps, XSIZE*YSIZE);
const int ntaps = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(pfbFilterShared, dim3(gridBlock), dim3(threadBlock), 0, 0, filtered, unfiltered, taps, ntaps);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(pfbFilterShared, dim3(gridBlock), dim3(threadBlock), 0, 0, filtered, unfiltered, taps, ntaps);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(pfbFilterShared, dim3(gridBlock), dim3(threadBlock), 0, 0, filtered, unfiltered, taps, ntaps);
}
auto end = steady_clock::now();
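// Note: kernel launches are asynchronous, so without a device synchronization
// (e.g. hipDeviceSynchronize()) before taking `end`, the measured interval can
// largely reflect launch/enqueue overhead rather than kernel execution time.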
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
7ab409d4d936b558fdd6ab200aefdfe9798d53cb.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pfbFilterShared.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *filtered = NULL;
cudaMalloc(&filtered, XSIZE*YSIZE);
float *unfiltered = NULL;
cudaMalloc(&unfiltered, XSIZE*YSIZE);
float *taps = NULL;
cudaMalloc(&taps, XSIZE*YSIZE);
const int ntaps = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pfbFilterShared<<<gridBlock,threadBlock>>>(filtered,unfiltered,taps,ntaps);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pfbFilterShared<<<gridBlock,threadBlock>>>(filtered,unfiltered,taps,ntaps);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pfbFilterShared<<<gridBlock,threadBlock>>>(filtered,unfiltered,taps,ntaps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
499fc410fca6a7b31ffee4b92917c809e8fb88b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
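// The grid-stride pattern lets a fixed-size launch cover an arbitrary n: each
// thread starts at its global index and then jumps by the total number of
// launched threads (blockDim.x * gridDim.x) until it passes n, so correctness
// does not depend on launching exactly one thread per element.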
// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas
__global__ void im3d2col_kernel(const int n, const float* data_im,
const int height, const int width, const int depth,
const int kernel_h, const int kernel_w, const int kernel_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
const int height_col, const int width_col, const int depth_col,
float* data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
int d_out = index % depth_col;
int w_index = index / depth_col;
int w_out = w_index % width_col;
int h_index = w_index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
//channel_in = 1;
int channel_out = channel_in * kernel_h * kernel_w * kernel_d;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
int d_in = d_out * stride_d - pad_d;
float* data_col_ptr = data_col;
data_col_ptr += channel_out * (height_col * width_col * depth_col) +
h_out * (width_col * depth_col) + w_out * depth_col + d_out;
const float* data_im_ptr = data_im;
data_im_ptr += channel_in * (height * width * depth) +
h_in * (width * depth) + w_in * depth + d_in;
for (int i = 0; i < kernel_h; ++i)
{
int h = h_in + i;
for (int j = 0; j < kernel_w; ++j)
{
int w = w_in + j;
for (int k = 0; k < kernel_d; ++k)
{
int d = d_in + k;
*data_col_ptr = (h >= 0 && w >= 0 && d >= 0 &&
h < height && w < width && d < depth) ?
data_im_ptr[i * (width * depth) + j *depth + k] : 0;
data_col_ptr += height_col * width_col * depth_col;
}
}
}
}
}
void im3d2col(hipStream_t stream, const float* data_im, const int channels,
const int height, const int width, const int depth,
const int kernel_h, const int kernel_w, const int kernel_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
float* data_col)
{
// We are going to launch channels * height_col * width_col * depth_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1;
int num_kernels = channels * height_col * width_col * depth_col;
hipLaunchKernelGGL(( im3d2col_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im,
height, width, depth,
kernel_h, kernel_w, kernel_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_col);
}
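// Worked example (illustrative numbers, not from the original source): for a
// single-channel 5x5x5 input with a 3x3x3 kernel, stride 1 and no padding,
// height_col = width_col = depth_col = (5 - 3)/1 + 1 = 3, so 27 threads are
// launched and data_col ends up as a 27 x 27 matrix: one row per (channel,
// kernel-offset) pair and one column per output position, which is exactly
// the shape the callers below allocate for the `columns` buffer.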
__global__ void col2im3d_kernel(const int n, const float* data_col,
const int height, const int width, const int depth,
const int channels,
const int patch_h, const int patch_w, const int patch_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
const int height_col, const int width_col, const int depth_col,
float* data_im)
{
CUDA_KERNEL_LOOP(index, n)
{
float val = 0;
int d = index % depth + pad_d;
int w_index = index / depth;
int w = w_index % width + pad_w;
int h_index = w_index / width;
int h = h_index % height + pad_h;
int c = h_index / height;
// compute the start and end of the output
int d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1;
int d_col_end = min(d / stride_d + 1, depth_col);
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int offset =
(c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col;
int coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col;
int coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col;
int coeff_d_col = (1 - stride_d * height_col * width_col * depth_col);
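// `offset` is the flat data_col index for output position (0, 0, 0) with the
// kernel offsets pinned to (h, w, d); coeff_h_col / coeff_w_col / coeff_d_col
// are the per-step changes in that flat index when moving one output column in
// each dimension (the kernel offset shrinks by the stride while the output
// coordinate grows by one). The loops below can therefore walk data_col with
// three multiply-adds instead of recomputing the full index every iteration.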
for (int d_col = d_col_start; d_col < d_col_end; ++d_col)
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col];
}
}
data_im[index] = val;
}
}
void col2im3d(hipStream_t stream, const float* data_col, const int channels,
const int height, const int width, const int depth,
const int patch_h, const int patch_w, const int patch_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
float* data_im)
{
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1;
int num_kernels = channels * height * width * depth;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im3d_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col,
height, width, depth, channels,
patch_h, patch_w, patch_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_im);
}
static int cunn_VolumetricConvolution_updateOutput(lua_State *L) {
THCState *state = getCutorchState(L);
// Input
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
// Params:
int dD = luaT_getfieldcheckint(L, 1, "dW");
int dW = luaT_getfieldcheckint(L, 1, "dH");
int dH = luaT_getfieldcheckint(L, 1, "dT");
int kD = luaT_getfieldcheckint(L, 1, "kW");
int kW = luaT_getfieldcheckint(L, 1, "kH");
int kH = luaT_getfieldcheckint(L, 1, "kT");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 6, input, output, weight,
bias, columns, ones));
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 4) {
// Force batch
batch = 0;
THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1],
input->size[2], input->size[3]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long inputDepth = input->size[4];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
long outputDepth = (inputDepth - kD) / dD + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize5d(state, output, batchSize, nOutputPlane, outputDepth,
outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kD*kW*kH, outputDepth*outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize3d(state, ones, outputDepth, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputDepth * outputHeight * outputWidth;
long k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
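// With k_ == 1 this GEMM is a rank-1 outer product of ones and bias (in the
// swapped column-major view), which fills every spatial position of output
// plane i with bias[i]; beta = 0 so it overwrites, and the convolution GEMM
// below then accumulates on top of it with beta = 1.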
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
0,
THCudaTensor_data(state, output_n), n_
);
// Extract columns:
im3d2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, inputDepth, kH, kW, kD, 0, 0, 0, dH, dW, dD,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[0];
long n = columns->size[1];
long k = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
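// In row-major terms this computes
// output_n (nOutputPlane x spatialSize) += weight (nOutputPlane x k) * columns (k x spatialSize),
// with k = nInputPlane * kernel volume; the operands are passed in swapped
// order so that the column-major GEMM produces the row-major product directly.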
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight), k,
1,
THCudaTensor_data(state, output_n), n
);
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize4d(state, output, nOutputPlane, outputHeight, outputWidth, outputDepth);
THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth);
}
// return output
return 1;
}
static int cunn_VolumetricConvolution_updateGradInput(lua_State *L) {
THCState *state = getCutorchState(L);
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dD = luaT_getfieldcheckint(L, 1, "dW");
int dW = luaT_getfieldcheckint(L, 1, "dH");
int dH = luaT_getfieldcheckint(L, 1, "dT");
int kD = luaT_getfieldcheckint(L, 1, "kW");
int kW = luaT_getfieldcheckint(L, 1, "kH");
int kH = luaT_getfieldcheckint(L, 1, "kT");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
THCudaTensor *weight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *gradColumns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput));
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 4) {
// Force batch
batch = 0;
THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]);
THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long inputDepth = input->size[4];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
long outputDepth = (inputDepth - kD) / dD + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize5d(state, gradInput, batchSize, nInputPlane, inputDepth, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH*kD, outputDepth*outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradInput_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per sample:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4];
long n = gradColumns->size[1];
long k = weight->size[0];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 't',
n, m, k,
1,
THCudaTensor_data(state, gradOutput_n), n,
THCudaTensor_data(state, weight), m,
0,
THCudaTensor_data(state, gradColumns), n
);
// Unpack columns back into input:
col2im3d(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradColumns),
nInputPlane, inputHeight, inputWidth, inputDepth, kH, kW, kD, 0, 0, 0, dH, dW, dD,
THCudaTensor_data(state, gradInput_n)
);
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradInput_n);
THCudaTensor_free(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth);
THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth);
THCudaTensor_resize4d(state, gradInput, nInputPlane, inputHeight, inputWidth, inputDepth);
}
// Return gradInput
return 1;
}
static int cunn_VolumetricConvolution_accGradParameters(lua_State *L) {
THCState *state = getCutorchState(L);
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dD = luaT_getfieldcheckint(L, 1, "dW");
int dW = luaT_getfieldcheckint(L, 1, "dH");
int dH = luaT_getfieldcheckint(L, 1, "dT");
int kD = luaT_getfieldcheckint(L, 1, "kW");
int kW = luaT_getfieldcheckint(L, 1, "kH");
int kH = luaT_getfieldcheckint(L, 1, "kT");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
float scale = luaL_optnumber(L, 4, 1);
THCudaTensor *gradWeight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
THCudaTensor *gradBias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradBias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 6, input, gradOutput, gradWeight,
gradBias, columns, ones));
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 4) {
// Force batch
batch = 0;
THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]);
THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long inputDepth = input->size[4];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
long outputDepth = (inputDepth - kD) / dD + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize3d(state, ones, outputDepth, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH*kD, outputDepth*outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im3d2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, inputDepth, kH, kW, kD, 0, 0, 0, dH, dW, dD,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = gradWeight->size[0];
long n = gradWeight->size[1]*gradWeight->size[2]*gradWeight->size[3]*gradWeight->size[4];
long k = columns->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
't', 'n',
n, m, k,
scale,
THCudaTensor_data(state, columns), k,
THCudaTensor_data(state, gradOutput_n), k,
1,
THCudaTensor_data(state, gradWeight), n
);
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long k_ = outputDepth * outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
THCudaBlas_gemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth);
THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth);
}
// Return nothing
return 0;
}
static const struct luaL_Reg cunn_VolumetricConvolution__ [] = {
{"VolumetricConvolution_updateOutput", cunn_VolumetricConvolution_updateOutput},
{"VolumetricConvolution_updateGradInput", cunn_VolumetricConvolution_updateGradInput},
{"VolumetricConvolution_accGradParameters", cunn_VolumetricConvolution_accGradParameters},
{NULL, NULL}
};
static void cunn_VolumetricConvolution_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_VolumetricConvolution__, "nn");
lua_pop(L,1);
}
|
499fc410fca6a7b31ffee4b92917c809e8fb88b1.cu
|
#include "utils.h"
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas
__global__ void im3d2col_kernel(const int n, const float* data_im,
const int height, const int width, const int depth,
const int kernel_h, const int kernel_w, const int kernel_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
const int height_col, const int width_col, const int depth_col,
float* data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
int d_out = index % depth_col;
int w_index = index / depth_col;
int w_out = w_index % width_col;
int h_index = w_index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
//channel_in = 1;
int channel_out = channel_in * kernel_h * kernel_w * kernel_d;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
int d_in = d_out * stride_d - pad_d;
float* data_col_ptr = data_col;
data_col_ptr += channel_out * (height_col * width_col * depth_col) +
h_out * (width_col * depth_col) + w_out * depth_col + d_out;
const float* data_im_ptr = data_im;
data_im_ptr += channel_in * (height * width * depth) +
h_in * (width * depth) + w_in * depth + d_in;
for (int i = 0; i < kernel_h; ++i)
{
int h = h_in + i;
for (int j = 0; j < kernel_w; ++j)
{
int w = w_in + j;
for (int k = 0; k < kernel_d; ++k)
{
int d = d_in + k;
*data_col_ptr = (h >= 0 && w >= 0 && d >= 0 &&
h < height && w < width && d < depth) ?
data_im_ptr[i * (width * depth) + j *depth + k] : 0;
data_col_ptr += height_col * width_col * depth_col;
}
}
}
}
}
void im3d2col(cudaStream_t stream, const float* data_im, const int channels,
const int height, const int width, const int depth,
const int kernel_h, const int kernel_w, const int kernel_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
float* data_col)
{
// We are going to launch channels * height_col * width_col * depth_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1;
int num_kernels = channels * height_col * width_col * depth_col;
im3d2col_kernel<<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_im,
height, width, depth,
kernel_h, kernel_w, kernel_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_col);
}
__global__ void col2im3d_kernel(const int n, const float* data_col,
const int height, const int width, const int depth,
const int channels,
const int patch_h, const int patch_w, const int patch_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
const int height_col, const int width_col, const int depth_col,
float* data_im)
{
CUDA_KERNEL_LOOP(index, n)
{
float val = 0;
int d = index % depth + pad_d;
int w_index = index / depth;
int w = w_index % width + pad_w;
int h_index = w_index / width;
int h = h_index % height + pad_h;
int c = h_index / height;
// compute the start and end of the output
int d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1;
int d_col_end = min(d / stride_d + 1, depth_col);
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int offset =
(c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col;
int coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col;
int coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col;
int coeff_d_col = (1 - stride_d * height_col * width_col * depth_col);
for (int d_col = d_col_start; d_col < d_col_end; ++d_col)
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col];
}
}
data_im[index] = val;
}
}
void col2im3d(cudaStream_t stream, const float* data_col, const int channels,
const int height, const int width, const int depth,
const int patch_h, const int patch_w, const int patch_d,
const int pad_h, const int pad_w, const int pad_d,
const int stride_h, const int stride_w, const int stride_d,
float* data_im)
{
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1;
int num_kernels = channels * height * width * depth;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im3d_kernel<<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_col,
height, width, depth, channels,
patch_h, patch_w, patch_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_im);
}
static int cunn_VolumetricConvolution_updateOutput(lua_State *L) {
THCState *state = getCutorchState(L);
// Input
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
// Params:
int dD = luaT_getfieldcheckint(L, 1, "dW");
int dW = luaT_getfieldcheckint(L, 1, "dH");
int dH = luaT_getfieldcheckint(L, 1, "dT");
int kD = luaT_getfieldcheckint(L, 1, "kW");
int kW = luaT_getfieldcheckint(L, 1, "kH");
int kH = luaT_getfieldcheckint(L, 1, "kT");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 6, input, output, weight,
bias, columns, ones));
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 4) {
// Force batch
batch = 0;
THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1],
input->size[2], input->size[3]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long inputDepth = input->size[4];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
long outputDepth = (inputDepth - kD) / dD + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize5d(state, output, batchSize, nOutputPlane, outputDepth,
outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kD*kW*kH, outputDepth*outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize3d(state, ones, outputDepth, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputDepth * outputHeight * outputWidth;
long k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
0,
THCudaTensor_data(state, output_n), n_
);
// Extract columns:
im3d2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, inputDepth, kH, kW, kD, 0, 0, 0, dH, dW, dD,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[0];
long n = columns->size[1];
long k = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight), k,
1,
THCudaTensor_data(state, output_n), n
);
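    // THCudaBlas_gemm is column-major, so the row-major buffers are handed over as their
    // transposes: the call above computes Output^T = Columns^T * Weight^T, i.e.
    // Output(m x n) = Weight(m x k) * Columns(k x n) in the row-major view, with
    // m = nOutputPlane, k = nInputPlane times the kernel volume, and n = the number of
    // output locations.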
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize4d(state, output, nOutputPlane, outputHeight, outputWidth, outputDepth);
THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth);
}
// return output
return 1;
}
static int cunn_VolumetricConvolution_updateGradInput(lua_State *L) {
THCState *state = getCutorchState(L);
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dD = luaT_getfieldcheckint(L, 1, "dW");
int dW = luaT_getfieldcheckint(L, 1, "dH");
int dH = luaT_getfieldcheckint(L, 1, "dT");
int kD = luaT_getfieldcheckint(L, 1, "kW");
int kW = luaT_getfieldcheckint(L, 1, "kH");
int kH = luaT_getfieldcheckint(L, 1, "kT");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
THCudaTensor *weight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *gradColumns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput));
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 4) {
// Force batch
batch = 0;
THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]);
THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long inputDepth = input->size[4];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
long outputDepth = (inputDepth - kD) / dD + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize5d(state, gradInput, batchSize, nInputPlane, inputDepth, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH*kD, outputDepth*outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradInput_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per sample:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4];
long n = gradColumns->size[1];
long k = weight->size[0];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
'n', 't',
n, m, k,
1,
THCudaTensor_data(state, gradOutput_n), n,
THCudaTensor_data(state, weight), m,
0,
THCudaTensor_data(state, gradColumns), n
);
// Unpack columns back into input:
col2im3d(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradColumns),
nInputPlane, inputHeight, inputWidth, inputDepth, kH, kW, kD, 0, 0, 0, dH, dW, dD,
THCudaTensor_data(state, gradInput_n)
);
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradInput_n);
THCudaTensor_free(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth);
THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth);
THCudaTensor_resize4d(state, gradInput, nInputPlane, inputHeight, inputWidth, inputDepth);
}
// Return gradInput
return 1;
}
static int cunn_VolumetricConvolution_accGradParameters(lua_State *L) {
THCState *state = getCutorchState(L);
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dD = luaT_getfieldcheckint(L, 1, "dW");
int dW = luaT_getfieldcheckint(L, 1, "dH");
int dH = luaT_getfieldcheckint(L, 1, "dT");
int kD = luaT_getfieldcheckint(L, 1, "kW");
int kW = luaT_getfieldcheckint(L, 1, "kH");
int kH = luaT_getfieldcheckint(L, 1, "kT");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
float scale = luaL_optnumber(L, 4, 1);
THCudaTensor *gradWeight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
THCudaTensor *gradBias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradBias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 6, input, gradOutput, gradWeight,
gradBias, columns, ones));
  luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 4) {
// Force batch
batch = 0;
THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]);
THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long inputDepth = input->size[4];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
long outputDepth = (inputDepth - kD) / dD + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize3d(state, ones, outputDepth, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH*kD, outputDepth*outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im3d2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_n),
nInputPlane, inputHeight, inputWidth, inputDepth, kH, kW, kD, 0, 0, 0, dH, dW, dD,
THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = gradWeight->size[0];
long n = gradWeight->size[1]*gradWeight->size[2]*gradWeight->size[3]*gradWeight->size[4];
long k = columns->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_gemm(
state,
't', 'n',
n, m, k,
scale,
THCudaTensor_data(state, columns), k,
THCudaTensor_data(state, gradOutput_n), k,
1,
THCudaTensor_data(state, gradWeight), n
);
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long k_ = outputDepth * outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
THCudaBlas_gemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
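    // Viewed column-major, gradOutput_n is an (outputDepth*outputHeight*outputWidth) x
    // nOutputPlane matrix, so the transposed gemv above multiplies each output plane by the
    // all-ones vector: it accumulates gradBias[j] += scale * (sum of gradOutput_n[j] over
    // all spatial locations).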
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth);
THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth);
}
// Return nothing
return 0;
}
static const struct luaL_Reg cunn_VolumetricConvolution__ [] = {
{"VolumetricConvolution_updateOutput", cunn_VolumetricConvolution_updateOutput},
{"VolumetricConvolution_updateGradInput", cunn_VolumetricConvolution_updateGradInput},
{"VolumetricConvolution_accGradParameters", cunn_VolumetricConvolution_accGradParameters},
{NULL, NULL}
};
static void cunn_VolumetricConvolution_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_VolumetricConvolution__, "nn");
lua_pop(L,1);
}
|
981ad66318f118766d639eb339c57196f30340e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "CUDA error: ", hipGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
//@@ Define any useful program-wide constants here
#define KERNEL_SIZE 3
#define KERNEL_RADIUS 1
#define TILE_SIZE 8
#define THREADS (TILE_SIZE + KERNEL_SIZE - 1)
//@@ Define constant memory for device kernel here
__constant__ float Mc[KERNEL_SIZE][KERNEL_SIZE][KERNEL_SIZE];
__global__ void conv3d(float *A, float *B,
const int z_size, const int y_size, const int x_size) {
//@@ Insert kernel code here
//set up shared memory
__shared__ float ds_Input[THREADS][THREADS][THREADS];
//set up variable for block and thread indexes
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
//set up output index since both input and output are same size, there is no need for an input index variable
int y_o = by*TILE_SIZE + ty - KERNEL_RADIUS;
int x_o = bx*TILE_SIZE + tx - KERNEL_RADIUS;
int z_o = bz*TILE_SIZE + tz - KERNEL_RADIUS;
float output = 0.0;
//read in input to shared memory
if ((x_o >= 0) && (x_o < x_size) && (y_o >= 0) && (y_o < y_size) && (z_o >= 0) && (z_o < z_size))
{
ds_Input[tz][ty][tx] = A[z_o*y_size*x_size + y_o*x_size + x_o];
}
else
{
ds_Input[tz][ty][tx] = 0.0;
}
__syncthreads();
x_o++;
y_o++;
z_o++;
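  //the ++ above assumes KERNEL_RADIUS == 1: x_o/y_o/z_o switch from the halo (input) element
  //this thread just loaded into shared memory to the output element it may compute, which is
  //only meaningful for threads with tx, ty, tz < TILE_SIZE; the extra threads in each block
  //exist only to help load the halo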
//perform the computations
if (tx < TILE_SIZE && ty < TILE_SIZE && tz < TILE_SIZE)
{
for (int i = 0; i < KERNEL_SIZE; i++)
{
for (int j = 0; j < KERNEL_SIZE; j++)
{
for (int k = 0; k < KERNEL_SIZE; k++)
{
if ((x_o >= 0) && (x_o < x_size) && (y_o >= 0) && (y_o < y_size) && (z_o >= 0) && (z_o < z_size))
{
output += Mc[i][j][k] * ds_Input[i + tz][j + ty][k + tx];
}
}
}
}
//write to output
if(z_o<z_size && y_o < y_size && x_o < x_size)
B[(z_o)*y_size*x_size + (y_o)*x_size + x_o] = output;
}
__syncthreads();
}
int main(int argc, char* argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float * hostInput;
float * hostKernel;
float * hostOutput;
float * deviceInput;
float * deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float*)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel = (float*)wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float*)malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
wbCheck(hipMalloc(&deviceInput, z_size*y_size*x_size*sizeof(float)));
wbCheck(hipMalloc(&deviceOutput, z_size*y_size*x_size*sizeof(float)));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
// Recall that the first three elements of hostInput are dimensions and do
// not need to be copied to the gpu
wbCheck(hipMemcpy(deviceInput, &hostInput[3], z_size*y_size*x_size*sizeof(float), hipMemcpyHostToDevice));
wbCheck(hipMemcpyToSymbol(Mc, hostKernel, KERNEL_SIZE*KERNEL_SIZE*KERNEL_SIZE*sizeof(float)));
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 grid((x_size - 1) / TILE_SIZE + 1, (y_size - 1) / TILE_SIZE + 1, (z_size - 1) / TILE_SIZE + 1);
dim3 threads(THREADS, THREADS, THREADS);
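  // With TILE_SIZE = 8 and KERNEL_SIZE = 3 each block is 10 x 10 x 10 = 1000 threads, which
  // fits the usual 1024-threads-per-block limit: 8^3 of them produce an output element, the
  // rest only stage the halo, and the grid tiles the output volume in TILE_SIZE steps.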
//@@ Launch the GPU kernel here
  conv3d<<<grid, threads>>>(deviceInput, deviceOutput, z_size, y_size, x_size);
hipDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
wbCheck(hipMemcpy(&hostOutput[3], deviceOutput, x_size*y_size*z_size*sizeof(float), hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
hipFree(deviceInput);
hipFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
//hipHostFree(hostOutput); alternate way of freeing?
return 0;
}
|
981ad66318f118766d639eb339c57196f30340e4.cu
|
#include <wb.h>
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "CUDA error: ", cudaGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
//@@ Define any useful program-wide constants here
#define KERNEL_SIZE 3
#define KERNEL_RADIUS 1
#define TILE_SIZE 8
#define THREADS (TILE_SIZE + KERNEL_SIZE - 1)
//@@ Define constant memory for device kernel here
__constant__ float Mc[KERNEL_SIZE][KERNEL_SIZE][KERNEL_SIZE];
__global__ void conv3d(float *A, float *B,
const int z_size, const int y_size, const int x_size) {
//@@ Insert kernel code here
//set up shared memory
__shared__ float ds_Input[THREADS][THREADS][THREADS];
//set up variable for block and thread indexes
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
//set up output index since both input and output are same size, there is no need for an input index variable
int y_o = by*TILE_SIZE + ty - KERNEL_RADIUS;
int x_o = bx*TILE_SIZE + tx - KERNEL_RADIUS;
int z_o = bz*TILE_SIZE + tz - KERNEL_RADIUS;
float output = 0.0;
//read in input to shared memory
if ((x_o >= 0) && (x_o < x_size) && (y_o >= 0) && (y_o < y_size) && (z_o >= 0) && (z_o < z_size))
{
ds_Input[tz][ty][tx] = A[z_o*y_size*x_size + y_o*x_size + x_o];
}
else
{
ds_Input[tz][ty][tx] = 0.0;
}
__syncthreads();
x_o++;
y_o++;
z_o++;
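  //the ++ above assumes KERNEL_RADIUS == 1: x_o/y_o/z_o switch from the halo (input) element
  //this thread just loaded into shared memory to the output element it may compute, which is
  //only meaningful for threads with tx, ty, tz < TILE_SIZE; the extra threads in each block
  //exist only to help load the halo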
//perform the computations
if (tx < TILE_SIZE && ty < TILE_SIZE && tz < TILE_SIZE)
{
for (int i = 0; i < KERNEL_SIZE; i++)
{
for (int j = 0; j < KERNEL_SIZE; j++)
{
for (int k = 0; k < KERNEL_SIZE; k++)
{
if ((x_o >= 0) && (x_o < x_size) && (y_o >= 0) && (y_o < y_size) && (z_o >= 0) && (z_o < z_size))
{
output += Mc[i][j][k] * ds_Input[i + tz][j + ty][k + tx];
}
}
}
}
//write to output
if(z_o<z_size && y_o < y_size && x_o < x_size)
B[(z_o)*y_size*x_size + (y_o)*x_size + x_o] = output;
}
__syncthreads();
}
int main(int argc, char* argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float * hostInput;
float * hostKernel;
float * hostOutput;
float * deviceInput;
float * deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float*)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel = (float*)wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float*)malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
wbCheck(cudaMalloc(&deviceInput, z_size*y_size*x_size*sizeof(float)));
wbCheck(cudaMalloc(&deviceOutput, z_size*y_size*x_size*sizeof(float)));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
// Recall that the first three elements of hostInput are dimensions and do
// not need to be copied to the gpu
wbCheck(cudaMemcpy(deviceInput, &hostInput[3], z_size*y_size*x_size*sizeof(float), cudaMemcpyHostToDevice));
wbCheck(cudaMemcpyToSymbol(Mc, hostKernel, KERNEL_SIZE*KERNEL_SIZE*KERNEL_SIZE*sizeof(float)));
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 grid((x_size - 1) / TILE_SIZE + 1, (y_size - 1) / TILE_SIZE + 1, (z_size - 1) / TILE_SIZE + 1);
dim3 threads(THREADS, THREADS, THREADS);
//@@ Launch the GPU kernel here
  conv3d<<<grid, threads>>>(deviceInput, deviceOutput, z_size, y_size, x_size);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
wbCheck(cudaMemcpy(&hostOutput[3], deviceOutput, x_size*y_size*z_size*sizeof(float), cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
cudaFree(deviceInput);
cudaFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
//cudaFreeHost(hostOutput); alternate way of freeing?
return 0;
}
|
f2284df8124de99b7bf5b804e80ee94b1ca74971.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "analysis.h"
#include <stdlib.h>
__device__ type_data calculate_mean(type_data* data, unsigned int count) {
type_data sum = 0.0;
int j;
for(j=0; j<count; ++j) {
sum += data[j];
}
for(j=0; j<count; ++j) {
data[j] -= (type_data)sum/(type_data)count;
}
return sum/(type_data)count;
}
__global__ void mean_adjust_data_g(type_data** data, type_data* means, int count) {
means[threadIdx.x] = calculate_mean(data[threadIdx.x], count);
}
__device__ type_data calculate_covariance(type_data* firstComponent, type_data* secondComponent, int count) {
type_data sum = 0.0;
for(int j=0; j<count; ++j) {
sum += firstComponent[j]*secondComponent[j];
}
return sum/(type_data)count;
}
__global__ void calculate_covariance_matrix(type_data** data, type_data* covMatrix, int count, int dim) {
int i = threadIdx.x;
int j = blockIdx.x;
if(j<i) return;
covMatrix[i * dim + j] = calculate_covariance(data[i], data[j], count);
covMatrix[j * dim + i] = covMatrix[i * dim + j];
}
__global__ void mean_adjust_data_new(type_data** data, type_data* means, int count) {
means[threadIdx.x] = calculate_mean(data[threadIdx.x], count);
}
|
f2284df8124de99b7bf5b804e80ee94b1ca74971.cu
|
#include "analysis.h"
#include <stdlib.h>
__device__ type_data calculate_mean(type_data* data, unsigned int count) {
type_data sum = 0.0;
int j;
for(j=0; j<count; ++j) {
sum += data[j];
}
for(j=0; j<count; ++j) {
data[j] -= (type_data)sum/(type_data)count;
}
return sum/(type_data)count;
}
__global__ void mean_adjust_data_g(type_data** data, type_data* means, int count) {
means[threadIdx.x] = calculate_mean(data[threadIdx.x], count);
}
__device__ type_data calculate_covariance(type_data* firstComponent, type_data* secondComponent, int count) {
type_data sum = 0.0;
for(int j=0; j<count; ++j) {
sum += firstComponent[j]*secondComponent[j];
}
return sum/(type_data)count;
}
__global__ void calculate_covariance_matrix(type_data** data, type_data* covMatrix, int count, int dim) {
int i = threadIdx.x;
int j = blockIdx.x;
if(j<i) return;
covMatrix[i * dim + j] = calculate_covariance(data[i], data[j], count);
covMatrix[j * dim + i] = covMatrix[i * dim + j];
}
__global__ void mean_adjust_data_new(type_data** data, type_data* means, int count) {
means[threadIdx.x] = calculate_mean(data[threadIdx.x], count);
}
|
5dd73485ea33285add6eab21fe1397b49c5ba112.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// You can use any other block size you wish.
#define BLOCK_SIZE 512
#define BLOCK_DUB 1024
//Works for power of 2 elements
#define DEFAULT_NUM_ELEMENTS 1024
#define MAX_RAND 2
typedef float REAL;
__global__ void prescan(REAL *odata, REAL *idata, int num)
{
volatile __shared__ REAL temp[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
int ofs = 1;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
if (top < DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
temp[2*ti+1] = idata[top];
} else {
temp[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
} else {
temp[2*ti] = 0;
}
}
for (int i = BLOCK_SIZE; i>0; i>>=1)
{
__syncthreads();
if (ti<i)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
temp[bi] += temp[ai];
}
ofs <<= 1;
}
__syncthreads();
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
idata[top] = temp[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
}
}
}
__global__ void downsweep(REAL *odata, REAL *idata, int num, int last)
{
volatile __shared__ REAL tempd[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
int ofs = BLOCK_DUB;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
if (top < DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
tempd[2*ti+1] = idata[top];
} else {
tempd[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
} else {
tempd[2*ti] = 0;
}
}
if (last == 1) {
tempd[num-1] = 0;
}
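  // Zeroing the last element makes the down-sweep produce an exclusive prefix sum: the root
  // of the reduction tree starts at 0 and, as it is swapped and pushed down, each position
  // ends up holding the sum of all elements strictly before it (Blelloch's work-efficient
  // scan).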
for (int j = 1; j<num; j<<=1) //fix
{
ofs >>= 1;
__syncthreads();
if (ti < j)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
REAL temp2 = tempd[ai];
tempd[ai] = tempd[bi];
tempd[bi] += temp2;
}
}
__syncthreads();
if (last == 1) {
if (top < DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
odata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
}
}
} else {
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
idata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
}
}
}
}
// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
//Use kernel to compute the reduction
int blocksx, blocksy, blocks;
int threads = BLOCK_SIZE;
int nestElements = numElements;
  int lastElements = nestElements; // in case the reduction loop below never runs
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid(blocksx,blocksy);
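  // Older GPUs cap gridDim.x at 65535 blocks, so when more blocks are needed the count is
  // split across a 2-D grid; the kernels rebuild the linear block id as
  // blockIdx.x + blockIdx.y * gridDim.x.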
while(nestElements > 1)
{
// Recursive implementation to compute the reduction
hipLaunchKernelGGL(( prescan) , dim3(dimGrid),dim3(threads), 0, 0, outArray, inArray, nestElements);
lastElements = nestElements;
nestElements = blocks;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
    dimGrid = dim3(blocksx, blocksy); // update the launch grid for the next pass
}
//fix
nestElements = lastElements;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid2(blocksx,blocksy);
while(nestElements <= DEFAULT_NUM_ELEMENTS)
{
printf("%d \n",nestElements);
// Recursive implementation to compute the downsweep
if (nestElements == DEFAULT_NUM_ELEMENTS) {
hipLaunchKernelGGL(( downsweep) , dim3(dimGrid2),dim3(threads), 0, 0, outArray, inArray, nestElements, 1);
nestElements = DEFAULT_NUM_ELEMENTS+1; //fix
} else {
hipLaunchKernelGGL(( downsweep) , dim3(dimGrid2),dim3(threads), 0, 0, outArray, inArray, nestElements, 0);
nestElements = BLOCK_DUB*nestElements; //fix
}
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
    dimGrid2 = dim3(blocksx, blocksy); // update the launch grid for the next pass
}
//downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const REAL* reference, const REAL* data,
const unsigned int len);
extern "C"
void computeGold( REAL* reference, REAL* idata, const unsigned int len);
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
float device_time;
float host_time;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( REAL) * num_elements;
REAL* h_data = (REAL*) malloc( mem_size);
switch(argc-1)
{
case 0:
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
default:
num_elements = atoi(argv[1]);
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
hipEvent_t time_start;
hipEvent_t time_end;
hipEventCreate(&time_start);
hipEventCreate(&time_end);
// compute reference solution
REAL* reference = (REAL*) malloc( mem_size);
// cutStartTimer(timer);
hipEventRecord(time_start, 0);
computeGold( reference, h_data, num_elements);
hipEventRecord(time_end, 0);
hipEventSynchronize(time_end);
hipEventElapsedTime(&host_time, time_start, time_end);
// cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", host_time);
// allocate device memory input and output arrays
REAL* d_idata = NULL;
REAL* d_odata = NULL;
hipMalloc( (void**) &d_idata, mem_size);
hipMalloc( (void**) &d_odata, mem_size);
// copy host memory to device input array
hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice);
// initialize all the other device arrays to be safe
hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice);
// **===-------- Allocate data structure here -----------===**
// preallocBlockSums(num_elements);
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
//prescanArray(d_odata, d_idata, 16);
// Run the prescan
// CUT_SAFE_CALL(cutCreateTimer(&timer));
// cutStartTimer(timer);
hipEventRecord(time_start, 0);
// **===-------- Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, num_elements);
// **===-----------------------------------------------------------===**
hipDeviceSynchronize();
hipEventRecord(time_end, 0);
hipEventSynchronize(time_end);
hipEventElapsedTime(&device_time, time_start, time_end);
hipEventDestroy(time_start);
hipEventDestroy(time_end);
// cutStopTimer(timer);
printf("CUDA Processing time: %g (ms)\n", device_time);
// device_time = cutGetTimerValue(timer);
// printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Deallocate data structure here -----------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
hipMemcpy( h_data, d_odata, sizeof(REAL) * num_elements,
hipMemcpyDeviceToHost);
  // Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");
// cleanup memory
free( h_data);
free( reference);
hipFree( d_odata);
hipFree( d_idata);
}
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) {
int i;
int diff_count = 0;
for (i = 0; i < num_elements; i++) {
REAL diff = fabs(reference[i] - h_data[i]);
REAL denominator = 1.f;
if (denominator < fabs(reference[i])) {
denominator = fabs(reference[i]);
}
if (!(diff / denominator < err)) {
diff_count ++;
}
}
if (diff_count > 0) {
printf("Number of difference: %d\n", diff_count);
return 0;
} else {
return 1;
}
}
|
5dd73485ea33285add6eab21fe1397b49c5ba112.cu
|
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// You can use any other block size you wish.
#define BLOCK_SIZE 512
#define BLOCK_DUB 1024
//Works for power of 2 elements
#define DEFAULT_NUM_ELEMENTS 1024
#define MAX_RAND 2
typedef float REAL;
__global__ void prescan(REAL *odata, REAL *idata, int num)
{
volatile __shared__ REAL temp[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
int ofs = 1;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
if (top < DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
temp[2*ti+1] = idata[top];
} else {
temp[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
} else {
temp[2*ti] = 0;
}
}
for (int i = BLOCK_SIZE; i>0; i>>=1)
{
__syncthreads();
if (ti<i)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
temp[bi] += temp[ai];
}
ofs <<= 1;
}
__syncthreads();
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
idata[top] = temp[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
}
}
}
__global__ void downsweep(REAL *odata, REAL *idata, int num, int last)
{
volatile __shared__ REAL tempd[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
int ofs = BLOCK_DUB;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
if (top < DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
tempd[2*ti+1] = idata[top];
} else {
tempd[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
} else {
tempd[2*ti] = 0;
}
}
if (last == 1) {
tempd[num-1] = 0;
}
for (int j = 1; j<num; j<<=1) //fix
{
ofs >>= 1;
__syncthreads();
if (ti < j)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
REAL temp2 = tempd[ai];
tempd[ai] = tempd[bi];
tempd[bi] += temp2;
}
}
__syncthreads();
if (last == 1) {
if (top < DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
odata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
}
}
} else {
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
idata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
}
}
}
}
// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
//Use kernel to compute the reduction
int blocksx, blocksy, blocks;
int threads = BLOCK_SIZE;
int nestElements = numElements;
  int lastElements = nestElements; // in case the reduction loop below never runs
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid(blocksx,blocksy);
while(nestElements > 1)
{
// Recursive implementation to compute the reduction
prescan <<<dimGrid,threads>>> (outArray, inArray, nestElements);
lastElements = nestElements;
nestElements = blocks;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
    dimGrid = dim3(blocksx, blocksy); // update the launch grid for the next pass
}
//fix
nestElements = lastElements;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid2(blocksx,blocksy);
while(nestElements <= DEFAULT_NUM_ELEMENTS)
{
printf("%d \n",nestElements);
// Recursive implementation to compute the downsweep
if (nestElements == DEFAULT_NUM_ELEMENTS) {
downsweep <<<dimGrid2,threads>>> (outArray, inArray, nestElements, 1);
nestElements = DEFAULT_NUM_ELEMENTS+1; //fix
} else {
downsweep <<<dimGrid2,threads>>> (outArray, inArray, nestElements, 0);
nestElements = BLOCK_DUB*nestElements; //fix
}
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
    dimGrid2 = dim3(blocksx, blocksy); // update the launch grid for the next pass
}
//downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const REAL* reference, const REAL* data,
const unsigned int len);
extern "C"
void computeGold( REAL* reference, REAL* idata, const unsigned int len);
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
float device_time;
float host_time;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( REAL) * num_elements;
REAL* h_data = (REAL*) malloc( mem_size);
switch(argc-1)
{
case 0:
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
default:
num_elements = atoi(argv[1]);
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
cudaEvent_t time_start;
cudaEvent_t time_end;
cudaEventCreate(&time_start);
cudaEventCreate(&time_end);
// compute reference solution
REAL* reference = (REAL*) malloc( mem_size);
// cutStartTimer(timer);
cudaEventRecord(time_start, 0);
computeGold( reference, h_data, num_elements);
cudaEventRecord(time_end, 0);
cudaEventSynchronize(time_end);
cudaEventElapsedTime(&host_time, time_start, time_end);
// cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", host_time);
// allocate device memory input and output arrays
REAL* d_idata = NULL;
REAL* d_odata = NULL;
cudaMalloc( (void**) &d_idata, mem_size);
cudaMalloc( (void**) &d_odata, mem_size);
// copy host memory to device input array
cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
// initialize all the other device arrays to be safe
cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice);
// **===-------- Allocate data structure here -----------===**
// preallocBlockSums(num_elements);
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
//prescanArray(d_odata, d_idata, 16);
// Run the prescan
// CUT_SAFE_CALL(cutCreateTimer(&timer));
// cutStartTimer(timer);
cudaEventRecord(time_start, 0);
// **===-------- Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, num_elements);
// **===-----------------------------------------------------------===**
cudaThreadSynchronize();
cudaEventRecord(time_end, 0);
cudaEventSynchronize(time_end);
cudaEventElapsedTime(&device_time, time_start, time_end);
cudaEventDestroy(time_start);
cudaEventDestroy(time_end);
// cutStopTimer(timer);
printf("CUDA Processing time: %g (ms)\n", device_time);
// device_time = cutGetTimerValue(timer);
// printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Deallocate data structure here -----------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
cudaMemcpy( h_data, d_odata, sizeof(REAL) * num_elements,
cudaMemcpyDeviceToHost);
  // Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");
// cleanup memory
free( h_data);
free( reference);
cudaFree( d_odata);
cudaFree( d_idata);
}
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) {
int i;
int diff_count = 0;
for (i = 0; i < num_elements; i++) {
REAL diff = fabs(reference[i] - h_data[i]);
REAL denominator = 1.f;
if (denominator < fabs(reference[i])) {
denominator = fabs(reference[i]);
}
if (!(diff / denominator < err)) {
diff_count ++;
}
}
if (diff_count > 0) {
printf("Number of difference: %d\n", diff_count);
return 0;
} else {
return 1;
}
}
|
df4843caa192bfb6725b789004bd93e2892485ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <utility>
#include "dali/core/convert.h"
#include "dali/core/cuda_utils.h"
#include "dali/core/error_handling.h"
#include "dali/core/static_switch.h"
#include "dali/kernels/common/block_setup.h"
#include "dali/operators/generic/cast.h"
namespace dali {
template <typename OType, typename IType>
__global__ void BatchedCastKernel(const CastSampleDesc *samples,
const kernels::BlockDesc<1> *blocks) {
const auto &block = blocks[blockIdx.x];
const auto &sample = samples[block.sample_idx];
auto *out = reinterpret_cast<OType *>(sample.output);
const auto *in = reinterpret_cast<const IType *>(sample.input);
for (int x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) {
out[x] = ConvertSat<OType>(in[x]);
}
}
template <>
void Cast<GPUBackend>::PrepareBlocks(const DeviceWorkspace &ws) {
const auto &input = ws.InputRef<GPUBackend>(0);
const auto &input_shape = input.shape();
std::array<std::pair<int, int>, 1> collapse_groups = {{{0, input_shape.sample_dim()}}};
auto collapsed_shape = collapse_dims<1>(input.shape(), collapse_groups);
block_setup_.SetupBlocks(collapsed_shape, true);
blocks_dev_.from_host(block_setup_.Blocks(), ws.stream());
}
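// Cast is purely element-wise, so each sample's shape is collapsed to one dimension before
// block setup; the block-setup helper then tiles the flattened samples into BlockDesc<1>
// ranges, one CUDA block per range, which is what BatchedCastKernel strides over above.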
template <>
void Cast<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
const auto &input = ws.InputRef<GPUBackend>(0);
const auto &input_shape = input.shape();
auto &output = ws.OutputRef<GPUBackend>(0);
output.SetLayout(input.GetLayout());
auto num_samples = input_shape.num_samples();
samples_.resize(num_samples);
for (int sample_id = 0; sample_id < num_samples; sample_id++) {
samples_[sample_id].output = output.raw_mutable_tensor(sample_id);
samples_[sample_id].input = input.raw_tensor(sample_id);
}
samples_dev_.from_host(samples_, ws.stream());
DALIDataType itype = input.type().id();
dim3 grid_dim = block_setup_.GridDim();
dim3 block_dim = block_setup_.BlockDim();
TYPE_SWITCH(output_type_, type2id, OType, CAST_ALLOWED_TYPES, (
TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, (
hipLaunchKernelGGL(( BatchedCastKernel<OType, IType>)
, dim3(grid_dim), dim3(block_dim), 0, ws.stream(), samples_dev_.data(), blocks_dev_.data());
), DALI_FAIL(make_string("Invalid input type: ", itype));); // NOLINT(whitespace/parens)
), DALI_FAIL(make_string("Invalid output type: ", output_type_));); // NOLINT(whitespace/parens)
}
DALI_REGISTER_OPERATOR(Cast, Cast<GPUBackend>, GPU);
} // namespace dali
|
df4843caa192bfb6725b789004bd93e2892485ce.cu
|
// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <utility>
#include "dali/core/convert.h"
#include "dali/core/cuda_utils.h"
#include "dali/core/error_handling.h"
#include "dali/core/static_switch.h"
#include "dali/kernels/common/block_setup.h"
#include "dali/operators/generic/cast.h"
namespace dali {
template <typename OType, typename IType>
__global__ void BatchedCastKernel(const CastSampleDesc *samples,
const kernels::BlockDesc<1> *blocks) {
const auto &block = blocks[blockIdx.x];
const auto &sample = samples[block.sample_idx];
auto *out = reinterpret_cast<OType *>(sample.output);
const auto *in = reinterpret_cast<const IType *>(sample.input);
for (int x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) {
out[x] = ConvertSat<OType>(in[x]);
}
}
template <>
void Cast<GPUBackend>::PrepareBlocks(const DeviceWorkspace &ws) {
const auto &input = ws.InputRef<GPUBackend>(0);
const auto &input_shape = input.shape();
std::array<std::pair<int, int>, 1> collapse_groups = {{{0, input_shape.sample_dim()}}};
auto collapsed_shape = collapse_dims<1>(input.shape(), collapse_groups);
block_setup_.SetupBlocks(collapsed_shape, true);
blocks_dev_.from_host(block_setup_.Blocks(), ws.stream());
}
template <>
void Cast<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
const auto &input = ws.InputRef<GPUBackend>(0);
const auto &input_shape = input.shape();
auto &output = ws.OutputRef<GPUBackend>(0);
output.SetLayout(input.GetLayout());
auto num_samples = input_shape.num_samples();
samples_.resize(num_samples);
for (int sample_id = 0; sample_id < num_samples; sample_id++) {
samples_[sample_id].output = output.raw_mutable_tensor(sample_id);
samples_[sample_id].input = input.raw_tensor(sample_id);
}
samples_dev_.from_host(samples_, ws.stream());
DALIDataType itype = input.type().id();
dim3 grid_dim = block_setup_.GridDim();
dim3 block_dim = block_setup_.BlockDim();
TYPE_SWITCH(output_type_, type2id, OType, CAST_ALLOWED_TYPES, (
TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, (
BatchedCastKernel<OType, IType>
<<<grid_dim, block_dim, 0, ws.stream()>>>(samples_dev_.data(), blocks_dev_.data());
), DALI_FAIL(make_string("Invalid input type: ", itype));); // NOLINT(whitespace/parens)
), DALI_FAIL(make_string("Invalid output type: ", output_type_));); // NOLINT(whitespace/parens)
}
DALI_REGISTER_OPERATOR(Cast, Cast<GPUBackend>, GPU);
} // namespace dali
|
3361f12ad80e4c2b8b629cad19effc0cc2650e8c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
namespace oneflow {
namespace {
template<typename T, int32_t N>
struct Param {
const T* in[N];
T* out;
};
template<typename T, int32_t N>
__global__ void gpu_add(const int64_t n, Param<T, N> para) {
if (para.out == para.in[0]) {
CUDA_1D_KERNEL_LOOP(i, n) {
T tmp = 0;
#pragma unroll
for (int j = 1; j < N; ++j) { tmp += para.in[j][i]; }
if (tmp != 0) { para.out[i] += tmp; }
}
} else {
CUDA_1D_KERNEL_LOOP(i, n) {
T tmp = para.in[0][i];
#pragma unroll
for (int j = 1; j < N; ++j) { tmp += para.in[j][i]; }
para.out[i] = tmp;
}
}
}
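// The first branch above handles the in-place case (the output buffer aliases in[0]): only
// in[1..N-1] are accumulated, and the write-back is skipped when that partial sum is zero so
// the output element is not rewritten needlessly; the second branch is the ordinary
// out-of-place sum of all N inputs.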
template<typename T, int32_t N>
struct GpuAddCaller {
static void call(user_op::KernelComputeContext* ctx) {
CHECK_EQ(N, ctx->inputs().size());
Param<T, N> para;
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int64_t n = out->shape().elem_cnt();
para.out = out->mut_dptr<T>();
for (int32_t i = 0; i < N; ++i) {
para.in[i] = ctx->Tensor4ArgNameAndIndex("in", i)->dptr<T>();
}
if (n == 0) { return; }
hipLaunchKernelGGL(( gpu_add<T, N>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, para);
}
};
using CallFn = std::function<void(user_op::KernelComputeContext*)>;
using AddNKernelRegistry = std::map<int32_t, CallFn>;
#define ADD_NUM_PARAM_SEQ \
OF_PP_MAKE_TUPLE_SEQ(2) \
OF_PP_MAKE_TUPLE_SEQ(3) \
OF_PP_MAKE_TUPLE_SEQ(4) \
OF_PP_MAKE_TUPLE_SEQ(5) \
OF_PP_MAKE_TUPLE_SEQ(6) \
OF_PP_MAKE_TUPLE_SEQ(7) \
OF_PP_MAKE_TUPLE_SEQ(8)
template<typename T>
const AddNKernelRegistry& SingletonRegistry() {
static AddNKernelRegistry s_registry = {
#define REG_ENTRY(n) {n, &GpuAddCaller<T, n>::call},
OF_PP_FOR_EACH_TUPLE(REG_ENTRY, ADD_NUM_PARAM_SEQ)
#undef REG_ENTRY
};
return s_registry;
}
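// The registry maps the runtime input count (2..8, from ADD_NUM_PARAM_SEQ) to a
// GpuAddCaller instantiation whose per-element loop is unrolled over the compile-time N, so
// every supported arity gets its own fully unrolled kernel.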
template<typename T>
const CallFn* LookUpInRegistry(int32_t in_num) {
auto it = SingletonRegistry<T>().find(in_num);
if (it == SingletonRegistry<T>().end()) { return nullptr; }
return &(it->second);
}
} // namespace
template<typename T>
class GpuAddNKernel : public user_op::OpKernel {
public:
GpuAddNKernel() = default;
~GpuAddNKernel() = default;
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
int32_t in_num = ctx->inputs().size();
const auto* caller = LookUpInRegistry<T>(in_num);
CHECK(caller != nullptr) << "GpuAddNKernel: Cannot find registered funtion for in_num: "
<< in_num << " of data_type: " << DataType_Name(GetDataType<T>::value);
(*caller)(ctx);
}
};
#define REGISTER_GPU_ADDN_KERNEL(cpp_type, dtype) \
REGISTER_USER_KERNEL("add_n") \
.SetCreateFn<GpuAddNKernel<cpp_type>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("in", 0) == dtype)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
OF_PP_FOR_EACH_TUPLE(REGISTER_GPU_ADDN_KERNEL, ARITHMETIC_DATA_TYPE_SEQ);
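// For one representative pair such as (float, DataType::kFloat) -- assuming
// ARITHMETIC_DATA_TYPE_SEQ contains it -- the macro above expands roughly to (a sketch,
// not generated output):
//   REGISTER_USER_KERNEL("add_n")
//       .SetCreateFn<GpuAddNKernel<float>>()
//       .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
//                        & (user_op::HobDataType("in", 0) == DataType::kFloat))
//       .SetInplaceProposalFn(...);  // proposes writing "out" 0 in place of "in" 0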
namespace {
template<int32_t N>
__global__ void gpu_half_add(const int64_t n, Param<half, N> para) {
if (para.out == para.in[0]) {
CUDA_1D_KERNEL_LOOP(i, n) {
half tmp = 0;
#pragma unroll
for (int j = 1; j < N; ++j) { tmp = __hadd(tmp, para.in[j][i]); }
para.out[i] = __hadd(para.out[i], tmp);
}
} else {
CUDA_1D_KERNEL_LOOP(i, n) {
half tmp = para.in[0][i];
#pragma unroll
for (int j = 1; j < N; ++j) { tmp = __hadd(tmp, para.in[j][i]); }
para.out[i] = tmp;
}
}
}
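// Unlike gpu_add, the in-place branch of gpu_half_add always executes
// out[i] = __hadd(out[i], tmp) without the (tmp != 0) guard; the remaining N-1 inputs are
// reduced sequentially in fp16 with __hadd, so rounding follows a plain left-to-right sum.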
template<int32_t N>
struct GpuAddCaller<float16, N> {
static void call(user_op::KernelComputeContext* ctx) {
CHECK_EQ(N, ctx->inputs().size());
Param<half, N> para;
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int64_t n = out->shape().elem_cnt();
para.out = reinterpret_cast<half*>(out->mut_dptr<float16>());
for (int32_t i = 0; i < N; ++i) {
para.in[i] =
reinterpret_cast<const half*>(ctx->Tensor4ArgNameAndIndex("in", i)->dptr<float16>());
}
hipLaunchKernelGGL(( gpu_half_add<N>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, para);
}
};
} // namespace
class GpuAddNHalfKernel : public user_op::OpKernel {
public:
GpuAddNHalfKernel() = default;
~GpuAddNHalfKernel() = default;
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
int32_t in_num = ctx->inputs().size();
const auto* caller = LookUpInRegistry<float16>(in_num);
CHECK(caller != nullptr) << "GpuAddNHalfKernel: Cannot find registered funtion for in_num: "
<< in_num << " of data_type: " << DataType_Name(DataType::kFloat16);
(*caller)(ctx);
}
};
REGISTER_USER_KERNEL("add_n")
.SetCreateFn<GpuAddNHalfKernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("in", 0) == DataType::kFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
return Maybe<void>::Ok();
});
} // namespace oneflow
|
3361f12ad80e4c2b8b629cad19effc0cc2650e8c.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
namespace oneflow {
namespace {
template<typename T, int32_t N>
struct Param {
const T* in[N];
T* out;
};
template<typename T, int32_t N>
__global__ void gpu_add(const int64_t n, Param<T, N> para) {
if (para.out == para.in[0]) {
CUDA_1D_KERNEL_LOOP(i, n) {
T tmp = 0;
#pragma unroll
for (int j = 1; j < N; ++j) { tmp += para.in[j][i]; }
if (tmp != 0) { para.out[i] += tmp; }
}
} else {
CUDA_1D_KERNEL_LOOP(i, n) {
T tmp = para.in[0][i];
#pragma unroll
for (int j = 1; j < N; ++j) { tmp += para.in[j][i]; }
para.out[i] = tmp;
}
}
}
template<typename T, int32_t N>
struct GpuAddCaller {
static void call(user_op::KernelComputeContext* ctx) {
CHECK_EQ(N, ctx->inputs().size());
Param<T, N> para;
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int64_t n = out->shape().elem_cnt();
para.out = out->mut_dptr<T>();
for (int32_t i = 0; i < N; ++i) {
para.in[i] = ctx->Tensor4ArgNameAndIndex("in", i)->dptr<T>();
}
if (n == 0) { return; }
gpu_add<T, N>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, para);
}
};
using CallFn = std::function<void(user_op::KernelComputeContext*)>;
using AddNKernelRegistry = std::map<int32_t, CallFn>;
#define ADD_NUM_PARAM_SEQ \
OF_PP_MAKE_TUPLE_SEQ(2) \
OF_PP_MAKE_TUPLE_SEQ(3) \
OF_PP_MAKE_TUPLE_SEQ(4) \
OF_PP_MAKE_TUPLE_SEQ(5) \
OF_PP_MAKE_TUPLE_SEQ(6) \
OF_PP_MAKE_TUPLE_SEQ(7) \
OF_PP_MAKE_TUPLE_SEQ(8)
template<typename T>
const AddNKernelRegistry& SingletonRegistry() {
static AddNKernelRegistry s_registry = {
#define REG_ENTRY(n) {n, &GpuAddCaller<T, n>::call},
OF_PP_FOR_EACH_TUPLE(REG_ENTRY, ADD_NUM_PARAM_SEQ)
#undef REG_ENTRY
};
return s_registry;
}
template<typename T>
const CallFn* LookUpInRegistry(int32_t in_num) {
auto it = SingletonRegistry<T>().find(in_num);
if (it == SingletonRegistry<T>().end()) { return nullptr; }
return &(it->second);
}
} // namespace
template<typename T>
class GpuAddNKernel : public user_op::OpKernel {
public:
GpuAddNKernel() = default;
~GpuAddNKernel() = default;
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
int32_t in_num = ctx->inputs().size();
const auto* caller = LookUpInRegistry<T>(in_num);
CHECK(caller != nullptr) << "GpuAddNKernel: Cannot find registered funtion for in_num: "
<< in_num << " of data_type: " << DataType_Name(GetDataType<T>::value);
(*caller)(ctx);
}
};
#define REGISTER_GPU_ADDN_KERNEL(cpp_type, dtype) \
REGISTER_USER_KERNEL("add_n") \
.SetCreateFn<GpuAddNKernel<cpp_type>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("in", 0) == dtype)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
OF_PP_FOR_EACH_TUPLE(REGISTER_GPU_ADDN_KERNEL, ARITHMETIC_DATA_TYPE_SEQ);
namespace {
template<int32_t N>
__global__ void gpu_half_add(const int64_t n, Param<half, N> para) {
if (para.out == para.in[0]) {
CUDA_1D_KERNEL_LOOP(i, n) {
half tmp = 0;
#pragma unroll
for (int j = 1; j < N; ++j) { tmp = __hadd(tmp, para.in[j][i]); }
para.out[i] = __hadd(para.out[i], tmp);
}
} else {
CUDA_1D_KERNEL_LOOP(i, n) {
half tmp = para.in[0][i];
#pragma unroll
for (int j = 1; j < N; ++j) { tmp = __hadd(tmp, para.in[j][i]); }
para.out[i] = tmp;
}
}
}
template<int32_t N>
struct GpuAddCaller<float16, N> {
static void call(user_op::KernelComputeContext* ctx) {
CHECK_EQ(N, ctx->inputs().size());
Param<half, N> para;
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int64_t n = out->shape().elem_cnt();
para.out = reinterpret_cast<half*>(out->mut_dptr<float16>());
for (int32_t i = 0; i < N; ++i) {
para.in[i] =
reinterpret_cast<const half*>(ctx->Tensor4ArgNameAndIndex("in", i)->dptr<float16>());
}
gpu_half_add<N>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, para);
}
};
} // namespace
class GpuAddNHalfKernel : public user_op::OpKernel {
public:
GpuAddNHalfKernel() = default;
~GpuAddNHalfKernel() = default;
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
int32_t in_num = ctx->inputs().size();
const auto* caller = LookUpInRegistry<float16>(in_num);
CHECK(caller != nullptr) << "GpuAddNHalfKernel: Cannot find registered funtion for in_num: "
<< in_num << " of data_type: " << DataType_Name(DataType::kFloat16);
(*caller)(ctx);
}
};
REGISTER_USER_KERNEL("add_n")
.SetCreateFn<GpuAddNHalfKernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("in", 0) == DataType::kFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
return Maybe<void>::Ok();
});
} // namespace oneflow
|
1b95755ba164dc7700c97e058f6cabd0c82ecedc.hip
|
// !!! This is a file automatically generated by hipify!!!
// ##include <bits/stdc++.h>
//sorting in thrust https://stackoverflow.com/questions/23541503/sorting-arrays-of-structures-in-cuda/23645954
//Merge SearchSet http://on-demand.gputechconf.com/gtc/2013/presentations/S3414-Efficient-Merge-Search-Set-Operations.pdf
//thrust SET https://thrust.github.io/doc/group__set__operations.html
//maximum int = 2,147,483,647
//min int = -2,147,483,648
//scp -r /home/awd/work/coursework/DS295/project/pp_project/parallel/* [email protected]:/home/dtanwar/Project/Parallel_Programming_Project/parallel
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <functional>
#include <iostream>
#include <fstream>
#include <climits>
#include<cuda.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define GS 1024
#define BS 1024
using namespace std;
typedef int uui;
typedef int var;
typedef struct{
var V; //no of vertices
var E; //no of edges
var n; //no of non empty rows
//var E;
uui *colind; //nonzeroes in each row (colind)
uui *roff; //starting offset of each row (rowoff)
uui *rlen; //length of each row
uui *rows; //indices of the non empty rows
} G;
__device__ int L;
__global__ void reset_bitmap(var *bitmap , var blockid, var V){
int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index >= V*blockid && index < V*(blockid+1)){
atomicAnd(bitmap + index , 0);
}
}
__global__ void find(var *data, var value, /*int min_idx,*/ var io_s, var rlen_i){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if(idx >= io_s && idx<= rlen_i){
if(data[idx] == value)
atomicMin(&L, idx);
}
}
//cudamalloc colind , roff , rows , rlen , bitmap , E , V ,n , supp,k ;
__global__ void getmajorsupport(uui* d_colind, uui* d_roff, uui* d_rows, uui* d_rlen, var* bitmap, var E, var V, var n, uui* d_supp, var K){
__shared__ int broadcast[BS]; //TODO: 2d array! why?
/* if(threadIdx.x==0 && blockIdx.x==1)
// {
// printf("\nkernel threadId.x=%d blockid.x=%d E=%d V=%d n=%d K=%d\n",threadIdx.x,blockIdx.x,E,V,n,K );
// // for (var i=0;i<(n) ;i++)
// // printf("%d ",d_rows[i]);
// // printf("\n");
// // printf("rows\n");
// __syncthreads();
// printf("colind\n");
// for(var i=0;i<E;i++)
// printf("%d ",d_colind[i] );
// printf("\n");
// __syncthreads();
// // printf("roff\n" );
// // for(var i=0;i<V+1;i++)
// // printf("%d ",d_roff[i]);
// // printf("\n");
// // printf("rlen\n");
// // for(var i=0;i<V;i++)
// // printf("%d ",d_rlen[i]);
// // printf("\n");
//
//
//
// } */
var i,io_s,io_e,j,jo_s,jo_e,jo,io,c,count,k;
for (var s = 0 ; s<n ; s+=gridDim.x){
printf("Inside kernel\n");
i = d_rows[s];
io_s = d_roff[i];
io_e = io_s + d_rlen[i];
printf("Inside 4\n");
for (io=io_s ; io < io_e ; io += blockDim.x){
printf("Inside 5, io=%d", io);
c = (io + threadIdx.x < io_e) ? d_colind[io + threadIdx.x] : -1;
printf("Inside 6, c=%d ", c);
if (c > -1){
atomicOr ((bitmap + (V * blockIdx.x) +c) , 1);
broadcast[threadIdx.x] = c;
printf("Inside 1\n");
}
__syncthreads();
for (var t=0 ; t < blockDim.x ; t++){
j = broadcast[t];
printf("Inside 2\n");
if (j == -1) break;
count = 0;
jo_s = d_roff[j];
jo_e = jo_s + d_rlen[j];
for(jo = jo_s + threadIdx.x ; jo < jo_e ; jo += blockDim.x){
k = d_colind[jo];
if(bitmap[V * blockIdx.x + k] == 1){
count ++;
atomicAdd(d_supp + jo , 1);
// find<<< E/1024 +1, 1024 >>>(d_colind, k , /*&L,*/ io_s, d_rlen[i]);
for(L=0; L <= d_rlen[i] ; L++)
if (d_colind[io_s + L] == k)
break;
printf("Before: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
atomicAdd(d_supp + io_s + L , 1);
printf("After: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
}
}
atomicAdd(d_supp + io + t , count);
}
}
// for(var x = V*blockIdx.x, i=0; i<V/*x< V*(blockIdx.x + 1)*/ ; i++,x++){
// atomicAnd(bitmap + x , 0);
// }
atomicAnd(bitmap + (V * blockIdx.x) + c , 0);
//reset_bitmap<<< GS,BS >>> (bitmap, blockIdx.x,V);
}
__syncthreads();
}
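// Each block works on a private V-wide slice of the bitmap (bitmap + V * blockIdx.x): it
// first marks the neighbours of row i, then, for every neighbour j broadcast through shared
// memory, counts how many of j's neighbours are also marked, accumulating edge support in
// d_supp. Two caveats are visible above: the loop index L is a single __device__ global
// shared by all threads, so concurrent threads race on it, and the final atomicAnd clears
// only the last value of c instead of the whole per-block slice (the commented-out
// reset_bitmap launch hints at the intended full reset).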
// #include "read_graph.hpp"
ifstream fin;
ofstream fout;
string infile, outfile;
void readGraph(string filename, G *g){
// cout<<"inside readGraph"<<endl;
// infile ="../../../input/" + name + ".mmio" ; // ../../../input/amazon0302_adj.mmio
// outfile="../../output/serial/" + name + ".txt" ; // dataset+"-out.txt";
infile =filename;
cout<<infile<<endl;
fin.open(infile.c_str()); // opening the input file
fout.open(outfile.c_str()); // opening the output file
string temp;
//getline(fin,temp); // reading the description line 1
//getline(fin,temp); // reading the description line 2
var temp_edge; // temporary edge because the edge weight is unused
var u,v; // the v1,v2 of edges
fin >> g->V >> g->E ; // reading the MxN graph and edges
cout<< g->V<<" "<< g->E<<endl; // just checking if it worked
/**************************allocating & initializing all flag[V] to false**********************************/
// bool flag[g->V]; // tells whether particular row is empty or not
// for (var i=0 ; i < g->V ; i++) {
// flag[i] = false; // false means empty
// }
thrust::device_vector<bool> flag(g->V);
thrust::fill(flag.begin(), flag.end(),0);
/**************************allocating & initializing all roff[V+1] to zero**********************************/
g->roff = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(g->roff != NULL);
for (var i=0 ; i < g->V+1 ; i++) {
g->roff[i] = 0;
//cout<<g->roff[i]<<" ";
};
//cout<<endl;
/**************************increase row offset and set flag for non empty row********************************/
for (var i=0; i<g->E; ++i) { //thrust
fin >> u >> v;
//cout<< u <<" "<<v <<endl;
if(u > v)
g->roff[u+1]++ , flag[u] = true;
else if(u < v)
g->roff[v+1]++ , flag[v] = true;
}
/**********************populates indices of nonzero rows rows[n] and initializes n (no of non empty rows)******/
g->rows = (uui *) malloc((g->V) * sizeof(uui));
g->n = 0;
var k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true){
g->n++; //thrust
g->rows[k++] = i; //thrust
}
}
/**********************************************************************************************************/
//converting the roff from degree holder to actual usage.
uui *temp_num_edges = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(temp_num_edges != NULL);
temp_num_edges[0] = 0;
//g->E= 0;
k=0;
for(var i = 0; i < g->V; i++) {
// g->E += g->roff[i];
k += g->roff[i+1];
temp_num_edges[i+1] =k;
}
for(var i= 0; i < g->V+1; i++)
g->roff[i] = temp_num_edges[i];
/**********************************************************************************************************/
g->rlen = (uui *) malloc((g->V) * sizeof(uui));
k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true)
g->rlen[k] = g->roff[i+1] - g->roff[i];
else
g->rlen[k] = 0;
k++;
}
/**********************************************************************************************************/
//Allocate space for colind
g->colind = (uui *) malloc(g->E * sizeof(uui));
assert(g->colind != NULL);
fin.close();
fin.open(infile.c_str());
// getline(fin,temp); // reading the description line 1
// getline(fin,temp); // reading the description line 2
//Read V and E
//fscanf(infp, "%ld %ld\n", &(g->n), &g->E);
fin>>g->V>>g->E;
for(var i = 0; i < g->E; i++)
g->colind[i] = 0;
//Read the edges
// while( fscanf(infp, "%u %u\n", &u, &v) != EOF ) {
for(var i=0 ; i<g->E ; i++){
fin>>u>>v;
if(u>v){
g->colind[ temp_num_edges[u] ] = v;
temp_num_edges[u]++;
}
else if (u<v){
g->colind[ temp_num_edges[v] ] = u;
temp_num_edges[v]++;
}
}
fin.close();
printf("readGraph E=%d V=%d n=%d \n",g->E,g->V,g->n );
cout<<"Read the graph"<<endl;
/**********************************************************************************************************/
}
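// readGraph builds a CSR-like layout over the lower-triangular edges (u > v stores v in
// row u, u < v stores u in row v): roff holds per-row prefix sums, rlen the per-row degree,
// rows the indices of the non-empty rows, and colind the neighbour lists, filled in a
// second pass over the file with temp_num_edges used as a running write cursor.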
int main(int argc, char *argv[]){
G g;
// cout<<endl<<"checkpoint 1"<<endl;
char* file_path=argv[1];
readGraph(file_path,&g);
printf("main E=%d V=%d n=%d\n",g.E,g.V,g.n );
// cout<<"checkpoint 2"<<endl;
// cout<<"rows"<<endl;
// for (var i=0;i<(g.n) ;i++){
// cout<<g.rows[i]<<" ";
// }
// cout<<endl;
// cout<<"colind"<<endl;
// for (var i=0;i<(g.E) ;i++){
// cout<<g.colind[i]<<" ";
// }
// cout<<endl;
// cout<<"roff"<<endl;
// for (var i=0;i<(g.V+1) ;i++){
// cout<<g.roff[i]<<" ";
// }
// cout<<endl;
// cout<<"rlen"<<endl;
// for (var i=0;i<(g.V) ;i++){
// cout<<g.rlen[i]<<" ";
// }
// cout<<endl;
// hipMalloc( (void **) &d_rows, size );
// hipMalloc( (void **) &d_colind, size );
// hipMalloc( (void **) &d_roff, size );
// hipMalloc( (void **) &d_rlen, size );g->
//
// for (var i=0;i< g->n ;i++)
// rows[i] =
thrust::device_vector<uui> d_rows ( g.rows , g.rows + g.n);
thrust::device_vector<uui> d_colind (g.colind , g.colind+ g.E);
thrust::device_vector<uui> d_roff (g.roff , g.roff + g.V + 1 );
thrust::device_vector<uui> d_rlen (g.rlen , g.rlen + g.V);
thrust::device_vector<var> bitmap (GS*g.V);
thrust::fill(bitmap.begin(), bitmap.end(),0);
thrust::device_vector<uui> support(g.E);
thrust::fill(support.begin(), support.end(),0);
uui *d_rows1 = thrust::raw_pointer_cast(&d_rows[0]);
uui *d_colind1 = thrust::raw_pointer_cast(&d_colind[0]);
uui *d_roff1 = thrust::raw_pointer_cast(&d_roff[0]);
uui *d_rlen1 = thrust::raw_pointer_cast(&d_rlen[0]);
uui *d_support1 = thrust::raw_pointer_cast(&support[0]);
var *d_bitmap1 = thrust::raw_pointer_cast(&bitmap[0]);
hipEvent_t start, stop;
float elapsedTime;
var k=3;
var call=1;
while(call){
if (k>3)
break;
if(k==3)
{
cout<<"Calling Kernel"<<endl;
printf("E=%d V=%d n=%d K=%d\n",g.E,g.V,g.n,k );
hipEventCreate(&start);
hipEventRecord(start,0);
hipLaunchKernelGGL(( getmajorsupport), dim3(GS),dim3(BS), 0, 0, d_colind1,d_roff1,d_rows1,d_rlen1,d_bitmap1,g.E,g.V,g.n,d_support1,k);  // argument order matches the kernel signature (..., E, V, n, ...)
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipDeviceSynchronize();
cout<<"Out of kernel"<<endl;
call=0;
}
}
// int i;
// cout << "support[" << 0 << "] = " << support[0] << endl;
// for( i = 0; i < support.size(); i++)
// cout << "support[" << i << "] = " << support[i] << endl;
// return 0;
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
}
|
1b95755ba164dc7700c97e058f6cabd0c82ecedc.cu
|
// ##include <bits/stdc++.h>
//sorting in thrust https://stackoverflow.com/questions/23541503/sorting-arrays-of-structures-in-cuda/23645954
//Merge SearchSet http://on-demand.gputechconf.com/gtc/2013/presentations/S3414-Efficient-Merge-Search-Set-Operations.pdf
//thrust SET https://thrust.github.io/doc/group__set__operations.html
//maximum int = 2,147,483,647
//min int = -2,147,483,648
//scp -r /home/awd/work/coursework/DS295/project/pp_project/parallel/* [email protected]:/home/dtanwar/Project/Parallel_Programming_Project/parallel
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <functional>
#include <iostream>
#include <fstream>
#include <climits>
#include<cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define GS 1024
#define BS 1024
using namespace std;
typedef int uui;
typedef int var;
typedef struct{
var V; //no of vertices
var E; //no of edges
var n; //no of non empty rows
//var E;
uui *colind; //nonzeroes in each row (colind)
uui *roff; //starting offset of each row (rowoff)
uui *rlen; //length of each row
uui *rows; //indices of the non empty rows
} G;
__device__ int L;
__global__ void reset_bitmap(var *bitmap , var blockid, var V){
int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index >= V*blockid && index < V*(blockid+1)){
atomicAnd(bitmap + index , 0);
}
}
__global__ void find(var *data, var value, /*int min_idx,*/ var io_s, var rlen_i){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if(idx >= io_s && idx<= rlen_i){
if(data[idx] == value)
atomicMin(&L, idx);
}
}
//cudamalloc colind , roff , rows , rlen , bitmap , E , V ,n , supp,k ;
__global__ void getmajorsupport(uui* d_colind, uui* d_roff, uui* d_rows, uui* d_rlen, var* bitmap, var E, var V, var n, uui* d_supp, var K){
__shared__ int broadcast[BS]; //TODO: 2d array! why?
/* if(threadIdx.x==0 && blockIdx.x==1)
// {
// printf("\nkernel threadId.x=%d blockid.x=%d E=%d V=%d n=%d K=%d\n",threadIdx.x,blockIdx.x,E,V,n,K );
// // for (var i=0;i<(n) ;i++)
// // printf("%d ",d_rows[i]);
// // printf("\n");
// // printf("rows\n");
// __syncthreads();
// printf("colind\n");
// for(var i=0;i<E;i++)
// printf("%d ",d_colind[i] );
// printf("\n");
// __syncthreads();
// // printf("roff\n" );
// // for(var i=0;i<V+1;i++)
// // printf("%d ",d_roff[i]);
// // printf("\n");
// // printf("rlen\n");
// // for(var i=0;i<V;i++)
// // printf("%d ",d_rlen[i]);
// // printf("\n");
//
//
//
// } */
var i,io_s,io_e,j,jo_s,jo_e,jo,io,c,count,k;
for (var s = 0 ; s<n ; s+=gridDim.x){
printf("Inside kernel\n");
i = d_rows[s];
io_s = d_roff[i];
io_e = io_s + d_rlen[i];
printf("Inside 4\n");
for (io=io_s ; io < io_e ; io += blockDim.x){
printf("Inside 5, io=%d", io);
c = (io + threadIdx.x < io_e) ? d_colind[io + threadIdx.x] : -1;
printf("Inside 6, c=%d ", c);
if (c > -1){
atomicOr ((bitmap + (V * blockIdx.x) +c) , 1);
broadcast[threadIdx.x] = c;
printf("Inside 1\n");
}
__syncthreads();
for (var t=0 ; t < blockDim.x ; t++){
j = broadcast[t];
printf("Inside 2\n");
if (j == -1) break;
count = 0;
jo_s = d_roff[j];
jo_e = jo_s + d_rlen[j];
for(jo = jo_s + threadIdx.x ; jo < jo_e ; jo += blockDim.x){
k = d_colind[jo];
if(bitmap[V * blockIdx.x + k] == 1){
count ++;
atomicAdd(d_supp + jo , 1);
// find<<< E/1024 +1, 1024 >>>(d_colind, k , /*&L,*/ io_s, d_rlen[i]);
for(L=0; L <= d_rlen[i] ; L++)
if (d_colind[io_s + L] == k)
break;
printf("Before: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
atomicAdd(d_supp + io_s + L , 1);
printf("After: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
}
}
atomicAdd(d_supp + io + t , count);
}
}
// for(var x = V*blockIdx.x, i=0; i<V/*x< V*(blockIdx.x + 1)*/ ; i++,x++){
// atomicAnd(bitmap + x , 0);
// }
atomicAnd(bitmap + (V * blockIdx.x) + c , 0);
//reset_bitmap<<< GS,BS >>> (bitmap, blockIdx.x,V);
}
__syncthreads();
}
// #include "read_graph.hpp"
ifstream fin;
ofstream fout;
string infile, outfile;
void readGraph(string filename, G *g){
// cout<<"inside readGraph"<<endl;
// infile ="../../../input/" + name + ".mmio" ; // ../../../input/amazon0302_adj.mmio
// outfile="../../output/serial/" + name + ".txt" ; // dataset+"-out.txt";
infile =filename;
cout<<infile<<endl;
fin.open(infile.c_str()); // opening the input file
fout.open(outfile.c_str()); // opening the output file
string temp;
//getline(fin,temp); // reading the description line 1
//getline(fin,temp); // reading the description line 2
var temp_edge; // temporary edge because the edge weight is unused
var u,v; // the v1,v2 of edges
fin >> g->V >> g->E ; // reading the MxN graph and edges
cout<< g->V<<" "<< g->E<<endl; // just checking if it worked
/**************************allocating & initializing all flag[V] to false**********************************/
// bool flag[g->V]; // tells whether particular row is empty or not
// for (var i=0 ; i < g->V ; i++) {
// flag[i] = false; // false means empty
// }
thrust::device_vector<bool> flag(g->V);
thrust::fill(flag.begin(), flag.end(),0);
/**************************allocating & initializing all roff[V+1] to zero**********************************/
g->roff = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(g->roff != NULL);
for (var i=0 ; i < g->V+1 ; i++) {
g->roff[i] = 0;
//cout<<g->roff[i]<<" ";
};
//cout<<endl;
/**************************increase row offset and set flag for non empty row********************************/
for (var i=0; i<g->E; ++i) { //thrust
fin >> u >> v;
//cout<< u <<" "<<v <<endl;
if(u > v)
g->roff[u+1]++ , flag[u] = true;
else if(u < v)
g->roff[v+1]++ , flag[v] = true;
}
/**********************populates indices of nonzero rows rows[n] and initializes n (no of non empty rows)******/
g->rows = (uui *) malloc((g->V) * sizeof(uui));
g->n = 0;
var k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true){
g->n++; //thrust
g->rows[k++] = i; //thrust
}
}
/**********************************************************************************************************/
//converting the roff from degree holder to actual usage.
uui *temp_num_edges = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(temp_num_edges != NULL);
temp_num_edges[0] = 0;
//g->E= 0;
k=0;
for(var i = 0; i < g->V; i++) {
// g->E += g->roff[i];
k += g->roff[i+1];
temp_num_edges[i+1] =k;
}
for(var i= 0; i < g->V+1; i++)
g->roff[i] = temp_num_edges[i];
/**********************************************************************************************************/
g->rlen = (uui *) malloc((g->V) * sizeof(uui));
k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true)
g->rlen[k] = g->roff[i+1] - g->roff[i];
else
g->rlen[k] = 0;
k++;
}
/**********************************************************************************************************/
//Allocate space for colind
g->colind = (uui *) malloc(g->E * sizeof(uui));
assert(g->colind != NULL);
fin.close();
fin.open(infile.c_str());
// getline(fin,temp); // reading the description line 1
// getline(fin,temp); // reading the description line 2
//Read V and E
//fscanf(infp, "%ld %ld\n", &(g->n), &g->E);
fin>>g->V>>g->E;
for(var i = 0; i < g->E; i++)
g->colind[i] = 0;
//Read the edges
// while( fscanf(infp, "%u %u\n", &u, &v) != EOF ) {
for(var i=0 ; i<g->E ; i++){
fin>>u>>v;
if(u>v){
g->colind[ temp_num_edges[u] ] = v;
temp_num_edges[u]++;
}
else if (u<v){
g->colind[ temp_num_edges[v] ] = u;
temp_num_edges[v]++;
}
}
fin.close();
printf("readGraph E=%d V=%d n=%d \n",g->E,g->V,g->n );
cout<<"Read the graph"<<endl;
/**********************************************************************************************************/
}
int main(int argc, char *argv[]){
G g;
// cout<<endl<<"checkpoint 1"<<endl;
char* file_path=argv[1];
readGraph(file_path,&g);
printf("main E=%d V=%d n=%d\n",g.E,g.V,g.n );
// cout<<"checkpoint 2"<<endl;
// cout<<"rows"<<endl;
// for (var i=0;i<(g.n) ;i++){
// cout<<g.rows[i]<<" ";
// }
// cout<<endl;
// cout<<"colind"<<endl;
// for (var i=0;i<(g.E) ;i++){
// cout<<g.colind[i]<<" ";
// }
// cout<<endl;
// cout<<"roff"<<endl;
// for (var i=0;i<(g.V+1) ;i++){
// cout<<g.roff[i]<<" ";
// }
// cout<<endl;
// cout<<"rlen"<<endl;
// for (var i=0;i<(g.V) ;i++){
// cout<<g.rlen[i]<<" ";
// }
// cout<<endl;
// cudaMalloc( (void **) &d_rows, size );
// cudaMalloc( (void **) &d_colind, size );
// cudaMalloc( (void **) &d_roff, size );
// cudaMalloc( (void **) &d_rlen, size );g->
//
// for (var i=0;i< g->n ;i++)
// rows[i] =
thrust::device_vector<uui> d_rows ( g.rows , g.rows + g.n);
thrust::device_vector<uui> d_colind (g.colind , g.colind+ g.E);
thrust::device_vector<uui> d_roff (g.roff , g.roff + g.V + 1 );
thrust::device_vector<uui> d_rlen (g.rlen , g.rlen + g.V);
thrust::device_vector<var> bitmap (GS*g.V);
thrust::fill(bitmap.begin(), bitmap.end(),0);
thrust::device_vector<uui> support(g.E);
thrust::fill(support.begin(), support.end(),0);
uui *d_rows1 = thrust::raw_pointer_cast(&d_rows[0]);
uui *d_colind1 = thrust::raw_pointer_cast(&d_colind[0]);
uui *d_roff1 = thrust::raw_pointer_cast(&d_roff[0]);
uui *d_rlen1 = thrust::raw_pointer_cast(&d_rlen[0]);
uui *d_support1 = thrust::raw_pointer_cast(&support[0]);
var *d_bitmap1 = thrust::raw_pointer_cast(&bitmap[0]);
cudaEvent_t start, stop;
float elapsedTime;
var k=3;
var call=1;
while(call){
if (k>3)
break;
if(k==3)
{
cout<<"Calling Kernel"<<endl;
printf("E=%d V=%d n=%d K=%d\n",g.E,g.V,g.n,k );
cudaEventCreate(&start);
cudaEventRecord(start,0);
getmajorsupport<<<GS,BS>>>(d_colind1,d_roff1,d_rows1,d_rlen1,d_bitmap1,g.E,g.V,g.n,d_support1,k);  // argument order matches the kernel signature (..., E, V, n, ...)
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaDeviceSynchronize();
cout<<"Out of kernel"<<endl;
call=0;
}
}
// int i;
// cout << "support[" << 0 << "] = " << support[0] << endl;
// for( i = 0; i < support.size(); i++)
// cout << "support[" << i << "] = " << support[i] << endl;
// return 0;
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
}
|
52b4164b86a9c352dc5386b329f10673b1cf97dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix_multiply_gpu_cutlass.h"
__global__ void multiply_matrices_cutlass(double* a_gpu, double* b_gpu, double* c_gpu) {
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N && j < P) {
double sum = 0;
for (int k = 0; k < M; k++) {
sum += a_gpu[i * M + k] * b_gpu[k * P + j];
}
c_gpu[i * P + j] = sum;
}
}
void multiply_matrices_gpu_cutlass(double a[N * M], double b[M * P], double c[N * P]) {
double* a_gpu;
double* b_gpu;
double* c_gpu;
hipMalloc(&a_gpu, N * M * sizeof(double));
hipMalloc(&b_gpu, M * P * sizeof(double));
hipMalloc(&c_gpu, N * P * sizeof(double));
hipMemcpy(a_gpu, a, N * M * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(b_gpu, b, M * P * sizeof(double), hipMemcpyHostToDevice);
int threadsPerBlock1D = 16;
dim3 numBlocks((P + threadsPerBlock1D - 1) / threadsPerBlock1D, (N + threadsPerBlock1D - 1) / threadsPerBlock1D, 1);
dim3 threadsPerBlock(threadsPerBlock1D, threadsPerBlock1D, 1);
hipLaunchKernelGGL(( multiply_matrices_cutlass), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, a_gpu, b_gpu, c_gpu);
hipMemcpy(c, c_gpu, N * P * sizeof(double), hipMemcpyDeviceToHost);
hipFree(a_gpu);
hipFree(b_gpu);
hipFree(c_gpu);
}
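// Minimal usage sketch, assuming N, M and P are the compile-time sizes declared in
// matrix_multiply_gpu_cutlass.h (only names from the signatures above are used):
//   static double a[N * M], b[M * P], c[N * P];
//   /* fill a and b */
//   multiply_matrices_gpu_cutlass(a, b, c);  // c[i*P + j] = sum_k a[i*M + k] * b[k*P + j]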
|
52b4164b86a9c352dc5386b329f10673b1cf97dc.cu
|
#include "matrix_multiply_gpu_cutlass.h"
__global__ void multiply_matrices_cutlass(double* a_gpu, double* b_gpu, double* c_gpu) {
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N && j < P) {
double sum = 0;
for (int k = 0; k < M; k++) {
sum += a_gpu[i * M + k] * b_gpu[k * P + j];
}
c_gpu[i * P + j] = sum;
}
}
void multiply_matrices_gpu_cutlass(double a[N * M], double b[M * P], double c[N * P]) {
double* a_gpu;
double* b_gpu;
double* c_gpu;
cudaMalloc(&a_gpu, N * M * sizeof(double));
cudaMalloc(&b_gpu, M * P * sizeof(double));
cudaMalloc(&c_gpu, N * P * sizeof(double));
cudaMemcpy(a_gpu, a, N * M * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu, b, M * P * sizeof(double), cudaMemcpyHostToDevice);
int threadsPerBlock1D = 16;
dim3 numBlocks((P + threadsPerBlock1D - 1) / threadsPerBlock1D, (N + threadsPerBlock1D - 1) / threadsPerBlock1D, 1);
dim3 threadsPerBlock(threadsPerBlock1D, threadsPerBlock1D, 1);
multiply_matrices_cutlass<<<numBlocks, threadsPerBlock>>>(a_gpu, b_gpu, c_gpu);
cudaMemcpy(c, c_gpu, N * P * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(a_gpu);
cudaFree(b_gpu);
cudaFree(c_gpu);
}
|
09ff0b66b855be6b846d5778f670e09483c2c747.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__
inline void save_soln_gpu(const double *q, double *qold){
for (int n=0; n<4; n++) qold[n] = q[n];
}
// CUDA kernel function
__global__ void op_cuda_save_soln(
const double *__restrict arg0,
double *arg1,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
save_soln_gpu(arg0+n*4,
arg1+n*4);
}
}
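// The loop above is a grid-stride loop: each thread starts at its global index and advances
// by blockDim.x * gridDim.x, so the fixed launch of 200 blocks below covers any set_size
// without a relaunch.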
//GPU host stub function
void op_par_loop_save_soln_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (OP_kernels[0].count==1) op_register_strides();
if (OP_diags>2) {
printf(" kernel routine w/o indirection: save_soln");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
hipLaunchKernelGGL(( op_cuda_save_soln), dim3(nblocks),dim3(nthread), 0, 0,
(double *) arg0.data_d,
(double *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
OP_kernels[0].transfer += (float)set->size * arg0.size;
OP_kernels[0].transfer += (float)set->size * arg1.size;
}
void op_par_loop_save_soln_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1);
//GPU host stub function
#if OP_HYBRID_GPU
void op_par_loop_save_soln(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
if (OP_hybrid_gpu) {
op_par_loop_save_soln_gpu(name, set,
arg0,
arg1);
}else{
op_par_loop_save_soln_cpu(name, set,
arg0,
arg1);
}
}
#else
void op_par_loop_save_soln(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
op_par_loop_save_soln_gpu(name, set,
arg0,
arg1);
}
#endif //OP_HYBRID_GPU
|
09ff0b66b855be6b846d5778f670e09483c2c747.cu
|
//
// auto-generated by op2.py
//
//user function
__device__
inline void save_soln_gpu(const double *q, double *qold){
for (int n=0; n<4; n++) qold[n] = q[n];
}
// CUDA kernel function
__global__ void op_cuda_save_soln(
const double *__restrict arg0,
double *arg1,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
save_soln_gpu(arg0+n*4,
arg1+n*4);
}
}
//GPU host stub function
void op_par_loop_save_soln_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (OP_kernels[0].count==1) op_register_strides();
if (OP_diags>2) {
printf(" kernel routine w/o indirection: save_soln");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
op_cuda_save_soln<<<nblocks,nthread>>>(
(double *) arg0.data_d,
(double *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
OP_kernels[0].transfer += (float)set->size * arg0.size;
OP_kernels[0].transfer += (float)set->size * arg1.size;
}
void op_par_loop_save_soln_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1);
//GPU host stub function
#if OP_HYBRID_GPU
void op_par_loop_save_soln(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
if (OP_hybrid_gpu) {
op_par_loop_save_soln_gpu(name, set,
arg0,
arg1);
}else{
op_par_loop_save_soln_cpu(name, set,
arg0,
arg1);
}
}
#else
void op_par_loop_save_soln(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
op_par_loop_save_soln_gpu(name, set,
arg0,
arg1);
}
#endif //OP_HYBRID_GPU
|
d02f1f3230d49c0dd3efc4ed04a1ef4560695b82.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kSwapRows(float* source, float* target, float* indices1, float* indices2, int nRowIs, int nCols, int nRows){
__shared__ int sourceRowIndices[32], targetRowIndices[32];
const int startRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices1[startRowI + tid]);
targetRowIndices[tid] = int(indices2[startRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nRows)
sourceRowIndices[tid] = -1;
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = sourceRowIndices[i], targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32) {
const float temp1 = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
const float temp2 = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : target[targetRowI * nCols + colI];
if (sourceRowI != -1)
source[sourceRowI * nCols + colI] = temp2;
if (targetRowI != -1)
target[targetRowI * nCols + colI] = temp1;
}
}
}
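// The (1.0/0.0 - 1.0/0.0) expressions deliberately evaluate to NaN: a row index that is out
// of range is clamped to -1, its value is replaced by NaN instead of reading out of bounds,
// and the guarded stores never write back into the invalid row (a valid partner row in such
// a pair ends up filled with NaN).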
|
d02f1f3230d49c0dd3efc4ed04a1ef4560695b82.cu
|
#include "includes.h"
__global__ void kSwapRows(float* source, float* target, float* indices1, float* indices2, int nRowIs, int nCols, int nRows){
__shared__ int sourceRowIndices[32], targetRowIndices[32];
const int startRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices1[startRowI + tid]);
targetRowIndices[tid] = int(indices2[startRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nRows)
sourceRowIndices[tid] = -1;
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = sourceRowIndices[i], targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32) {
const float temp1 = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
const float temp2 = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : target[targetRowI * nCols + colI];
if (sourceRowI != -1)
source[sourceRowI * nCols + colI] = temp2;
if (targetRowI != -1)
target[targetRowI * nCols + colI] = temp1;
}
}
}
|
fb78655d670003a5733ef8b24199992db36f784d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "costfun.hh"
void print_matrix(double *Mat, int Mat_height, int Mat_width){
double* aux = (double *)malloc(sizeof(double)*Mat_width*Mat_height);
hipMemcpy(aux, Mat, sizeof(double)*Mat_width*Mat_height, hipMemcpyDeviceToHost);
printf("fil : %d, col : %d\n", Mat_width, Mat_height);
for(int i=0; i<Mat_height; i++)
{
for(int j=0; j<Mat_width; j++)
{
printf("%.10f ",aux[i*Mat_width+j]);
}
printf("\n");
}
free(aux);
}
struct max_exp
{
double max;
max_exp(double m){max = m;};
__device__ double operator()(double y)
{
return exp(y-max);
}
};
// non safe at all
__global__ void logitsSoftmax(double *wordVecs, double *Y_est, int centerIdx, int vocab_size, int embed_size, int offset)
{
    // for each row, take the thread's indices
int fil = blockIdx.x * blockDim.x + threadIdx.x;
double logits_value = 0.0;
if (fil < vocab_size)
{
for (int i=0 ; i < embed_size; i++)
{
    // traverse the rows of the offset (outside) vectors
logits_value += wordVecs[offset+fil*embed_size+i]*wordVecs[centerIdx*embed_size+i];
}
Y_est[fil] = logits_value;
}
}
// gradient with respect to the center word (the softmax output is already passed in)
// the word matrix is traversed transposed so everything gets updated at once
__global__ void gradCenterVec(double* outsideVecs, double* Y_est, double *gradCenter, int vocab_size, int embed_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
double grad = 0.0;
if (fil<embed_size)
{
for (int i=0 ; i < vocab_size; i++)
{
grad += outsideVecs[i*embed_size+fil]*Y_est[i];
}
gradCenter[fil] += grad;
}
__syncthreads();
}
// outer product between the center vector and y - y_est to update the outside words
__global__ void gradOutsideVecs(double *centerVec, double *Y_est, double *gradOutside, int vocab_size, int embed_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(fil < vocab_size)
{
if (col < embed_size)
{
gradOutside[fil*embed_size+col] += Y_est[fil]*centerVec[col];
}
}
__syncthreads();
}
// update means Y = Y_est - Y
__global__ void updateY(double *Y, double *loss, int* out_idxs, int currIdx, int batch_size)
{
// printf("%lf\t%lf\tind : %d \tsent ind : %d\n",Y[out_idxs[currIdx]], log(Y[out_idxs[currIdx]]), currIdx, out_idxs[currIdx]);
*loss -= log(Y[out_idxs[currIdx]]);
Y[out_idxs[currIdx]] += -1;
__syncthreads();
}
__global__ void upCenter(double *centerVec, double *grad_center, double lr, int embed_size, int batch_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
if(fil < embed_size)
{
centerVec[fil] -= lr*grad_center[fil]/batch_size;
}
__syncthreads();
}
__global__ void upOutside(double *outsideVecs, double *grad_outside, double lr, int embed_size, int vocab_size, int batch_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(fil < embed_size)
{
if(col < vocab_size)
{
outsideVecs[col*embed_size + fil] -= lr*grad_outside[col*embed_size + fil]/batch_size;
}
}
__syncthreads();
}
// the center vector and the outside vectors are passed in
// the center vector is (embed_size, 1), the outside vectors are (k, embed_size)
// https://devblogs.nvidia.com/unified-memory-cuda-beginners/ for hipMallocManaged
// the center vector ALREADY COMES TRANSPOSED; whether that is a good call or it should be transposed in the kernel comes to the same thing
// each of the logits is exponentiated to obtain a probability density
// over each outside word given a center word
// cost is a vector of K elements giving an empirical probability of how close two words are in this space; in the same sense, it is the cross-entropy between the true word y_i {i=1,...,k} (with prob 1) and the predicted word y^{\hat}_i {i=1,...,k}
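// In formulas, for a center word c and an outside word o the methods below compute
//   y_hat = softmax(U v_c)          (logitsSoftmax plus the thrust exp/normalise in softLoss)
//   loss += -log(y_hat[o])          (updateY, which also forms y_hat - y by subtracting 1)
//   grad v_c += U^T (y_hat - y)     (gradCenterVec)
//   grad U   += (y_hat - y) v_c^T   (gradOutsideVecs)
// where U is the (vocab_size x embed_size) outside-vector block of wordVecs.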
W2VCost::W2VCost(int embed_size, int vocab_size, double lr, int batch_size)
{
out_loss.open("out_loss_Cublas.txt");
// the maximum I will ever need is the context size
this -> embed_size = embed_size;
this -> vocab_size = vocab_size;
this -> out_offset = vocab_size*embed_size;
this -> batch_size = batch_size;
this -> lr = lr;
this -> iteration = 0;
hipblasCreate(&(this -> handler));
hipMalloc(&Y_est, vocab_size*sizeof(double));
hipMalloc(&grad_center, embed_size*sizeof(double)); // (1, embed_size)
hipMalloc(&grad_outside, vocab_size*embed_size*sizeof(double)); // (context, embed_size)
hipMalloc(&loss, sizeof(double));
hipMemset(Y_est, 0, vocab_size*sizeof(double));
hipMemset(grad_center, 0, embed_size*sizeof(double));
hipMemset(grad_outside, 0, vocab_size*embed_size*sizeof(double));
hipMemset(loss, 0, sizeof(double));
}
W2VCost::~W2VCost()
{
hipblasDestroy(this -> handler);
hipFree(this -> grad_center);
hipFree(this -> grad_outside);
hipFree(this -> loss);
hipFree(this -> Y_est);
out_loss.close();
}
// for each outside word
void W2VCost::lossAndGrad(double* wordVecs, int* outsideIdxs, int centerIdx, int context_size)
{
// double *aux = (double*)malloc(sizeof(double)*vocab_size*embed_size);
    // for each word in the context, update
for(int currentOutIdx=0; currentOutIdx<context_size; currentOutIdx++)
{
W2VCost::softLoss(wordVecs, centerIdx);
hipLaunchKernelGGL(( updateY), dim3(1),dim3(1), 0, 0, Y_est, loss, outsideIdxs, currentOutIdx, batch_size);
gpuErrchk(hipPeekAtLastError());
    // // update gradients
W2VCost::gradCenter(&wordVecs[out_offset]);
W2VCost::gradOutside(&wordVecs[centerIdx*embed_size]);
}
}
void W2VCost::updateGradients(double* wordVecs, int centerIdx)
{
double loss_h;
updateCenter(&wordVecs[embed_size*centerIdx]);
hipMemset(grad_center, 0, embed_size*sizeof(double));
gpuErrchk(hipPeekAtLastError());
updateOutside(&wordVecs[out_offset]);
// print_matrix(grad_outside, vocab_size, embed_size);
hipMemset(grad_outside, 0, vocab_size*embed_size*sizeof(double));
gpuErrchk(hipPeekAtLastError());
this -> iteration ++;
if((this -> iteration%PRINT_EVERY)== 0){
if (exploss == 0)
{
hipMemcpy(&exploss, loss, sizeof(double), hipMemcpyDeviceToHost);
exploss /= batch_size;
}
else
{
hipMemcpy(&loss_h, loss, sizeof(double), hipMemcpyDeviceToHost);
loss_h /= batch_size;
exploss = 0.95*exploss+0.05*loss_h;
}
printf("Iter : %d\tLoss : %.10lf\n", iteration, exploss);
out_loss << iteration << "," << exploss << endl;
}
    // UPDATE OUTSIDE EVERY BATCH_SIZE ITERATIONS AND CHANGE THE LR
if((this -> iteration%this->batch_size) == 0)
{
hipMemset(loss, 0, sizeof(double));
this -> lr *= 0.5;
}
}
void W2VCost::updateCenter(double* centerVec)
{
    // need embed_size threads (one per embedding dimension)
dim3 block_size(256);
dim3 block_num((embed_size+block_size.x-1)/block_size.x);
hipLaunchKernelGGL(( upCenter), dim3(block_num), dim3(block_size), 0, 0, centerVec, grad_center, lr, embed_size, batch_size);
gpuErrchk(hipPeekAtLastError());
}
void W2VCost::updateOutside(double* outsideVecs)
{
    // need embed_size x vocab_size threads
dim3 block_size(8, 8);
dim3 block_num((embed_size+block_size.x-1)/block_size.x, (vocab_size+block_size.y-1)/block_size.y);
hipLaunchKernelGGL(( upOutside), dim3(block_num), dim3(block_size), 0, 0, outsideVecs, grad_outside, lr, embed_size, vocab_size, batch_size);
gpuErrchk(hipPeekAtLastError());
}
void W2VCost::softLoss(double *wordVecs, int centerVecIdx)
{
double sum = 0.0;
double max;
    // need vocab_size threads
dim3 block_size(256);
dim3 block_num((vocab_size+block_size.x-1)/block_size.x);
assert(out_offset == vocab_size*embed_size);
assert(centerVecIdx < vocab_size);
#ifdef SIMPLECUDA
hipLaunchKernelGGL(( logitsSoftmax), dim3(block_num), dim3(block_size), 0, 0, wordVecs, Y_est, centerVecIdx, vocab_size, embed_size, out_offset);
gpuErrchk(hipPeekAtLastError());
#endif
#ifdef CUBLAS
stat = hipblasDgemv(this->handler, HIPBLAS_OP_T, embed_size, vocab_size, &alfa, &wordVecs[out_offset], embed_size, &wordVecs[embed_size*centerVecIdx], 1, &beta, Y_est, 1);
#endif
thrust::device_ptr<double>Y_dev = thrust::device_pointer_cast(Y_est);
max = *(thrust::max_element(Y_dev, Y_dev+vocab_size));
    // exponentiate relative to the max for numerical stability (and good practice)
thrust::transform(Y_dev, Y_dev+vocab_size, Y_dev, max_exp(max));
    sum = thrust::reduce(Y_dev, Y_dev+vocab_size, 0.0, thrust::plus<double>());  // double init so the reduction does not truncate to int
thrust::transform(Y_dev, Y_dev+vocab_size, Y_dev, _1/sum);
gpuErrchk(hipPeekAtLastError());
}
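// The max subtraction above is the usual numerically stable softmax:
//   softmax(y)_i = exp(y_i - max_j y_j) / sum_k exp(y_k - max_j y_j)
// which leaves the result unchanged while keeping every exponent <= 0, avoiding overflow.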
void W2VCost::gradCenter(double *outsideVecs)
{
dim3 block_size(256);
dim3 block_num((embed_size+block_size.x-1)/block_size.x);
#ifdef SIMPLECUDA
hipLaunchKernelGGL(( gradCenterVec), dim3(block_num), dim3(block_size), 0, 0, outsideVecs, Y_est, grad_center, vocab_size, embed_size);
gpuErrchk(hipPeekAtLastError());
#endif
#ifdef CUBLAS
stat = hipblasDgemv(this->handler, HIPBLAS_OP_N, embed_size, vocab_size, &alfa, outsideVecs, embed_size, Y_est, 1, &beta, grad_center, 1);
#endif
}
void W2VCost::gradOutside(double *centerVec)
{
// cout <<"Batch size gradout : " << batch_size << endl;
dim3 block_size(8, 8);
dim3 block_num((vocab_size+block_size.x-1)/block_size.x, (embed_size+block_size.y-1)/block_size.y);
hipLaunchKernelGGL(( gradOutsideVecs), dim3(block_num), dim3(block_size), 0, 0, centerVec, Y_est, grad_outside, vocab_size, embed_size);
gpuErrchk(hipPeekAtLastError());
}
|
fb78655d670003a5733ef8b24199992db36f784d.cu
|
#include "costfun.hh"
void print_matrix(double *Mat, int Mat_height, int Mat_width){
double* aux = (double *)malloc(sizeof(double)*Mat_width*Mat_height);
cudaMemcpy(aux, Mat, sizeof(double)*Mat_width*Mat_height, cudaMemcpyDeviceToHost);
printf("fil : %d, col : %d\n", Mat_width, Mat_height);
for(int i=0; i<Mat_height; i++)
{
for(int j=0; j<Mat_width; j++)
{
printf("%.10f ",aux[i*Mat_width+j]);
}
printf("\n");
}
free(aux);
}
struct max_exp
{
double max;
max_exp(double m){max = m;};
__device__ double operator()(double y)
{
return exp(y-max);
}
};
// non safe at all
__global__ void logitsSoftmax(double *wordVecs, double *Y_est, int centerIdx, int vocab_size, int embed_size, int offset)
{
    // for each row, take the thread's indices
int fil = blockIdx.x * blockDim.x + threadIdx.x;
double logits_value = 0.0;
if (fil < vocab_size)
{
for (int i=0 ; i < embed_size; i++)
{
    // traverse the rows of the offset (outside) vectors
logits_value += wordVecs[offset+fil*embed_size+i]*wordVecs[centerIdx*embed_size+i];
}
Y_est[fil] = logits_value;
}
}
// gradient with respect to the center word (the softmax output is already passed in)
// the word matrix is traversed transposed so everything gets updated at once
__global__ void gradCenterVec(double* outsideVecs, double* Y_est, double *gradCenter, int vocab_size, int embed_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
double grad = 0.0;
if (fil<embed_size)
{
for (int i=0 ; i < vocab_size; i++)
{
grad += outsideVecs[i*embed_size+fil]*Y_est[i];
}
gradCenter[fil] += grad;
}
__syncthreads();
}
// outer product between the center vector and y - y_est to update the outside words
__global__ void gradOutsideVecs(double *centerVec, double *Y_est, double *gradOutside, int vocab_size, int embed_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(fil < vocab_size)
{
if (col < embed_size)
{
gradOutside[fil*embed_size+col] += Y_est[fil]*centerVec[col];
}
}
__syncthreads();
}
// update means Y = Y_est - Y
__global__ void updateY(double *Y, double *loss, int* out_idxs, int currIdx, int batch_size)
{
// printf("%lf\t%lf\tind : %d \tsent ind : %d\n",Y[out_idxs[currIdx]], log(Y[out_idxs[currIdx]]), currIdx, out_idxs[currIdx]);
*loss -= log(Y[out_idxs[currIdx]]);
Y[out_idxs[currIdx]] += -1;
__syncthreads();
}
__global__ void upCenter(double *centerVec, double *grad_center, double lr, int embed_size, int batch_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
if(fil < embed_size)
{
centerVec[fil] -= lr*grad_center[fil]/batch_size;
}
__syncthreads();
}
__global__ void upOutside(double *outsideVecs, double *grad_outside, double lr, int embed_size, int vocab_size, int batch_size)
{
int fil = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(fil < embed_size)
{
if(col < vocab_size)
{
outsideVecs[col*embed_size + fil] -= lr*grad_outside[col*embed_size + fil]/batch_size;
}
}
__syncthreads();
}
// the center vector and the outside vectors are passed in
// the center vector is (embed_size, 1), the outside vectors are (k, embed_size)
// https://devblogs.nvidia.com/unified-memory-cuda-beginners/ for cudaMallocManaged
// the center vector ALREADY COMES TRANSPOSED; whether that is a good call or it should be transposed in the kernel comes to the same thing
// each of the logits is exponentiated to obtain a probability density
// over each outside word given a center word
// cost is a vector of K elements giving an empirical probability of how close two words are in this space; in the same sense, it is the cross-entropy between the true word y_i {i=1,...,k} (with prob 1) and the predicted word y^{\hat}_i {i=1,...,k}
W2VCost::W2VCost(int embed_size, int vocab_size, double lr, int batch_size)
{
out_loss.open("out_loss_Cublas.txt");
// the maximum I will ever need is the context size
this -> embed_size = embed_size;
this -> vocab_size = vocab_size;
this -> out_offset = vocab_size*embed_size;
this -> batch_size = batch_size;
this -> lr = lr;
this -> iteration = 0;
cublasCreate(&(this -> handler));
cudaMalloc(&Y_est, vocab_size*sizeof(double));
cudaMalloc(&grad_center, embed_size*sizeof(double)); // (1, embed_size)
cudaMalloc(&grad_outside, vocab_size*embed_size*sizeof(double)); // (context, embed_size)
cudaMalloc(&loss, sizeof(double));
cudaMemset(Y_est, 0, vocab_size*sizeof(double));
cudaMemset(grad_center, 0, embed_size*sizeof(double));
cudaMemset(grad_outside, 0, vocab_size*embed_size*sizeof(double));
cudaMemset(loss, 0, sizeof(double));
}
W2VCost::~W2VCost()
{
cublasDestroy(this -> handler);
cudaFree(this -> grad_center);
cudaFree(this -> grad_outside);
cudaFree(this -> loss);
cudaFree(this -> Y_est);
out_loss.close();
}
// for each outside word
void W2VCost::lossAndGrad(double* wordVecs, int* outsideIdxs, int centerIdx, int context_size)
{
// double *aux = (double*)malloc(sizeof(double)*vocab_size*embed_size);
    // for each word in the context, update
for(int currentOutIdx=0; currentOutIdx<context_size; currentOutIdx++)
{
W2VCost::softLoss(wordVecs, centerIdx);
updateY<<<1,1>>>(Y_est, loss, outsideIdxs, currentOutIdx, batch_size);
gpuErrchk(cudaPeekAtLastError());
    // // update gradients
W2VCost::gradCenter(&wordVecs[out_offset]);
W2VCost::gradOutside(&wordVecs[centerIdx*embed_size]);
}
}
void W2VCost::updateGradients(double* wordVecs, int centerIdx)
{
double loss_h;
updateCenter(&wordVecs[embed_size*centerIdx]);
cudaMemset(grad_center, 0, embed_size*sizeof(double));
gpuErrchk(cudaPeekAtLastError());
updateOutside(&wordVecs[out_offset]);
// print_matrix(grad_outside, vocab_size, embed_size);
cudaMemset(grad_outside, 0, vocab_size*embed_size*sizeof(double));
gpuErrchk(cudaPeekAtLastError());
this -> iteration ++;
if((this -> iteration%PRINT_EVERY)== 0){
if (exploss == 0)
{
cudaMemcpy(&exploss, loss, sizeof(double), cudaMemcpyDeviceToHost);
exploss /= batch_size;
}
else
{
cudaMemcpy(&loss_h, loss, sizeof(double), cudaMemcpyDeviceToHost);
loss_h /= batch_size;
exploss = 0.95*exploss+0.05*loss_h;
}
printf("Iter : %d\tLoss : %.10lf\n", iteration, exploss);
out_loss << iteration << "," << exploss << endl;
}
    // UPDATE OUTSIDE EVERY BATCH_SIZE ITERATIONS AND CHANGE THE LR
if((this -> iteration%this->batch_size) == 0)
{
cudaMemset(loss, 0, sizeof(double));
this -> lr *= 0.5;
}
}
void W2VCost::updateCenter(double* centerVec)
{
    // need embed_size threads (one per embedding dimension)
dim3 block_size(256);
dim3 block_num((embed_size+block_size.x-1)/block_size.x);
upCenter<<<block_num, block_size>>>(centerVec, grad_center, lr, embed_size, batch_size);
gpuErrchk(cudaPeekAtLastError());
}
void W2VCost::updateOutside(double* outsideVecs)
{
    // need embed_size x vocab_size threads
dim3 block_size(8, 8);
dim3 block_num((embed_size+block_size.x-1)/block_size.x, (vocab_size+block_size.y-1)/block_size.y);
upOutside<<<block_num, block_size>>>(outsideVecs, grad_outside, lr, embed_size, vocab_size, batch_size);
gpuErrchk(cudaPeekAtLastError());
}
void W2VCost::softLoss(double *wordVecs, int centerVecIdx)
{
double sum = 0.0;
double max;
    // need vocab_size threads
dim3 block_size(256);
dim3 block_num((vocab_size+block_size.x-1)/block_size.x);
assert(out_offset == vocab_size*embed_size);
assert(centerVecIdx < vocab_size);
#ifdef SIMPLECUDA
logitsSoftmax<<<block_num, block_size>>>(wordVecs, Y_est, centerVecIdx, vocab_size, embed_size, out_offset);
gpuErrchk(cudaPeekAtLastError());
#endif
#ifdef CUBLAS
stat = cublasDgemv(this->handler, CUBLAS_OP_T, embed_size, vocab_size, &alfa, &wordVecs[out_offset], embed_size, &wordVecs[embed_size*centerVecIdx], 1, &beta, Y_est, 1);
#endif
thrust::device_ptr<double>Y_dev = thrust::device_pointer_cast(Y_est);
max = *(thrust::max_element(Y_dev, Y_dev+vocab_size));
    // exponentiate relative to the max for numerical stability (and good practice)
thrust::transform(Y_dev, Y_dev+vocab_size, Y_dev, max_exp(max));
    sum = thrust::reduce(Y_dev, Y_dev+vocab_size, 0.0, thrust::plus<double>());  // double init so the reduction does not truncate to int
thrust::transform(Y_dev, Y_dev+vocab_size, Y_dev, _1/sum);
gpuErrchk(cudaPeekAtLastError());
}
void W2VCost::gradCenter(double *outsideVecs)
{
dim3 block_size(256);
dim3 block_num((embed_size+block_size.x-1)/block_size.x);
#ifdef SIMPLECUDA
gradCenterVec<<<block_num, block_size>>>(outsideVecs, Y_est, grad_center, vocab_size, embed_size);
gpuErrchk(cudaPeekAtLastError());
#endif
#ifdef CUBLAS
stat = cublasDgemv(this->handler, CUBLAS_OP_N, embed_size, vocab_size, &alfa, outsideVecs, embed_size, Y_est, 1, &beta, grad_center, 1);
#endif
}
void W2VCost::gradOutside(double *centerVec)
{
// cout <<"Batch size gradout : " << batch_size << endl;
dim3 block_size(8, 8);
dim3 block_num((vocab_size+block_size.x-1)/block_size.x, (embed_size+block_size.y-1)/block_size.y);
gradOutsideVecs<<<block_num, block_size>>>(centerVec, Y_est, grad_outside, vocab_size, embed_size);
gpuErrchk(cudaPeekAtLastError());
}
|
5df19040f0ecd99a78b593c385e3695f2b3f9082.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
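// Usage sketch (illustrative only; not called by the renderer): wrap any HIP
// runtime call that returns a hipError_t in gpuErrchk so failures abort with
// file and line information. The buffer below is hypothetical.
static void exampleCheckedAllocation()
{
    float* deviceBuffer = nullptr;
    gpuErrchk(hipMalloc(&deviceBuffer, 256 * sizeof(float)));
    gpuErrchk(hipMemset(deviceBuffer, 0, 256 * sizeof(float)));
    gpuErrchk(hipFree(deviceBuffer));
}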
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float invWidth;
float invHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
    // shrink the circle back to its starting radius after reaching the cutoff
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float& redPix, float& greenPix, float& bluePix, float& alphaPix) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
    // same direction for all threads so its cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
//TODO: why in 2 steps -- is it to avoid some hazard???!!
/*
float4 existingColor = imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
imagePtr = newColor;*/
redPix = alpha * rgb.x + oneMinusAlpha * redPix;
greenPix = alpha * rgb.y + oneMinusAlpha * greenPix;
bluePix = alpha * rgb.z + oneMinusAlpha * bluePix;
alphaPix = alpha + alphaPix;
// END SHOULD-BE-ATOMIC REGION
}
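// Illustrative helper (not used above): the "over" blend that shadePixel
// performs, factored into a standalone float4 function. The renderer keeps the
// four channels in separate registers instead, but the arithmetic is identical.
__device__ __inline__ float4
compositeOver(float3 rgb, float alpha, float4 dst) {
    float oneMinusAlpha = 1.f - alpha;
    float4 out;
    out.x = alpha * rgb.x + oneMinusAlpha * dst.x;
    out.y = alpha * rgb.y + oneMinusAlpha * dst.y;
    out.z = alpha * rgb.z + oneMinusAlpha * dst.z;
    out.w = alpha + dst.w;   // alpha accumulates, matching shadePixel
    return out;
}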
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderCircles() {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;
// float4 allPix = *(float4*)(&cuConstRendererParams.imageData[(4 * (y * imageWidth + x))]);
// float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
// float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
// float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
// float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
float red_pixel = cuConstRendererParams.imageData[((y * imageWidth + x))];
float green_pixel = cuConstRendererParams.imageData[((y * imageWidth + x)) + 1];
float blue_pixel = cuConstRendererParams.imageData[((y * imageWidth + x)) + 2];
float alpha_pixel = cuConstRendererParams.imageData[((y * imageWidth + x)) + 3];
// float red_pixel = 0;
// float blue_pixel = 0;
// float green_pixel = 0;
// float alpha_pixel = 0;
//k*# of pixels, added with linear thread ID
//Unrolled the k loop to avoid loop overhead
//TODO: is it converted to registers?? ---> I think its best if I pass by reference??
//float4 imgPtr = *(float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
__syncthreads(); //to make sure this shared memory is visible to everyone --> can remove this as the syncthreads below will take care of it
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
//__shared__ int circleInBox;
//if(threadIdx.x + threadIdx.y == 0) {
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//}
//__syncthreads(); //TODO: is this even needed? --- but why?
if(circleInBox == 0) { continue; }
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
//shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
red_pixel ++;
green_pixel ++;
blue_pixel ++;
alpha_pixel ++;
}
__syncthreads();
//cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
//cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
//cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
//cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x)] = red_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x) + 1] = green_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x) + 2 ] = blue_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x) + 3 ] = alpha_pixel;
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceVelocity);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.invWidth = 1.f / image->width;
params.invHeight =1.f/image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else {
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
}
hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == BOUNCING_BALLS) {
hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == HYPNOSIS) {
hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == FIREWORKS) {
hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
}
hipDeviceSynchronize();
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
//dim3 blockDim(256, 1);
//dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = image->width;
short imageHeight = image->height;
dim3 blockDim(16, 16);
// dim3 gridDim((numPixels + blockDim.x - 1) / blockDim.x);
//int numPixels = imageWidth * imageHeight;
int temp1 = (imageWidth + blockDim.x - 1) / blockDim.x;
int temp2 = (imageHeight + blockDim.y - 1) / blockDim.y;
dim3 gridDim(temp1,temp2); //dividing it into block -- each block working on a portion of image
//NumPixels per block
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
//TODO: why does perf tank with more kernels --- what's the trade off?
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
|
5df19040f0ecd99a78b593c385e3695f2b3f9082.cu
|
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float invWidth;
float invHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
    // shrink the circle back to its starting radius after reaching the cutoff
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float& redPix, float& greenPix, float& bluePix, float& alphaPix) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
    // same direction for all threads so its cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
//TODO: why in 2 steps -- is it to avoid some hazard???!!
/*
float4 existingColor = imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
imagePtr = newColor;*/
redPix = alpha * rgb.x + oneMinusAlpha * redPix;
greenPix = alpha * rgb.y + oneMinusAlpha * greenPix;
bluePix = alpha * rgb.z + oneMinusAlpha * bluePix;
alphaPix = alpha + alphaPix;
// END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderCircles() {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;
// float4 allPix = *(float4*)(&cuConstRendererParams.imageData[(4 * (y * imageWidth + x))]);
// float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
// float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
// float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
// float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
float red_pixel = cuConstRendererParams.imageData[((y * imageWidth + x))];
float green_pixel = cuConstRendererParams.imageData[((y * imageWidth + x)) + 1];
float blue_pixel = cuConstRendererParams.imageData[((y * imageWidth + x)) + 2];
float alpha_pixel = cuConstRendererParams.imageData[((y * imageWidth + x)) + 3];
// float red_pixel = 0;
// float blue_pixel = 0;
// float green_pixel = 0;
// float alpha_pixel = 0;
//k*# of pixels, added with linear thread ID
//Unrolled the k loop to avoid loop overhead
//TODO: is it converted to registers?? ---> I think its best if I pass by reference??
//float4 imgPtr = *(float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
__syncthreads(); //to make sure this shared memory is visible to everyone --> can remove this as the syncthreads below will take care of it
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
//__shared__ int circleInBox;
//if(threadIdx.x + threadIdx.y == 0) {
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//}
//__syncthreads(); //TODO: is this even needed? --- but why?
if(circleInBox == 0) { continue; }
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
//shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
red_pixel ++;
green_pixel ++;
blue_pixel ++;
alpha_pixel ++;
}
__syncthreads();
//cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
//cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
//cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
//cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x)] = red_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x) + 1] = green_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x) + 2 ] = blue_pixel;
cuConstRendererParams.imageData[(y * imageWidth + x) + 3 ] = alpha_pixel;
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceVelocity);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.invWidth = 1.f / image->width;
params.invHeight =1.f/image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
} else if (sceneName == BOUNCING_BALLS) {
kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
} else if (sceneName == HYPNOSIS) {
kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
} else if (sceneName == FIREWORKS) {
kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
}
cudaDeviceSynchronize();
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
//dim3 blockDim(256, 1);
//dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = image->width;
short imageHeight = image->height;
dim3 blockDim(16, 16);
// dim3 gridDim((numPixels + blockDim.x - 1) / blockDim.x);
//int numPixels = imageWidth * imageHeight;
int temp1 = (imageWidth + blockDim.x - 1) / blockDim.x;
int temp2 = (imageHeight + blockDim.y - 1) / blockDim.y;
dim3 gridDim(temp1,temp2); //dividing it into block -- each block working on a portion of image
//NumPixels per block
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
//TODO: why does perf tank with more kernels --- what's the trade off?
kernelRenderCircles<<<gridDim, blockDim>>>();
gpuErrchk(cudaDeviceSynchronize());
}
|
44e3e9fe8f612ba19ddd6b29027c6da9e250de0e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <Windows.h>
__global__ void helloFromGPU(void)
{
printf("Hello World!\n");
}
__global__ void add(int i, int j)
{
int count;
count = i + j;
printf("\n Num is %d\n ", count);
}
__global__ void what_is_my_id(unsigned int* const block,
unsigned int* const thread,
unsigned int* const warp,
unsigned int* const calc_thread)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
block[thread_idx] = blockIdx.x;
thread[thread_idx] = threadIdx.x;
warp[thread_idx] = threadIdx.x / warpSize;
calc_thread[thread_idx] = thread_idx;
}
#define ARRAY_SIZE 128
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
unsigned int cpu_block[ARRAY_SIZE];
unsigned int cpu_thread[ARRAY_SIZE];
unsigned int cpu_warp[ARRAY_SIZE];
unsigned int cpu_calc_thread[ARRAY_SIZE];
int main(void)
{
const unsigned int num_blocks = 2;
const unsigned int num_thread = 64;
char ch;
unsigned int* gpu_block;
unsigned int* gpu_thread;
unsigned int* gpu_warp;
unsigned int* gpu_calc_thread;
unsigned int i;
hipMalloc((void**)&gpu_block, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_thread,ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_warp, ARRAY_SIZE_IN_BYTES);
hipMalloc((void**)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
what_is_my_id << <num_blocks, num_thread >> > (gpu_block, gpu_thread, gpu_warp, gpu_calc_thread);
hipMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
    hipMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost);
hipFree(gpu_block);
hipFree(gpu_thread);
hipFree(gpu_warp);
hipFree(gpu_calc_thread);
/*
add << <1, 1 >> > (10, 15);
hipError_t cudaStatus;
printf("Hello World from GPU \n");
helloFromGPU << <2, 10 >> > ();
hipDeviceReset();
*/
for (i = 0; i<ARRAY_SIZE;i++)
{
printf("Calculated Thread:%3u - Block: %2u - Warp %2u - Thread %3u\n", cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]);
}
ch = getchar();
return 0;
}
|
44e3e9fe8f612ba19ddd6b29027c6da9e250de0e.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <Windows.h>
__global__ void helloFromGPU(void)
{
printf("Hello World!\n");
}
__global__ void add(int i, int j)
{
int count;
count = i + j;
printf("\n Num is %d\n ", count);
}
__global__ void what_is_my_id(unsigned int* const block,
unsigned int* const thread,
unsigned int* const warp,
unsigned int* const calc_thread)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
block[thread_idx] = blockIdx.x;
thread[thread_idx] = threadIdx.x;
warp[thread_idx] = threadIdx.x / warpSize;
calc_thread[thread_idx] = thread_idx;
}
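// Example: with the launch of 2 blocks x 64 threads used in main() and
// warpSize == 32 (true on current NVIDIA GPUs), global thread 70 records
// block 1, thread 6, warp 6/32 = 0, and calc_thread 70.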
#define ARRAY_SIZE 128
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
unsigned int cpu_block[ARRAY_SIZE];
unsigned int cpu_thread[ARRAY_SIZE];
unsigned int cpu_warp[ARRAY_SIZE];
unsigned int cpu_calc_thread[ARRAY_SIZE];
int main(void)
{
const unsigned int num_blocks = 2;
const unsigned int num_thread = 64;
char ch;
unsigned int* gpu_block;
unsigned int* gpu_thread;
unsigned int* gpu_warp;
unsigned int* gpu_calc_thread;
unsigned int i;
cudaMalloc((void**)&gpu_block, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_thread,ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_warp, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
what_is_my_id << <num_blocks, num_thread >> > (gpu_block, gpu_thread, gpu_warp, gpu_calc_thread);
cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaFree(gpu_block);
cudaFree(gpu_thread);
cudaFree(gpu_warp);
cudaFree(gpu_calc_thread);
/*
add << <1, 1 >> > (10, 15);
cudaError_t cudaStatus;
printf("Hello World from GPU \n");
helloFromGPU << <2, 10 >> > ();
cudaDeviceReset();
*/
for (i = 0; i<ARRAY_SIZE;i++)
{
printf("Calculated Thread:%3u - Block: %2u - Warp %2u - Thread %3u\n", cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]);
}
ch = getchar();
return 0;
}
|
85a3c07a100c964d083ef3f6e29c20dbb727b501.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl_diag.cu, normal z -> s, Mon Jun 25 18:24:12 2018
*/
#include "magma_internal.h"
#define MB 64
#define NB 160
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_diag_lower(
int m, int n,
const float* D, int ldd,
float* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_S_DIV( A[j*lda], D[j + j*ldd] );
}
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_diag_upper(
int m, int n,
const float* D, int ldd,
float* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_S_DIV( A[j*lda], D[ind_x + ind_x*ldd] );
}
}
}
/***************************************************************************//**
Purpose
-------
SLASCL_DIAG scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be upper triangular or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (LDDD,M)
The matrix storing the scaling factor on its diagonal.
@param[in]
lddd INTEGER
The leading dimension of the array D.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl_diag(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD, magma_int_t lddd,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( lddd < max(1,m) )
*info = -5;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( MB );
dim3 grid( magma_ceildiv( m, MB ), magma_ceildiv( n, NB ) );
if (type == MagmaLower) {
hipLaunchKernelGGL(( slascl_diag_lower)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dD, lddd, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( slascl_diag_upper)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dD, lddd, dA, ldda);
}
}
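// Illustrative CPU reference (not part of MAGMA's API; a sketch of what the two
// kernels above compute on a column-major m-by-n block): MagmaLower divides
// column j by D(j,j), MagmaUpper divides row i by D(i,i).
static void slascl_diag_cpu_reference(
    bool lower, int m, int n,
    const float* D, int ldd,
    float* A, int lda)
{
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < m; ++i)
            A[i + j*lda] /= lower ? D[j + j*ldd] : D[i + i*ldd];
}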
|
85a3c07a100c964d083ef3f6e29c20dbb727b501.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl_diag.cu, normal z -> s, Mon Jun 25 18:24:12 2018
*/
#include "magma_internal.h"
#define MB 64
#define NB 160
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_diag_lower(
int m, int n,
const float* D, int ldd,
float* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_S_DIV( A[j*lda], D[j + j*ldd] );
}
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_diag_upper(
int m, int n,
const float* D, int ldd,
float* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_S_DIV( A[j*lda], D[ind_x + ind_x*ldd] );
}
}
}
/***************************************************************************//**
Purpose
-------
SLASCL_DIAG scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be upper triangular or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (LDDD,M)
The matrix storing the scaling factor on its diagonal.
@param[in]
lddd INTEGER
The leading dimension of the array D.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl_diag(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD, magma_int_t lddd,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( lddd < max(1,m) )
*info = -5;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( MB );
dim3 grid( magma_ceildiv( m, MB ), magma_ceildiv( n, NB ) );
if (type == MagmaLower) {
slascl_diag_lower
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, dD, lddd, dA, ldda);
}
else if (type == MagmaUpper) {
slascl_diag_upper
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, dD, lddd, dA, ldda);
}
}
|
831d7813cde86ba909f5f7b646960a481a57c195.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
//
// kernel routine
//
__global__ void my_first_kernel(float *x)
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
x[tid] = (float) threadIdx.x;
}
//
// main code
//
int main(int argc, const char **argv)
{
float *h_x, *d_x;
int nblocks, nthreads, nsize, n;
// initialise card
findCudaDevice(argc, argv);
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 8;
nsize = nblocks*nthreads ;
// allocate memory for array
h_x = (float *)malloc(nsize*sizeof(float));
checkCudaErrors(hipMalloc((void **)&d_x, nsize*sizeof(float)));
// execute kernel
hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
getLastCudaError("my_first_kernel execution failed\n");
// copy back results and print them out
checkCudaErrors( hipMemcpy(h_x,d_x,nsize*sizeof(float),
hipMemcpyDeviceToHost) );
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
// free memory
checkCudaErrors(hipFree(d_x));
free(h_x);
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
return 0;
}
|
831d7813cde86ba909f5f7b646960a481a57c195.cu
|
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
//
// kernel routine
//
__global__ void my_first_kernel(float *x)
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
x[tid] = (float) threadIdx.x;
}
//
// main code
//
int main(int argc, const char **argv)
{
float *h_x, *d_x;
int nblocks, nthreads, nsize, n;
// initialise card
findCudaDevice(argc, argv);
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 8;
nsize = nblocks*nthreads ;
// allocate memory for array
h_x = (float *)malloc(nsize*sizeof(float));
checkCudaErrors(cudaMalloc((void **)&d_x, nsize*sizeof(float)));
// execute kernel
my_first_kernel<<<nblocks,nthreads>>>(d_x);
getLastCudaError("my_first_kernel execution failed\n");
// copy back results and print them out
checkCudaErrors( cudaMemcpy(h_x,d_x,nsize*sizeof(float),
cudaMemcpyDeviceToHost) );
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
// free memory
checkCudaErrors(cudaFree(d_x));
free(h_x);
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
return 0;
}
|
f820d49407bf25b866dd49bb6ea6e5124828f678.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"HolidayConvolutionGPU.h"
#include"HolidayNetResource.h"
#include"HolidayCNN_proto.pb.h"
#include"HolidayBlobGpu.h"
HolidayConvolutionGPU::HolidayConvolutionGPU()
: pfKernel_d(nullptr), pfBias_d(nullptr), ppfBlas(nullptr), ppfBlas_d(nullptr), ppfKernel_d(nullptr)
{
}
HolidayConvolutionGPU::~HolidayConvolutionGPU()
{
}
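// Tiled matrix multiply used by the non-_X64_ fallback path in Process(): accumulates
// C += A * B for each (batch, group) slice with 16x16 shared-memory tiles. The fComp
// variable implements Kahan (compensated) summation to limit round-off while summing
// the inner products.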
__global__ static void gConvMatrixMult_kernel(float *pfA, float *pfB, float *pfC, int dwN, int dwM, int dwP, int dwG)
{
__shared__ float pfTmpA[16][16];
__shared__ float pfTmpB[16][16];
int dwDimNG = (blockDim.y * blockIdx.y + threadIdx.y) / (CUDA_BLOCK(dwN, 16) * 16);
int dwDimG = dwDimNG % dwG;
float *pfOffA = pfA + dwDimG * dwN * dwP;
float *pfOffB = pfB + dwDimNG * dwP * dwM;
float *pfOffC = pfC + dwDimNG * dwN * dwM;
int dwGlobalIdxN = (blockDim.y * blockIdx.y + threadIdx.y) % (CUDA_BLOCK(dwN, 16) * 16);
int dwGlobalIdxM = blockDim.x * blockIdx.x + threadIdx.x;
int dwLocalIdxN = threadIdx.y;
int dwLocalIdxM = threadIdx.x;
float fResults = 0;
float fComp = 0;
for (int j = 0; j < dwP; j += 16)
{
if (dwGlobalIdxN < dwN && dwLocalIdxM + j < dwP)
{
pfTmpA[dwLocalIdxN][dwLocalIdxM] = pfOffA[dwGlobalIdxN * dwP + dwLocalIdxM + j];
}
else
{
pfTmpA[dwLocalIdxN][dwLocalIdxM] = 0;
}
if (dwGlobalIdxM < dwM && dwLocalIdxN + j < dwP)
{
pfTmpB[dwLocalIdxN][dwLocalIdxM] = pfOffB[(dwLocalIdxN + j) * dwM + dwGlobalIdxM];
}
else
{
pfTmpB[dwLocalIdxN][dwLocalIdxM] = 0;
}
__syncthreads();
for (int i = 0; i < 16; i++)
{
float fTmp;
fComp -= pfTmpA[dwLocalIdxN][i] * pfTmpB[i][dwLocalIdxM];
fTmp = fResults - fComp;
fComp = (fTmp - fResults) + fComp;
fResults = fTmp;
}
__syncthreads();
}
if (dwGlobalIdxM < dwM && dwGlobalIdxN < dwN)
{
pfOffC[dwGlobalIdxN * dwM + dwGlobalIdxM] += fResults;
}
}
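// im2col-style expansion: for every output position (n, c, y, x) the kernel copies the
// corresponding dwKernelH x dwKernelW receptive field (honouring stride, padding, shift
// and dilation) into a column buffer so the convolution reduces to a matrix multiply;
// taps that fall outside the input are zero-filled.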
__global__ static void gInputTrans_kernel(float *pfDataIn, float *pfDataOut, int dwSize, int dwRowIn, int dwColIn,
int dwSliceIn, int dwRowOut, int dwColOut, int dwSliceOut, int dwStrideH, int dwStrideW,
int dwPadH, int dwPadW,
int dwShiftH, int dwShiftW,
int dwDilationH, int dwDilationW, int dwKernelH, int dwKernelW)
{
dwPadH += dwShiftH;
dwPadW += dwShiftW;
int dwIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (dwIdx < dwSize)
{
int dwDimN = dwIdx / (dwSliceOut * dwRowOut * dwColOut);
int dwDim2S = dwIdx % (dwSliceOut * dwRowOut * dwColOut) / (dwRowOut * dwColOut);
int dwDim2R = dwIdx % (dwRowOut * dwColOut) / dwColOut;
int dwDim2C = dwIdx % dwColOut;
int dwDim1R = dwDim2R * dwStrideH - dwPadH;
int dwDim1C = dwDim2C * dwStrideW - dwPadW;
int dwDim1S = dwDim2S;
int dwIdxOut = ((dwDimN * dwSliceOut + dwDim1S) * dwKernelH * dwKernelW * dwRowOut + dwDim2R) * dwColOut + dwDim2C;
int dwIdxIn = dwDim1C + dwColIn * (dwDim1R + dwRowIn * (dwDim1S + dwSliceIn * dwDimN));
for (int i = 0; i < dwKernelH; i++)
{
for (int j = 0; j < dwKernelW; j++)
{
if (dwDim1R + i * dwDilationH >= 0 && dwDim1R + i * dwDilationH < dwRowIn
&& dwDim1C + j * dwDilationW >= 0 && dwDim1C + j * dwDilationW < dwColIn)
{
pfDataOut[dwIdxOut + dwColOut * dwRowOut * (i * dwKernelW + j)] =
pfDataIn[dwIdxIn + j * dwDilationW + dwColIn * i * dwDilationH];
}
else
{
pfDataOut[dwIdxOut + dwColOut * dwRowOut * (i * dwKernelW + j)] = 0;
}
}
}
}
}
//__global__ static void gBiasSet_kernel(float *pfBias, float *pfOut, int dwBiasStep, int dwOutSize)
//{
// int dwIdx = threadIdx.x + blockIdx.x * blockDim.x;
// if (dwIdx < dwOutSize)
// pfOut[dwIdx] = pfBias[dwIdx / dwBiasStep];
//}
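// Seeds the output with the per-channel bias: dwBiasStep is one H*W plane and dwExtStep one
// full C*H*W sample, so every spatial position of channel c receives pfBias[c] before the
// GEMM accumulates on top of it (beta = 1).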
__global__ static void gBiasSet_kernel(float *pfBias, float *pfOut, int dwBiasStep, int dwExtStep, int dwOutSize)
{
int dwIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (dwIdx < dwOutSize)
pfOut[dwIdx] = pfBias[(dwIdx % dwExtStep) / dwBiasStep];
}
int HolidayConvolutionGPU::Init(Holiday_LayerParameter &inputparam, HolidayNetResource<float> *p_holiday_net_resource)
{
pNetResourceGpu = (HolidayNetResourceGpu *)p_holiday_net_resource->pNetResourceGpu;
//
dwKernelNum = inputparam.convolution_param().kernel_param().shape().dim(0);
dwKernelRows = inputparam.convolution_param().kernel_param().shape().dim(2);
dwKernelCols = inputparam.convolution_param().kernel_param().shape().dim(3);
dwKernelSlices = inputparam.convolution_param().kernel_param().shape().dim(1);
//std::cout << "conv init 1" << std::endl;
int bottom_index = inputparam.bottom_index(0);
HolidayDataSize bottom_size = p_holiday_net_resource->feature_vector_size[bottom_index];
this->bottom_data_size.resize(1);
this->bottom_data_size[0] = bottom_size;
std::vector<int> shape;
const ::Holiday_BlobShape& tmp_shape = inputparam.convolution_param().kernel_param().shape();
//std::cout << "conv init 2" << std::endl;
for (int i = 0; i < tmp_shape.dim_size(); i++)
{
shape.push_back(tmp_shape.dim(i));
}
dwGroup = bottom_data_size[0].data_dim[1] / dwKernelSlices;
dwStrideH = inputparam.convolution_param().stride_height();
dwStrideW = inputparam.convolution_param().stride_width();
dwPadH = inputparam.convolution_param().pad_height();
dwPadW = inputparam.convolution_param().pad_width();
dwDilationH = inputparam.convolution_param().dilation_height();
dwDilationW = inputparam.convolution_param().dilation_width();
if (inputparam.convolution_param().has_tf_padding())
{
m_tf_padding = inputparam.convolution_param().tf_padding();
}
top_data_size.resize(1);
// calculate top blobs
Calculate(bottom_data_size[0].data_dim, top_data_size[0].data_dim);
bool is_1x1_conv = dwKernelRows == 1 && dwKernelCols == 1 && dwPadH == 0 && dwPadW == 0 && dwStrideH == 1 && dwStrideW == 1;
// tmp buffer
int dwKernelSize = dwKernelSlices * dwKernelRows * dwKernelCols;
if (!is_1x1_conv) {
gTmpBuffer_gpu(pNetResourceGpu, top_data_size[0].data_dim[3] * top_data_size[0].data_dim[2] * dwKernelSize * dwGroup * pNetResourceGpu->dwMaxBatchNum * sizeof(float));
}
// transData
ppfBlas = new float*[dwGroup * pNetResourceGpu->dwMaxBatchNum * 2];
CUDA_ERROR(SafeCudaMalloc((void **)&ppfBlas_d, dwGroup * pNetResourceGpu->dwMaxBatchNum * 2 * sizeof(float*)));
//kernel param
CUDA_ERROR(SafeCudaMalloc((void **)&pfKernel_d, dwKernelSize * dwKernelNum * sizeof(float)));
const float *pfKernelT = inputparam.mutable_convolution_param()->mutable_kernel_param()->mutable_data()->mutable_data();
CUDA_ERROR(hipMemcpyAsync(pfKernel_d, pfKernelT, dwKernelSize *dwKernelNum* sizeof(float), hipMemcpyHostToDevice, pNetResourceGpu->main_stream));
std::unique_ptr<float*[]> ppfKernel(new float*[dwGroup * pNetResourceGpu->dwMaxBatchNum]);
CUDA_ERROR(SafeCudaMalloc((void **)&ppfKernel_d, dwGroup * pNetResourceGpu->dwMaxBatchNum * sizeof(float*)));
for (int i = 0; i < dwGroup * pNetResourceGpu->dwMaxBatchNum; ++i)
{
ppfKernel[i] = pfKernel_d + (i % dwGroup) * dwKernelSize * (dwKernelNum / dwGroup);
}
CUDA_ERROR(hipMemcpyAsync(ppfKernel_d, ppfKernel.get(), dwGroup * pNetResourceGpu->dwMaxBatchNum * sizeof(float *), hipMemcpyHostToDevice, pNetResourceGpu->main_stream));
//bias param
int dwsize = top_data_size[0].data_dim[1] * top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3];
CUDA_ERROR(SafeCudaMalloc((void **)&pfBias_d, dwKernelNum * sizeof(float)));
//float *pfBiasTmp_d;
//CUDA_ERROR(SafeCudaMalloc((void **)&pfBiasTmp_d, dwKernelNum * sizeof(float)));
if (inputparam.convolution_param().bias_param().data().size())
{
const float *pfBias = inputparam.mutable_convolution_param()->mutable_bias_param()->mutable_data()->mutable_data();
CUDA_ERROR(hipMemcpyAsync(pfBias_d, pfBias, dwKernelNum * sizeof(float), hipMemcpyHostToDevice, pNetResourceGpu->main_stream));
}
else
{
CUDA_ERROR(hipMemsetAsync(pfBias_d, 0, dwKernelNum * sizeof(float), pNetResourceGpu->main_stream));
}
hipStreamSynchronize(pNetResourceGpu->main_stream);
return CUDA_RETURN_VALUE;
}
int HolidayConvolutionGPU::Exit()
{
if (pfKernel_d) hipFree(pfKernel_d);
if (pfBias_d) hipFree(pfBias_d);
if (ppfBlas) delete[]ppfBlas;
if (ppfBlas_d) hipFree(ppfBlas_d);
if (ppfKernel_d) hipFree(ppfKernel_d);
return CUDA_RETURN_VALUE;
}
int HolidayConvolutionGPU::Process(std::vector<HolidayFeatureMap<float>*> input_data_map, std::vector<HolidayFeatureMap<float>*>& output_data_map)
{
#ifdef _DEBUG
hipEvent_t start1;
hipEventCreate(&start1);
hipEvent_t stop1;
hipEventCreate(&stop1);
hipEventRecord(start1, NULL);
#endif
input_data_map[0]->m_gpu.shape_ = input_data_map[0]->data_shape;
input_data_map[0]->m_gpu.Gpu_DataIn(pNetResourceGpu, input_data_map[0]->dwStorageType, input_data_map[0]->m_cpu.dataMemoryPtr());
input_data_map[0]->dwStorageType = DATA_GPU;
output_data_map[0]->dwStorageType = DATA_GPU;
Calculate(input_data_map[0]->data_shape, output_data_map[0]->data_shape);
output_data_map[0]->m_gpu.shape_ = output_data_map[0]->data_shape;
output_data_map[0]->m_gpu.data_size = output_data_map[0]->data_shape[0] * output_data_map[0]->data_shape[1] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[3];
//gInputTrans_kernel << <CUDA_BLOCK(top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3]
// * bottom_data_size[0].data_dim[1] * input_data_map[0]->m_gpu.shape_[0], CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>> >
// (input_data_map[0]->m_gpu.pfData_gpu, pfDataTrans_d,
// top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3] * bottom_data_size[0].data_dim[1] * input_data_map[0]->m_gpu.shape_[0],
// bottom_data_size[0].data_dim[2], bottom_data_size[0].data_dim[3], bottom_data_size[0].data_dim[1],
// top_data_size[0].data_dim[2], top_data_size[0].data_dim[3], bottom_data_size[0].data_dim[1],
// dwStrideH, dwStrideW, dwPadH, dwPadW, dwDilationH, dwDilationW, dwKernelRows, dwKernelCols);
int dwsize = output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1];
int put_param = output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[3] * input_data_map[0]->data_shape[1] * input_data_map[0]->data_shape[0];
bool is_1x1_conv = dwKernelRows == 1 && dwKernelCols == 1 && dwPadH == 0 && dwPadW == 0 && dwStrideH == 1 && dwStrideW == 1;
float *pfDataTrans_d = nullptr;
if (is_1x1_conv)
{
pfDataTrans_d = (float *)input_data_map[0]->m_gpu.pfData_gpu;
}
else
{
gTmpBuffer_gpu(pNetResourceGpu, output_data_map[0]->m_gpu.data_size * dwKernelRows * dwKernelCols * dwGroup * sizeof(float));
pfDataTrans_d = (float *)pNetResourceGpu->pubyConvTmpBuffer;
        gInputTrans_kernel <<<CUDA_BLOCK(put_param, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>>>
((float *)input_data_map[0]->m_gpu.pfData_gpu, pfDataTrans_d,
output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[3] * input_data_map[0]->data_shape[1] * input_data_map[0]->data_shape[0],
input_data_map[0]->data_shape[2], input_data_map[0]->data_shape[3], input_data_map[0]->data_shape[1],
output_data_map[0]->data_shape[2], output_data_map[0]->data_shape[3], input_data_map[0]->data_shape[1],
dwStrideH, dwStrideW,
dwPadH + m_tf_fake_padding_h, dwPadW + m_tf_fake_padding_w,
m_tf_conv_shift_h, m_tf_conv_shift_w,
dwDilationH, dwDilationW, dwKernelRows, dwKernelCols);
}
#ifdef _DEBUG
// int buffer_size = output_data_map[0]->m_gpu.data_size * dwKernelRows * dwKernelCols * dwGroup;
//
// float *pfcol_DataOut = new float[buffer_size];
// CUDA_ERROR(hipMemcpy(pfcol_DataOut, pfDataTrans_d, buffer_size* sizeof(float), hipMemcpyDeviceToHost));
// delete[] pfcol_DataOut;
// hipDeviceSynchronize();
#endif
//gBiasSet_kernel << <CUDA_BLOCK(output_data_map[0]->m_gpu.shape_, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>> >(pfBias_d, (float *)output_data_map[0]->m_gpu.pfData_gpu,
// output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2], output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1], output_data_map[0]->m_gpu.data_size);
    gBiasSet_kernel <<<CUDA_BLOCK(output_data_map[0]->m_gpu.data_size, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>>>(pfBias_d,
(float *)output_data_map[0]->m_gpu.pfData_gpu,
output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1],
output_data_map[0]->m_gpu.data_size);
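    // GEMM dispatch: on _X64_ builds, small per-group outputs (< 10000 elements) go through a
    // single hipblasSgemmBatched call over the per-(batch, group) pointer tables built in Init(),
    // larger problems loop over plain hipblasSgemm calls, and other builds fall back to the custom
    // tiled kernel above; beta = 1 so the results accumulate onto the bias written just before.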
#ifdef _X64_
if (dwsize / dwGroup < 10000)
{
for (int i = 0; i < dwGroup * input_data_map[0]->m_gpu.shape_[0]; i++)
{
ppfBlas[i] = pfDataTrans_d + i * dwKernelSlices * dwKernelRows * dwKernelCols * top_data_size[0].data_dim[3] * top_data_size[0].data_dim[2];
ppfBlas[i + dwGroup * input_data_map[0]->m_gpu.shape_[0]] = output_data_map[0]->m_gpu.pfData_gpu + i * dwsize / dwGroup;
}
CUDA_ERROR(hipMemcpyAsync(ppfBlas_d, ppfBlas, dwGroup * input_data_map[0]->m_gpu.shape_[0] * 2 * sizeof(float *), hipMemcpyHostToDevice, pNetResourceGpu->main_stream));
//CUDA_ERROR(hipMemcpy(output_data_map[0]->m_gpu.pfData_gpu, pfBias_d, dwsize * input_data_map[0]->m_gpu.shape_[0] * sizeof(float), hipMemcpyDeviceToDevice));
float fAlpha = 1.f;
float fBeta = 1.f;
CUBLAS_ERROR(hipblasSgemmBatched(pNetResourceGpu->Handle_cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2], output_data_map[0]->data_shape[1] / dwGroup,
dwKernelSlices * dwKernelRows * dwKernelCols, &fAlpha,
(const float **)ppfBlas_d, output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
(const float **)ppfKernel_d, dwKernelSlices * dwKernelRows * dwKernelCols, &fBeta,
ppfBlas_d + dwGroup * input_data_map[0]->data_shape[0], output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
dwGroup * input_data_map[0]->data_shape[0]));
}
else
{
//CUDA_ERROR(hipMemcpy(output_data_map[0]->m_gpu.pfData_gpu, pfBias_d, dwsize * input_data_map[0]->m_gpu.shape_[0] * sizeof(float), hipMemcpyDeviceToDevice));
for (int i = 0; i < dwGroup * input_data_map[0]->m_gpu.shape_[0]; i++)
{
float fAlpha = 1.f;
float fBeta = 1.f;
int blas_n = output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2];
int blas_m = output_data_map[0]->data_shape[1] / dwGroup;
int blas_k = dwKernelSlices * dwKernelRows * dwKernelCols;
CUBLAS_ERROR(hipblasSgemm(pNetResourceGpu->Handle_cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
blas_n, blas_m,
blas_k, &fAlpha,
pfDataTrans_d + i * dwKernelSlices * dwKernelRows * dwKernelCols * output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
blas_n,
pfKernel_d + (i % dwGroup) * dwKernelSlices * dwKernelRows * dwKernelCols * (dwKernelNum / dwGroup), blas_k,
&fBeta,
output_data_map[0]->m_gpu.pfData_gpu + i * output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1] / dwGroup,
blas_n));
}
}
#else
// CUDA_ERROR(hipMemcpy(output_data_map[0]->m_gpu.pfData_gpu, pfBias_d, dwsize * input_data_map[0]->m_gpu.shape_[0] * sizeof(float), hipMemcpyDeviceToDevice));
int dwN = output_data_map[0]->data_shape[1] / dwGroup;
int dwM = output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2];
int dwP = dwKernelSlices * dwKernelRows * dwKernelCols;
dim3 blocksize(CUDA_BLOCK(dwM, 16), CUDA_BLOCK(dwN, 16) * dwGroup * input_data_map[0]->m_gpu.shape_[0]);
dim3 threadsize(16, 16);
    gConvMatrixMult_kernel <<<blocksize, threadsize, 0, pNetResourceGpu->main_stream>>>(pfKernel_d, pfDataTrans_d, output_data_map[0]->m_gpu.pfData_gpu, dwN, dwM, dwP, dwGroup);
#endif
output_data_map[0]->dwStorageType = DATA_GPU;
#ifdef _DEBUG
hipEventRecord(stop1, NULL);
hipEventSynchronize(stop1);
float msecTotal1 = 0.0f;
hipEventElapsedTime(&msecTotal1, start1, stop1);
//printf(" Convolution: %f ms [%d < 10000 : batch ? stream]\n ", msecTotal1, top_data_size[0].dwSize / dwGroup);
#endif
#ifdef _DEBUG
float *pfDataOut = new float[output_data_map[0]->m_gpu.data_size];
output_data_map[0]->m_gpu.Gpu_DataOut(pNetResourceGpu, DATA_CPU_WIDTH, pfDataOut);
delete[] pfDataOut;
hipDeviceSynchronize();
printf("Convolution:%s\n", hipGetErrorString(hipGetLastError()));
#endif
return CUDA_RETURN_VALUE;
}
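// Computes the output spatial size. The "VALID"/"SAME" branches reproduce TensorFlow-style
// padding by deriving an extra symmetric fake padding plus a shift (both consumed by
// gInputTrans_kernel); the default branch is the usual
// (H + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1 formula.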
int HolidayConvolutionGPU::Caculate(const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w,
int& output_h, int& output_w)
{
if (m_tf_padding == "VALID")
{
output_h = ceil((height + 2 * pad_h -
(dilation_h * (kernel_h - 1))) / float(stride_h));
output_w = ceil((width + 2 * pad_w -
(dilation_w * (kernel_w - 1))) / float(stride_w));
}
else if (m_tf_padding == "SAME")
{
output_h = ceil((height + 2 * pad_h) / float(stride_h));
output_w = ceil((width + 2 * pad_w) / float(stride_w));
int original_view_h = height + 2 * pad_h;
int original_view_w = width + 2 * pad_w;
int need_view_h = output_h * stride_h + kernel_h - 1;
int need_view_w = output_w * stride_w + kernel_w - 1;
m_tf_fake_padding_h = (need_view_h - original_view_h) / 2;
m_tf_fake_padding_w = (need_view_w - original_view_w) / 2;
int tf_need_view_h = (output_h - 1) * stride_h + kernel_h;
int tf_need_view_w = (output_w - 1) * stride_w + kernel_w;
m_tf_conv_shift_h = -m_tf_fake_padding_h + (tf_need_view_h - original_view_h) / 2;
m_tf_conv_shift_w = -m_tf_fake_padding_w + (tf_need_view_w - original_view_w) / 2;
}
else
{
output_h = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
output_w = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
}
return 0;
}
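// Worked example for the "SAME" branch above (assumed values, not from the original source):
// height = 5, kernel_h = 3, stride_h = 2, pad_h = 0 gives output_h = ceil(5/2) = 3,
// need_view_h = 3*2 + 3 - 1 = 8, so m_tf_fake_padding_h = (8 - 5)/2 = 1 and
// m_tf_conv_shift_h = -1 + (7 - 5)/2 = 0, matching TensorFlow's SAME output size of 3.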
int HolidayConvolutionGPU::Calculate(const std::vector<int> &bottom_shape, std::vector<int> &top_shape) {
top_shape.resize(4);
top_shape[0] = bottom_shape[0];
top_shape[1] = dwKernelNum;
Caculate(bottom_shape[2], bottom_shape[3],
dwKernelRows, dwKernelCols,
dwPadH, dwPadW,
dwStrideH, dwStrideW,
dwDilationH, dwDilationW,
top_shape[2], top_shape[3]);
return 0;
}
|
f820d49407bf25b866dd49bb6ea6e5124828f678.cu
|
#include"HolidayConvolutionGPU.h"
#include"HolidayNetResource.h"
#include"HolidayCNN_proto.pb.h"
#include"HolidayBlobGpu.h"
HolidayConvolutionGPU::HolidayConvolutionGPU()
: pfKernel_d(nullptr), pfBias_d(nullptr), ppfBlas(nullptr), ppfBlas_d(nullptr), ppfKernel_d(nullptr)
{
}
HolidayConvolutionGPU::~HolidayConvolutionGPU()
{
}
__global__ static void gConvMatrixMult_kernel(float *pfA, float *pfB, float *pfC, int dwN, int dwM, int dwP, int dwG)
{
__shared__ float pfTmpA[16][16];
__shared__ float pfTmpB[16][16];
int dwDimNG = (blockDim.y * blockIdx.y + threadIdx.y) / (CUDA_BLOCK(dwN, 16) * 16);
int dwDimG = dwDimNG % dwG;
float *pfOffA = pfA + dwDimG * dwN * dwP;
float *pfOffB = pfB + dwDimNG * dwP * dwM;
float *pfOffC = pfC + dwDimNG * dwN * dwM;
int dwGlobalIdxN = (blockDim.y * blockIdx.y + threadIdx.y) % (CUDA_BLOCK(dwN, 16) * 16);
int dwGlobalIdxM = blockDim.x * blockIdx.x + threadIdx.x;
int dwLocalIdxN = threadIdx.y;
int dwLocalIdxM = threadIdx.x;
float fResults = 0;
float fComp = 0;
for (int j = 0; j < dwP; j += 16)
{
if (dwGlobalIdxN < dwN && dwLocalIdxM + j < dwP)
{
pfTmpA[dwLocalIdxN][dwLocalIdxM] = pfOffA[dwGlobalIdxN * dwP + dwLocalIdxM + j];
}
else
{
pfTmpA[dwLocalIdxN][dwLocalIdxM] = 0;
}
if (dwGlobalIdxM < dwM && dwLocalIdxN + j < dwP)
{
pfTmpB[dwLocalIdxN][dwLocalIdxM] = pfOffB[(dwLocalIdxN + j) * dwM + dwGlobalIdxM];
}
else
{
pfTmpB[dwLocalIdxN][dwLocalIdxM] = 0;
}
__syncthreads();
for (int i = 0; i < 16; i++)
{
float fTmp;
fComp -= pfTmpA[dwLocalIdxN][i] * pfTmpB[i][dwLocalIdxM];
fTmp = fResults - fComp;
fComp = (fTmp - fResults) + fComp;
fResults = fTmp;
}
__syncthreads();
}
if (dwGlobalIdxM < dwM && dwGlobalIdxN < dwN)
{
pfOffC[dwGlobalIdxN * dwM + dwGlobalIdxM] += fResults;
}
}
__global__ static void gInputTrans_kernel(float *pfDataIn, float *pfDataOut, int dwSize, int dwRowIn, int dwColIn,
int dwSliceIn, int dwRowOut, int dwColOut, int dwSliceOut, int dwStrideH, int dwStrideW,
int dwPadH, int dwPadW,
int dwShiftH, int dwShiftW,
int dwDilationH, int dwDilationW, int dwKernelH, int dwKernelW)
{
dwPadH += dwShiftH;
dwPadW += dwShiftW;
int dwIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (dwIdx < dwSize)
{
int dwDimN = dwIdx / (dwSliceOut * dwRowOut * dwColOut);
int dwDim2S = dwIdx % (dwSliceOut * dwRowOut * dwColOut) / (dwRowOut * dwColOut);
int dwDim2R = dwIdx % (dwRowOut * dwColOut) / dwColOut;
int dwDim2C = dwIdx % dwColOut;
int dwDim1R = dwDim2R * dwStrideH - dwPadH;
int dwDim1C = dwDim2C * dwStrideW - dwPadW;
int dwDim1S = dwDim2S;
int dwIdxOut = ((dwDimN * dwSliceOut + dwDim1S) * dwKernelH * dwKernelW * dwRowOut + dwDim2R) * dwColOut + dwDim2C;
int dwIdxIn = dwDim1C + dwColIn * (dwDim1R + dwRowIn * (dwDim1S + dwSliceIn * dwDimN));
for (int i = 0; i < dwKernelH; i++)
{
for (int j = 0; j < dwKernelW; j++)
{
if (dwDim1R + i * dwDilationH >= 0 && dwDim1R + i * dwDilationH < dwRowIn
&& dwDim1C + j * dwDilationW >= 0 && dwDim1C + j * dwDilationW < dwColIn)
{
pfDataOut[dwIdxOut + dwColOut * dwRowOut * (i * dwKernelW + j)] =
pfDataIn[dwIdxIn + j * dwDilationW + dwColIn * i * dwDilationH];
}
else
{
pfDataOut[dwIdxOut + dwColOut * dwRowOut * (i * dwKernelW + j)] = 0;
}
}
}
}
}
//__global__ static void gBiasSet_kernel(float *pfBias, float *pfOut, int dwBiasStep, int dwOutSize)
//{
// int dwIdx = threadIdx.x + blockIdx.x * blockDim.x;
// if (dwIdx < dwOutSize)
// pfOut[dwIdx] = pfBias[dwIdx / dwBiasStep];
//}
__global__ static void gBiasSet_kernel(float *pfBias, float *pfOut, int dwBiasStep, int dwExtStep, int dwOutSize)
{
int dwIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (dwIdx < dwOutSize)
pfOut[dwIdx] = pfBias[(dwIdx % dwExtStep) / dwBiasStep];
}
int HolidayConvolutionGPU::Init(Holiday_LayerParameter &inputparam, HolidayNetResource<float> *p_holiday_net_resource)
{
pNetResourceGpu = (HolidayNetResourceGpu *)p_holiday_net_resource->pNetResourceGpu;
//
dwKernelNum = inputparam.convolution_param().kernel_param().shape().dim(0);
dwKernelRows = inputparam.convolution_param().kernel_param().shape().dim(2);
dwKernelCols = inputparam.convolution_param().kernel_param().shape().dim(3);
dwKernelSlices = inputparam.convolution_param().kernel_param().shape().dim(1);
//std::cout << "conv init 1" << std::endl;
int bottom_index = inputparam.bottom_index(0);
HolidayDataSize bottom_size = p_holiday_net_resource->feature_vector_size[bottom_index];
this->bottom_data_size.resize(1);
this->bottom_data_size[0] = bottom_size;
std::vector<int> shape;
const ::Holiday_BlobShape& tmp_shape = inputparam.convolution_param().kernel_param().shape();
//std::cout << "conv init 2" << std::endl;
for (int i = 0; i < tmp_shape.dim_size(); i++)
{
shape.push_back(tmp_shape.dim(i));
}
dwGroup = bottom_data_size[0].data_dim[1] / dwKernelSlices;
dwStrideH = inputparam.convolution_param().stride_height();
dwStrideW = inputparam.convolution_param().stride_width();
dwPadH = inputparam.convolution_param().pad_height();
dwPadW = inputparam.convolution_param().pad_width();
dwDilationH = inputparam.convolution_param().dilation_height();
dwDilationW = inputparam.convolution_param().dilation_width();
if (inputparam.convolution_param().has_tf_padding())
{
m_tf_padding = inputparam.convolution_param().tf_padding();
}
top_data_size.resize(1);
// calculate top blobs
Calculate(bottom_data_size[0].data_dim, top_data_size[0].data_dim);
bool is_1x1_conv = dwKernelRows == 1 && dwKernelCols == 1 && dwPadH == 0 && dwPadW == 0 && dwStrideH == 1 && dwStrideW == 1;
// tmp buffer
int dwKernelSize = dwKernelSlices * dwKernelRows * dwKernelCols;
if (!is_1x1_conv) {
gTmpBuffer_gpu(pNetResourceGpu, top_data_size[0].data_dim[3] * top_data_size[0].data_dim[2] * dwKernelSize * dwGroup * pNetResourceGpu->dwMaxBatchNum * sizeof(float));
}
// transData
ppfBlas = new float*[dwGroup * pNetResourceGpu->dwMaxBatchNum * 2];
CUDA_ERROR(SafeCudaMalloc((void **)&ppfBlas_d, dwGroup * pNetResourceGpu->dwMaxBatchNum * 2 * sizeof(float*)));
//kernel param
CUDA_ERROR(SafeCudaMalloc((void **)&pfKernel_d, dwKernelSize * dwKernelNum * sizeof(float)));
const float *pfKernelT = inputparam.mutable_convolution_param()->mutable_kernel_param()->mutable_data()->mutable_data();
CUDA_ERROR(cudaMemcpyAsync(pfKernel_d, pfKernelT, dwKernelSize *dwKernelNum* sizeof(float), cudaMemcpyHostToDevice, pNetResourceGpu->main_stream));
std::unique_ptr<float*[]> ppfKernel(new float*[dwGroup * pNetResourceGpu->dwMaxBatchNum]);
CUDA_ERROR(SafeCudaMalloc((void **)&ppfKernel_d, dwGroup * pNetResourceGpu->dwMaxBatchNum * sizeof(float*)));
for (int i = 0; i < dwGroup * pNetResourceGpu->dwMaxBatchNum; ++i)
{
ppfKernel[i] = pfKernel_d + (i % dwGroup) * dwKernelSize * (dwKernelNum / dwGroup);
}
CUDA_ERROR(cudaMemcpyAsync(ppfKernel_d, ppfKernel.get(), dwGroup * pNetResourceGpu->dwMaxBatchNum * sizeof(float *), cudaMemcpyHostToDevice, pNetResourceGpu->main_stream));
//bias param
int dwsize = top_data_size[0].data_dim[1] * top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3];
CUDA_ERROR(SafeCudaMalloc((void **)&pfBias_d, dwKernelNum * sizeof(float)));
//float *pfBiasTmp_d;
//CUDA_ERROR(SafeCudaMalloc((void **)&pfBiasTmp_d, dwKernelNum * sizeof(float)));
if (inputparam.convolution_param().bias_param().data().size())
{
const float *pfBias = inputparam.mutable_convolution_param()->mutable_bias_param()->mutable_data()->mutable_data();
CUDA_ERROR(cudaMemcpyAsync(pfBias_d, pfBias, dwKernelNum * sizeof(float), cudaMemcpyHostToDevice, pNetResourceGpu->main_stream));
}
else
{
CUDA_ERROR(cudaMemsetAsync(pfBias_d, 0, dwKernelNum * sizeof(float), pNetResourceGpu->main_stream));
}
cudaStreamSynchronize(pNetResourceGpu->main_stream);
return CUDA_RETURN_VALUE;
}
int HolidayConvolutionGPU::Exit()
{
if (pfKernel_d) cudaFree(pfKernel_d);
if (pfBias_d) cudaFree(pfBias_d);
if (ppfBlas) delete[]ppfBlas;
if (ppfBlas_d) cudaFree(ppfBlas_d);
if (ppfKernel_d) cudaFree(ppfKernel_d);
return CUDA_RETURN_VALUE;
}
int HolidayConvolutionGPU::Process(std::vector<HolidayFeatureMap<float>*> input_data_map, std::vector<HolidayFeatureMap<float>*>& output_data_map)
{
#ifdef _DEBUG
cudaEvent_t start1;
cudaEventCreate(&start1);
cudaEvent_t stop1;
cudaEventCreate(&stop1);
cudaEventRecord(start1, NULL);
#endif
input_data_map[0]->m_gpu.shape_ = input_data_map[0]->data_shape;
input_data_map[0]->m_gpu.Gpu_DataIn(pNetResourceGpu, input_data_map[0]->dwStorageType, input_data_map[0]->m_cpu.dataMemoryPtr());
input_data_map[0]->dwStorageType = DATA_GPU;
output_data_map[0]->dwStorageType = DATA_GPU;
Calculate(input_data_map[0]->data_shape, output_data_map[0]->data_shape);
output_data_map[0]->m_gpu.shape_ = output_data_map[0]->data_shape;
output_data_map[0]->m_gpu.data_size = output_data_map[0]->data_shape[0] * output_data_map[0]->data_shape[1] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[3];
//gInputTrans_kernel << <CUDA_BLOCK(top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3]
// * bottom_data_size[0].data_dim[1] * input_data_map[0]->m_gpu.shape_[0], CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>> >
// (input_data_map[0]->m_gpu.pfData_gpu, pfDataTrans_d,
// top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3] * bottom_data_size[0].data_dim[1] * input_data_map[0]->m_gpu.shape_[0],
// bottom_data_size[0].data_dim[2], bottom_data_size[0].data_dim[3], bottom_data_size[0].data_dim[1],
// top_data_size[0].data_dim[2], top_data_size[0].data_dim[3], bottom_data_size[0].data_dim[1],
// dwStrideH, dwStrideW, dwPadH, dwPadW, dwDilationH, dwDilationW, dwKernelRows, dwKernelCols);
int dwsize = output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1];
int put_param = output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[3] * input_data_map[0]->data_shape[1] * input_data_map[0]->data_shape[0];
bool is_1x1_conv = dwKernelRows == 1 && dwKernelCols == 1 && dwPadH == 0 && dwPadW == 0 && dwStrideH == 1 && dwStrideW == 1;
float *pfDataTrans_d = nullptr;
if (is_1x1_conv)
{
pfDataTrans_d = (float *)input_data_map[0]->m_gpu.pfData_gpu;
}
else
{
gTmpBuffer_gpu(pNetResourceGpu, output_data_map[0]->m_gpu.data_size * dwKernelRows * dwKernelCols * dwGroup * sizeof(float));
pfDataTrans_d = (float *)pNetResourceGpu->pubyConvTmpBuffer;
        gInputTrans_kernel <<<CUDA_BLOCK(put_param, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>>>
((float *)input_data_map[0]->m_gpu.pfData_gpu, pfDataTrans_d,
output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[3] * input_data_map[0]->data_shape[1] * input_data_map[0]->data_shape[0],
input_data_map[0]->data_shape[2], input_data_map[0]->data_shape[3], input_data_map[0]->data_shape[1],
output_data_map[0]->data_shape[2], output_data_map[0]->data_shape[3], input_data_map[0]->data_shape[1],
dwStrideH, dwStrideW,
dwPadH + m_tf_fake_padding_h, dwPadW + m_tf_fake_padding_w,
m_tf_conv_shift_h, m_tf_conv_shift_w,
dwDilationH, dwDilationW, dwKernelRows, dwKernelCols);
}
#ifdef _DEBUG
// int buffer_size = output_data_map[0]->m_gpu.data_size * dwKernelRows * dwKernelCols * dwGroup;
//
// float *pfcol_DataOut = new float[buffer_size];
// CUDA_ERROR(cudaMemcpy(pfcol_DataOut, pfDataTrans_d, buffer_size* sizeof(float), cudaMemcpyDeviceToHost));
// delete[] pfcol_DataOut;
// cudaDeviceSynchronize();
#endif
//gBiasSet_kernel << <CUDA_BLOCK(output_data_map[0]->m_gpu.shape_, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>> >(pfBias_d, (float *)output_data_map[0]->m_gpu.pfData_gpu,
// output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2], output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1], output_data_map[0]->m_gpu.data_size);
    gBiasSet_kernel <<<CUDA_BLOCK(output_data_map[0]->m_gpu.data_size, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, pNetResourceGpu->main_stream>>>(pfBias_d,
(float *)output_data_map[0]->m_gpu.pfData_gpu,
output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1],
output_data_map[0]->m_gpu.data_size);
#ifdef _X64_
if (dwsize / dwGroup < 10000)
{
for (int i = 0; i < dwGroup * input_data_map[0]->m_gpu.shape_[0]; i++)
{
ppfBlas[i] = pfDataTrans_d + i * dwKernelSlices * dwKernelRows * dwKernelCols * top_data_size[0].data_dim[3] * top_data_size[0].data_dim[2];
ppfBlas[i + dwGroup * input_data_map[0]->m_gpu.shape_[0]] = output_data_map[0]->m_gpu.pfData_gpu + i * dwsize / dwGroup;
}
CUDA_ERROR(cudaMemcpyAsync(ppfBlas_d, ppfBlas, dwGroup * input_data_map[0]->m_gpu.shape_[0] * 2 * sizeof(float *), cudaMemcpyHostToDevice, pNetResourceGpu->main_stream));
//CUDA_ERROR(cudaMemcpy(output_data_map[0]->m_gpu.pfData_gpu, pfBias_d, dwsize * input_data_map[0]->m_gpu.shape_[0] * sizeof(float), cudaMemcpyDeviceToDevice));
float fAlpha = 1.f;
float fBeta = 1.f;
CUBLAS_ERROR(cublasSgemmBatched(pNetResourceGpu->Handle_cublas, CUBLAS_OP_N, CUBLAS_OP_N,
output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2], output_data_map[0]->data_shape[1] / dwGroup,
dwKernelSlices * dwKernelRows * dwKernelCols, &fAlpha,
(const float **)ppfBlas_d, output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
(const float **)ppfKernel_d, dwKernelSlices * dwKernelRows * dwKernelCols, &fBeta,
ppfBlas_d + dwGroup * input_data_map[0]->data_shape[0], output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
dwGroup * input_data_map[0]->data_shape[0]));
}
else
{
//CUDA_ERROR(cudaMemcpy(output_data_map[0]->m_gpu.pfData_gpu, pfBias_d, dwsize * input_data_map[0]->m_gpu.shape_[0] * sizeof(float), cudaMemcpyDeviceToDevice));
for (int i = 0; i < dwGroup * input_data_map[0]->m_gpu.shape_[0]; i++)
{
float fAlpha = 1.f;
float fBeta = 1.f;
int blas_n = output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2];
int blas_m = output_data_map[0]->data_shape[1] / dwGroup;
int blas_k = dwKernelSlices * dwKernelRows * dwKernelCols;
CUBLAS_ERROR(cublasSgemm(pNetResourceGpu->Handle_cublas, CUBLAS_OP_N, CUBLAS_OP_N,
blas_n, blas_m,
blas_k, &fAlpha,
pfDataTrans_d + i * dwKernelSlices * dwKernelRows * dwKernelCols * output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2],
blas_n,
pfKernel_d + (i % dwGroup) * dwKernelSlices * dwKernelRows * dwKernelCols * (dwKernelNum / dwGroup), blas_k,
&fBeta,
output_data_map[0]->m_gpu.pfData_gpu + i * output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2] * output_data_map[0]->data_shape[1] / dwGroup,
blas_n));
}
}
#else
// CUDA_ERROR(cudaMemcpy(output_data_map[0]->m_gpu.pfData_gpu, pfBias_d, dwsize * input_data_map[0]->m_gpu.shape_[0] * sizeof(float), cudaMemcpyDeviceToDevice));
int dwN = output_data_map[0]->data_shape[1] / dwGroup;
int dwM = output_data_map[0]->data_shape[3] * output_data_map[0]->data_shape[2];
int dwP = dwKernelSlices * dwKernelRows * dwKernelCols;
dim3 blocksize(CUDA_BLOCK(dwM, 16), CUDA_BLOCK(dwN, 16) * dwGroup * input_data_map[0]->m_gpu.shape_[0]);
dim3 threadsize(16, 16);
    gConvMatrixMult_kernel <<<blocksize, threadsize, 0, pNetResourceGpu->main_stream>>>(pfKernel_d, pfDataTrans_d, output_data_map[0]->m_gpu.pfData_gpu, dwN, dwM, dwP, dwGroup);
#endif
output_data_map[0]->dwStorageType = DATA_GPU;
#ifdef _DEBUG
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
//printf(" Convolution: %f ms [%d < 10000 : batch ? stream]\n ", msecTotal1, top_data_size[0].dwSize / dwGroup);
#endif
#ifdef _DEBUG
float *pfDataOut = new float[output_data_map[0]->m_gpu.data_size];
output_data_map[0]->m_gpu.Gpu_DataOut(pNetResourceGpu, DATA_CPU_WIDTH, pfDataOut);
delete[] pfDataOut;
cudaDeviceSynchronize();
printf("Convolution:%s\n", cudaGetErrorString(cudaGetLastError()));
#endif
return CUDA_RETURN_VALUE;
}
int HolidayConvolutionGPU::Caculate(const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w,
int& output_h, int& output_w)
{
if (m_tf_padding == "VALID")
{
output_h = ceil((height + 2 * pad_h -
(dilation_h * (kernel_h - 1))) / float(stride_h));
output_w = ceil((width + 2 * pad_w -
(dilation_w * (kernel_w - 1))) / float(stride_w));
}
else if (m_tf_padding == "SAME")
{
output_h = ceil((height + 2 * pad_h) / float(stride_h));
output_w = ceil((width + 2 * pad_w) / float(stride_w));
int original_view_h = height + 2 * pad_h;
int original_view_w = width + 2 * pad_w;
int need_view_h = output_h * stride_h + kernel_h - 1;
int need_view_w = output_w * stride_w + kernel_w - 1;
m_tf_fake_padding_h = (need_view_h - original_view_h) / 2;
m_tf_fake_padding_w = (need_view_w - original_view_w) / 2;
int tf_need_view_h = (output_h - 1) * stride_h + kernel_h;
int tf_need_view_w = (output_w - 1) * stride_w + kernel_w;
m_tf_conv_shift_h = -m_tf_fake_padding_h + (tf_need_view_h - original_view_h) / 2;
m_tf_conv_shift_w = -m_tf_fake_padding_w + (tf_need_view_w - original_view_w) / 2;
}
else
{
output_h = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
output_w = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
}
return 0;
}
int HolidayConvolutionGPU::Calculate(const std::vector<int> &bottom_shape, std::vector<int> &top_shape) {
top_shape.resize(4);
top_shape[0] = bottom_shape[0];
top_shape[1] = dwKernelNum;
Caculate(bottom_shape[2], bottom_shape[3],
dwKernelRows, dwKernelCols,
dwPadH, dwPadW,
dwStrideH, dwStrideW,
dwDilationH, dwDilationW,
top_shape[2], top_shape[3]);
return 0;
}
|
3055e51490921f2b71dea6b94262d1708be455d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample calculates scalar products of a
* given set of input vector pairs
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <helper_functions.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU
///////////////////////////////////////////////////////////////////////////////
extern "C"
void scalarProdCPU(
float *h_C,
float *h_A,
float *h_B,
int vectorN,
int elementN
);
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on GPU
///////////////////////////////////////////////////////////////////////////////
#include "scalarProd_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
//Total number of input vector pairs; arbitrary
const int VECTOR_N = 256;
//Number of elements per vector; arbitrary,
//but strongly preferred to be a multiple of warp size
//to meet memory coalescing constraints
const int ELEMENT_N = 4096;
//Total number of data elements
const int DATA_N = VECTOR_N * ELEMENT_N;
const int DATA_SZ = DATA_N * sizeof(float);
const int RESULT_SZ = VECTOR_N * sizeof(float);
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
float *h_A, *h_B, *h_C_CPU, *h_C_GPU;
float *d_A, *d_B, *d_C;
double delta, ref, sum_delta, sum_ref, L1norm;
StopWatchInterface *hTimer = NULL;
int i;
printf("%s Starting...\n\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory.\n");
h_A = (float *)malloc(DATA_SZ);
h_B = (float *)malloc(DATA_SZ);
h_C_CPU = (float *)malloc(RESULT_SZ);
h_C_GPU = (float *)malloc(RESULT_SZ);
printf("...allocating GPU memory.\n");
checkCudaErrors(hipMalloc((void **)&d_A, DATA_SZ));
checkCudaErrors(hipMalloc((void **)&d_B, DATA_SZ));
checkCudaErrors(hipMalloc((void **)&d_C, RESULT_SZ));
printf("...generating input data in CPU mem.\n");
srand(123);
//Generating input data on CPU
for (i = 0; i < DATA_N; i++)
{
h_A[i] = RandFloat(0.0f, 1.0f);
h_B[i] = RandFloat(0.0f, 1.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(hipMemcpy(d_A, h_A, DATA_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, DATA_SZ, hipMemcpyHostToDevice));
printf("Data init done.\n");
printf("Executing GPU kernel...\n");
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
hipLaunchKernelGGL(( scalarProdGPU), dim3(128), dim3(256), 0, 0, d_C, d_A, d_B, VECTOR_N, ELEMENT_N);
getLastCudaError("scalarProdGPU() execution failed\n");
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
printf("GPU time: %f msecs.\n", sdkGetTimerValue(&hTimer));
printf("Reading back GPU result...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_C_GPU, d_C, RESULT_SZ, hipMemcpyDeviceToHost));
printf("Checking GPU results...\n");
printf("..running CPU scalar product calculation\n");
scalarProdCPU(h_C_CPU, h_A, h_B, VECTOR_N, ELEMENT_N);
printf("...comparing the results\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
for (i = 0; i < VECTOR_N; i++)
{
delta = fabs(h_C_GPU[i] - h_C_CPU[i]);
ref = h_C_CPU[i];
sum_delta += delta;
sum_ref += ref;
}
L1norm = sum_delta / sum_ref;
printf("Shutting down...\n");
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_A));
free(h_C_GPU);
free(h_C_CPU);
free(h_B);
free(h_A);
sdkDeleteTimer(&hTimer);
hipDeviceReset();
printf("L1 error: %E\n", L1norm);
printf((L1norm < 1e-6) ? "Test passed\n" : "Test failed!\n");
exit(L1norm < 1e-6 ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
3055e51490921f2b71dea6b94262d1708be455d8.cu
|
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample calculates scalar products of a
* given set of input vector pairs
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <helper_functions.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU
///////////////////////////////////////////////////////////////////////////////
extern "C"
void scalarProdCPU(
float *h_C,
float *h_A,
float *h_B,
int vectorN,
int elementN
);
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on GPU
///////////////////////////////////////////////////////////////////////////////
#include "scalarProd_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
//Total number of input vector pairs; arbitrary
const int VECTOR_N = 256;
//Number of elements per vector; arbitrary,
//but strongly preferred to be a multiple of warp size
//to meet memory coalescing constraints
const int ELEMENT_N = 4096;
//Total number of data elements
const int DATA_N = VECTOR_N * ELEMENT_N;
const int DATA_SZ = DATA_N * sizeof(float);
const int RESULT_SZ = VECTOR_N * sizeof(float);
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
float *h_A, *h_B, *h_C_CPU, *h_C_GPU;
float *d_A, *d_B, *d_C;
double delta, ref, sum_delta, sum_ref, L1norm;
StopWatchInterface *hTimer = NULL;
int i;
printf("%s Starting...\n\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory.\n");
h_A = (float *)malloc(DATA_SZ);
h_B = (float *)malloc(DATA_SZ);
h_C_CPU = (float *)malloc(RESULT_SZ);
h_C_GPU = (float *)malloc(RESULT_SZ);
printf("...allocating GPU memory.\n");
checkCudaErrors(cudaMalloc((void **)&d_A, DATA_SZ));
checkCudaErrors(cudaMalloc((void **)&d_B, DATA_SZ));
checkCudaErrors(cudaMalloc((void **)&d_C, RESULT_SZ));
printf("...generating input data in CPU mem.\n");
srand(123);
//Generating input data on CPU
for (i = 0; i < DATA_N; i++)
{
h_A[i] = RandFloat(0.0f, 1.0f);
h_B[i] = RandFloat(0.0f, 1.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(cudaMemcpy(d_A, h_A, DATA_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, DATA_SZ, cudaMemcpyHostToDevice));
printf("Data init done.\n");
printf("Executing GPU kernel...\n");
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
scalarProdGPU<<<128, 256>>>(d_C, d_A, d_B, VECTOR_N, ELEMENT_N);
getLastCudaError("scalarProdGPU() execution failed\n");
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
printf("GPU time: %f msecs.\n", sdkGetTimerValue(&hTimer));
printf("Reading back GPU result...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_C_GPU, d_C, RESULT_SZ, cudaMemcpyDeviceToHost));
printf("Checking GPU results...\n");
printf("..running CPU scalar product calculation\n");
scalarProdCPU(h_C_CPU, h_A, h_B, VECTOR_N, ELEMENT_N);
printf("...comparing the results\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
for (i = 0; i < VECTOR_N; i++)
{
delta = fabs(h_C_GPU[i] - h_C_CPU[i]);
ref = h_C_CPU[i];
sum_delta += delta;
sum_ref += ref;
}
L1norm = sum_delta / sum_ref;
printf("Shutting down...\n");
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_A));
free(h_C_GPU);
free(h_C_CPU);
free(h_B);
free(h_A);
sdkDeleteTimer(&hTimer);
cudaDeviceReset();
printf("L1 error: %E\n", L1norm);
printf((L1norm < 1e-6) ? "Test passed\n" : "Test failed!\n");
exit(L1norm < 1e-6 ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
a528f231e9d5cbab6bc26787fc74627300393196.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void conv2d_reduce_kernel(const float * input_data, float *output_data, size_t input_size, size_t output_size, size_t batch_size){
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= output_size) return;
float temp = 0;
for(int i = 0; i < batch_size; i++){
for ( int j = 0; j < input_size; j++){
temp += input_data[i * input_size * output_size + id * input_size + j];
}
}
output_data[id] = temp;
}
// a naive type!!!
int DLGpuConv2d_reduce_sum(const DLArrayHandle input_x, DLArrayHandle output_y, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(input_x -> shape[1] == output_y -> shape[0]);
const float *input_data = (const float *) input_x -> data;
float* output_data = (float *) output_y ->data;
size_t batch_size = input_x -> shape[0];
size_t input_size = input_x -> shape[2] * input_x -> shape[3];
size_t output_size = output_y ->shape[0];
size_t BLOCKS = (output_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( conv2d_reduce_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, input_data, output_data, input_size, output_size, batch_size);
else
hipLaunchKernelGGL(( conv2d_reduce_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, input_data, output_data, input_size, output_size, batch_size);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_input *= input_x -> shape[i];
for(int i = 0; i < output_y -> ndim; i++)
size_output *= output_y -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
|
a528f231e9d5cbab6bc26787fc74627300393196.cu
|
#include "gpu_runtime.h"
__global__ void conv2d_reduce_kernel(const float * input_data, float *output_data, size_t input_size, size_t output_size, size_t batch_size){
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= output_size) return;
float temp = 0;
for(int i = 0; i < batch_size; i++){
for ( int j = 0; j < input_size; j++){
temp += input_data[i * input_size * output_size + id * input_size + j];
}
}
output_data[id] = temp;
}
// a naive type!!!
int DLGpuConv2d_reduce_sum(const DLArrayHandle input_x, DLArrayHandle output_y, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(input_x -> shape[1] == output_y -> shape[0]);
const float *input_data = (const float *) input_x -> data;
float* output_data = (float *) output_y ->data;
size_t batch_size = input_x -> shape[0];
size_t input_size = input_x -> shape[2] * input_x -> shape[3];
size_t output_size = output_y ->shape[0];
size_t BLOCKS = (output_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
conv2d_reduce_kernel<<<BLOCKS, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(input_data, output_data, input_size, output_size, batch_size);
else
conv2d_reduce_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(input_data, output_data, input_size, output_size, batch_size);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_input *= input_x -> shape[i];
for(int i = 0; i < output_y -> ndim; i++)
size_output *= output_y -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
|
ef93cfd5f271e70d9487afc626429534f7aa6f1d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "tile.h"
#include "kernels.h"
#include "fft_helper.h"
#include "common.h"
namespace SCRIMP {
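// One tile of the SCRIMP matrix-profile self join: the sliding dot products QT are computed with
// the precomputed FFT plan, the upper triangle of the tile is updated, QT is recomputed with the
// A/B series swapped, and the lower triangle is updated; do_self_join_half skips the lower pass.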
SCRIMPError_t SCRIMP_Tile::do_self_join_full(hipStream_t s) {
SCRIMPError_t error;
if(window_size > tile_width) {
return SCRIMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCRIMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp64, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_self_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp64, s);
if(error != SCRIMP_NO_ERROR) {
printf("SCRIMP error\n");
return error;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t SCRIMP_Tile::do_self_join_half(hipStream_t s) {
SCRIMPError_t error;
if(window_size > tile_width) {
return SCRIMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCRIMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1,tile_start_A, tile_start_B, props, fp64, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t SCRIMP_Tile::do_ab_join_full(hipStream_t s) {
SCRIMPError_t error;
if(window_size > tile_width) {
return SCRIMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCRIMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp64, full_join, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp64, full_join, s);
if(error != SCRIMP_NO_ERROR) {
printf("SCRIMP error\n");
return error;
}
return SCRIMP_NO_ERROR;
}
}
|
ef93cfd5f271e70d9487afc626429534f7aa6f1d.cu
|
#include "tile.h"
#include "kernels.h"
#include "fft_helper.h"
#include "common.h"
namespace SCRIMP {
SCRIMPError_t SCRIMP_Tile::do_self_join_full(cudaStream_t s) {
SCRIMPError_t error;
if(window_size > tile_width) {
return SCRIMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCRIMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp64, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_self_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp64, s);
if(error != SCRIMP_NO_ERROR) {
printf("SCRIMP error\n");
return error;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t SCRIMP_Tile::do_self_join_half(cudaStream_t s) {
SCRIMPError_t error;
if(window_size > tile_width) {
return SCRIMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCRIMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1,tile_start_A, tile_start_B, props, fp64, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t SCRIMP_Tile::do_ab_join_full(cudaStream_t s) {
SCRIMPError_t error;
if(window_size > tile_width) {
return SCRIMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCRIMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp64, full_join, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCRIMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp64, full_join, s);
if(error != SCRIMP_NO_ERROR) {
printf("SCRIMP error\n");
return error;
}
return SCRIMP_NO_ERROR;
}
}
|
18314e7cfa28ae1b7b40af38f6e264c29301e54c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cuda_pipeline.h>
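// Double-buffered tensor permutation: each block streams one tile into shared memory with
// asynchronous pipeline copies while the previously fetched tile is scattered to its transposed
// position. TILE_SIZE and the d_shape_* / d_stride_* index tables are assumed to be provided by
// the including translation unit (e.g. a macro and __constant__ arrays).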
__global__
void tensor_transpose(int dim_input, int dim_output, int nblocks, int tile_size,
double *input, double *output) {
extern __shared__ double tile[];
int block_idx = blockIdx.x;
int phase = 0;
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
__pipeline_memcpy_async(&tile[phase * TILE_SIZE + i], &input[i + block_idx * tile_size], sizeof(double));
}
__pipeline_commit();
for (; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
if (block_idx + gridDim.x < nblocks) {
int p = 1 - phase;
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
__pipeline_memcpy_async(&tile[p * TILE_SIZE + i], &input[i + (block_idx + gridDim.x) * tile_size], sizeof(double));
}
__pipeline_commit();
}
for (int i = 0; i < dim_input; i++) {
im = it * d_shape_input_r[i];
offset1 += d_stride_input[i] * (it - im * d_shape_input[i]);
it = im;
}
if (block_idx + gridDim.x < nblocks) {
__pipeline_wait_prior(1);
} else {
__pipeline_wait_prior(0);
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it * d_shape_output_r[j];
int tmp = it - im * d_shape_output[j];
offset2 += d_stride_output_global[j] * tmp;
local_offset += d_stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[phase * TILE_SIZE + local_offset];
}
phase = 1 - phase;
__syncthreads();
}
}
|
18314e7cfa28ae1b7b40af38f6e264c29301e54c.cu
|
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_pipeline.h>
__global__
void tensor_transpose(int dim_input, int dim_output, int nblocks, int tile_size,
double *input, double *output) {
extern __shared__ double tile[];
int block_idx = blockIdx.x;
int phase = 0;
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
__pipeline_memcpy_async(&tile[phase * TILE_SIZE + i], &input[i + block_idx * tile_size], sizeof(double));
}
__pipeline_commit();
for (; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
if (block_idx + gridDim.x < nblocks) {
int p = 1 - phase;
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
__pipeline_memcpy_async(&tile[p * TILE_SIZE + i], &input[i + (block_idx + gridDim.x) * tile_size], sizeof(double));
}
__pipeline_commit();
}
for (int i = 0; i < dim_input; i++) {
im = it * d_shape_input_r[i];
offset1 += d_stride_input[i] * (it - im * d_shape_input[i]);
it = im;
}
if (block_idx + gridDim.x < nblocks) {
__pipeline_wait_prior(1);
} else {
__pipeline_wait_prior(0);
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it * d_shape_output_r[j];
int tmp = it - im * d_shape_output[j];
offset2 += d_stride_output_global[j] * tmp;
local_offset += d_stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[phase * TILE_SIZE + local_offset];
}
phase = 1 - phase;
__syncthreads();
}
}
|
af1bb21b6b0ba6b432e87e987650adf2f6ed0608.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_svd/batch_transpose.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <rocblas.h>
#include "kblas.h"
#include "kblas_common.h"
#include "kblas_struct.h"
#include "kblas_gpu_util.ch"
#include "batch_transpose.h"
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
#define TRANSPOSE_LOAD(m) __ldg(&(m))
template<class T, class T_ptr, class TDim>
__global__
void transpose_kernel(
TDim m_batch, TDim n_batch, T_ptr matrix_data, TDim ldm_batch, int stride_m,
T_ptr transpose_data, TDim ldt_batch, int stride_t, int op_start, int ops
)
{
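    // Shared-memory tile padded by one column (the +1) to avoid bank conflicts on the transposed access.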
__shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM + 1];
int x = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
int y = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
int op_index = op_start + blockIdx.z;
if(op_index >= ops) return;
T* matrix = getOperationPtr<T>(matrix_data, op_index, stride_m);
T* transpose = getOperationPtr<T>(transpose_data, op_index, stride_t);
int m = getOperationDim(m_batch, op_index);
int n = getOperationDim(n_batch, op_index);
int ldm = getOperationDim(ldm_batch, op_index);
int ldt = getOperationDim(ldt_batch, op_index);
#pragma unroll
for (int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
if(x < m && y + j < n)
tile[threadIdx.y + j][threadIdx.x] = TRANSPOSE_LOAD(matrix[x + (y + j) * ldm]);
__syncthreads();
x = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
y = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
#pragma unroll
for (int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
if(y + j < m && x < n)
transpose[x + (y + j) * ldt] = tile[threadIdx.x][threadIdx.y + j];
}
template<class T, class T_ptr, class TDim>
int batch_transpose_template(
kblasHandle_t handle, TDim m_batch, TDim n_batch, int max_m, int max_n,
T_ptr matrix_data, TDim ldm_batch, int stride_m,
T_ptr transpose_data, TDim ldt_batch, int stride_t,
int ops
)
{
int ops_per_kernel = 32768;
int block_rows = iDivUp(max_m, TRANSPOSE_TILE_DIM);
int block_cols = iDivUp(max_n, TRANSPOSE_TILE_DIM);
dim3 blockDim(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
dim3 gridDim(block_rows, block_cols, ops_per_kernel);
int op_start = 0;
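    // Launch one kernel per chunk of at most ops_per_kernel operations; gridDim.z selects the chunk size
    // (presumably to stay within the grid z-dimension limit).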
while(op_start < ops)
{
gridDim.z = kmin(ops_per_kernel, ops - op_start);
hipLaunchKernelGGL(( transpose_kernel<T, T_ptr, TDim>), dim3(gridDim), dim3(blockDim), 0, handle->stream ,
m_batch, n_batch, matrix_data, ldm_batch, stride_m,
transpose_data, ldt_batch, stride_t, op_start, ops
);
op_start += ops_per_kernel;
}
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
// Strided interface
extern "C" int kblasDtranspose_batch_strided(kblasHandle_t handle, int m, int n, double* matrix_strided, int ldm, int stride_m, double* transpose_strided, int ldt, int stride_t, int ops)
{
return batch_transpose_template<double, double*, int>(handle, m, n, m, n, matrix_strided, ldm, stride_m, transpose_strided, ldt, stride_t, ops);
}
extern "C" int kblasStranspose_batch_strided(kblasHandle_t handle, int m, int n, float* matrix_strided, int ldm, int stride_m, float* transpose_strided, int ldt, int stride_t, int ops)
{
return batch_transpose_template<float, float*, int>(handle, m, n, m, n, matrix_strided, ldm, stride_m, transpose_strided, ldt, stride_t, ops);
}
// Array of pointers interface
extern "C" int kblasDtranspose_batch(kblasHandle_t handle, int m, int n, double** matrix_ptrs, int ldm, double** transpose_ptrs, int ldt, int ops)
{
return batch_transpose_template<double, double**, int>(handle, m, n, m, n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
extern "C" int kblasStranspose_batch(kblasHandle_t handle, int m, int n, float** matrix_ptrs, int ldm, float** transpose_ptrs, int ldt, int ops)
{
return batch_transpose_template<float, float**, int>(handle, m, n, m, n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
extern "C" int kblasDtranspose_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, double** matrix_ptrs, int* ldm, double** transpose_ptrs, int* ldt, int ops)
{
return batch_transpose_template<double, double**, int*>(handle, m, n, max_m, max_n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
extern "C" int kblasStranspose_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, float** matrix_ptrs, int* ldm, float** transpose_ptrs, int* ldt, int ops)
{
return batch_transpose_template<float, float**, int*>(handle, m, n, max_m, max_n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
|
af1bb21b6b0ba6b432e87e987650adf2f6ed0608.cu
|
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_svd/batch_transpose.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <cublas_v2.h>
#include "kblas.h"
#include "kblas_common.h"
#include "kblas_struct.h"
#include "kblas_gpu_util.ch"
#include "batch_transpose.h"
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
#define TRANSPOSE_LOAD(m) __ldg(&(m))
template<class T, class T_ptr, class TDim>
__global__
void transpose_kernel(
TDim m_batch, TDim n_batch, T_ptr matrix_data, TDim ldm_batch, int stride_m,
T_ptr transpose_data, TDim ldt_batch, int stride_t, int op_start, int ops
)
{
__shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM + 1];
int x = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
int y = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
int op_index = op_start + blockIdx.z;
if(op_index >= ops) return;
T* matrix = getOperationPtr<T>(matrix_data, op_index, stride_m);
T* transpose = getOperationPtr<T>(transpose_data, op_index, stride_t);
int m = getOperationDim(m_batch, op_index);
int n = getOperationDim(n_batch, op_index);
int ldm = getOperationDim(ldm_batch, op_index);
int ldt = getOperationDim(ldt_batch, op_index);
#pragma unroll
for (int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
if(x < m && y + j < n)
tile[threadIdx.y + j][threadIdx.x] = TRANSPOSE_LOAD(matrix[x + (y + j) * ldm]);
__syncthreads();
x = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
y = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
#pragma unroll
for (int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
if(y + j < m && x < n)
transpose[x + (y + j) * ldt] = tile[threadIdx.x][threadIdx.y + j];
}
template<class T, class T_ptr, class TDim>
int batch_transpose_template(
kblasHandle_t handle, TDim m_batch, TDim n_batch, int max_m, int max_n,
T_ptr matrix_data, TDim ldm_batch, int stride_m,
T_ptr transpose_data, TDim ldt_batch, int stride_t,
int ops
)
{
int ops_per_kernel = 32768;
int block_rows = iDivUp(max_m, TRANSPOSE_TILE_DIM);
int block_cols = iDivUp(max_n, TRANSPOSE_TILE_DIM);
dim3 blockDim(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
dim3 gridDim(block_rows, block_cols, ops_per_kernel);
int op_start = 0;
while(op_start < ops)
{
gridDim.z = kmin(ops_per_kernel, ops - op_start);
transpose_kernel<T, T_ptr, TDim><<< gridDim, blockDim, 0, handle->stream >>>(
m_batch, n_batch, matrix_data, ldm_batch, stride_m,
transpose_data, ldt_batch, stride_t, op_start, ops
);
op_start += ops_per_kernel;
}
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
// Strided interface
extern "C" int kblasDtranspose_batch_strided(kblasHandle_t handle, int m, int n, double* matrix_strided, int ldm, int stride_m, double* transpose_strided, int ldt, int stride_t, int ops)
{
return batch_transpose_template<double, double*, int>(handle, m, n, m, n, matrix_strided, ldm, stride_m, transpose_strided, ldt, stride_t, ops);
}
extern "C" int kblasStranspose_batch_strided(kblasHandle_t handle, int m, int n, float* matrix_strided, int ldm, int stride_m, float* transpose_strided, int ldt, int stride_t, int ops)
{
return batch_transpose_template<float, float*, int>(handle, m, n, m, n, matrix_strided, ldm, stride_m, transpose_strided, ldt, stride_t, ops);
}
// Array of pointers interface
extern "C" int kblasDtranspose_batch(kblasHandle_t handle, int m, int n, double** matrix_ptrs, int ldm, double** transpose_ptrs, int ldt, int ops)
{
return batch_transpose_template<double, double**, int>(handle, m, n, m, n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
extern "C" int kblasStranspose_batch(kblasHandle_t handle, int m, int n, float** matrix_ptrs, int ldm, float** transpose_ptrs, int ldt, int ops)
{
return batch_transpose_template<float, float**, int>(handle, m, n, m, n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
extern "C" int kblasDtranspose_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, double** matrix_ptrs, int* ldm, double** transpose_ptrs, int* ldt, int ops)
{
return batch_transpose_template<double, double**, int*>(handle, m, n, max_m, max_n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
extern "C" int kblasStranspose_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, float** matrix_ptrs, int* ldm, float** transpose_ptrs, int* ldt, int ops)
{
return batch_transpose_template<float, float**, int*>(handle, m, n, max_m, max_n, matrix_ptrs, ldm, 0, transpose_ptrs, ldt, 0, ops);
}
|
08362a65d844081a8a608de93b7d8b8d983a88bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ void ensure_appropriate_values(float e_value, float lognormval, float* bvalue){
if (!isnan(e_value) && !isinf(e_value)) {
if (e_value<=1) {
*bvalue = 0;
}
else {
*bvalue = floorf(logf(e_value)/lognormval);
}
}
*bvalue = (float) min((int) *bvalue, HiCCUPS_W1_MAX_INDX );
}
__device__ void process_masks_lr(int i_start, int i_max_p1, int msize, int t_col, float *c,float *d, int diff,
float* evalue_d, float* evalue_dist_d, float* evalue_v, float* evalue_dist_v){
for (int i = i_start; i < i_max_p1; i++) {
int index = i * msize + t_col;
if (!isnan(c[index])) {
*evalue_d -= c[index];
*evalue_dist_d -= d[abs(i+diff-t_col)];
}
for (int j = -1; j < 2; j++) {
*evalue_v += c[index + j];
*evalue_dist_v += d[abs(i+diff-t_col-j)];
}
}
}
__device__ void process_masks_tb(int j_start, int j_max_p1, int msize, int t_row, float *c,float *d, int diff,
float* evalue_d, float* evalue_dist_d, float* evalue_h, float* evalue_dist_h){
for (int j = j_start; j < j_max_p1; j++) {
int index = t_row * msize + j;
if (!isnan(c[index])) {
*evalue_d -= c[index];
*evalue_dist_d -= d[abs(t_row+diff-j)];
}
for (int i = -1; i < 2; i++) {
*evalue_h += c[(t_row+i) * msize + j];
*evalue_dist_h += d[abs(t_row+i+diff-j)];
}
}
}
extern "C"
__global__ void BasicPeakCallingKernel(float *c, float *expectedbl, float *expecteddonut, float *expectedh,
float *expectedv, float *observed, float *b_bl, float *b_donut, float *b_h, float *b_v, float *p,
float *tbl, float *td, float *th, float *tv, float *d, float *kr1, float *kr2, float *bound1, float *bound3)
{
// 2D Thread ID
int t_col = threadIdx.x + blockIdx.x * blockDim.x;
int t_row = threadIdx.y + blockIdx.y * blockDim.y;
// Evalue is used to store the element of the matrix
// that is computed by the thread
float Evalue_bl = 0;
float Edistvalue_bl = 0;
float Evalue_donut = 0;
float Edistvalue_donut = 0;
float Evalue_h = 0;
float Edistvalue_h = 0;
float Evalue_v = 0;
float Edistvalue_v = 0;
float e_bl = 0;
float e_donut = 0;
float e_h = 0;
float e_v = 0;
float o = 0;
float sbtrkt = 0;
float bvalue_bl = 0;
float bvalue_donut = 0;
float bvalue_h = 0;
float bvalue_v = 0;
int wsize = HiCCUPS_WINDOW;
int msize = HiCCUPS_MATRIX_SIZE;
int pwidth = HiCCUPS_PEAK_WIDTH;
int buffer_width = HiCCUPS_REGION_MARGIN;
int diff = bound1[0] - bound3[0];
int diagDist = abs(t_row+diff-t_col);
int maxIndex = msize-buffer_width;
wsize = min(wsize, (abs(t_row+diff-t_col)-1)/2);
if (wsize <= pwidth) {
wsize = pwidth + 1;
}
wsize = min(wsize, buffer_width);
// only run if within central window (not in data buffer margins)
if (t_row >= buffer_width && t_row<maxIndex && t_col>= buffer_width && t_col<maxIndex) {
// calculate initial bottom left box
for (int i = t_row+1; i <= t_row+wsize; i++) {
for (int j = t_col-wsize; j < t_col; j++) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_bl += c[index];
Edistvalue_bl += d[abs(i+diff-j)];
}
}
}
}
//Subtract off the middle peak
for (int i = t_row+1; i <= t_row+pwidth; i++) {
for (int j = t_col-pwidth; j < t_col; j++) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_bl -= c[index];
Edistvalue_bl -= d[abs(i+diff-j)];
}
}
}
}
//fix box dimensions
while (Evalue_bl<16) {
Evalue_bl =0;
Edistvalue_bl =0;
wsize+=1;
for (int i = t_row+1; i <= t_row+wsize; i++) {
for (int j = t_col-wsize; j < t_col; j++) {
int index = i * msize + j;
if (!isnan(c[index]) && i+diff-j<0) {
Evalue_bl += c[index];
Edistvalue_bl += d[abs(i+diff-j)];
if (i > t_row && i < t_row+pwidth+1 && j > t_col-pwidth-1 && j < t_col) {
Evalue_bl -= c[index];
Edistvalue_bl -= d[abs(i+diff-j)];
}
}
}
}
if (wsize >= buffer_width) {
break;
}
if (2*wsize>= abs(t_row+diff-t_col)) {
break;
}
}
// calculate donut
for (int i = t_row-wsize; i <= t_row+wsize; ++i) {
for (int j = t_col-wsize; j <= t_col+wsize; ++j) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_donut += c[index];
Edistvalue_donut += d[abs(i+diff-j)];
}
}
}
}
//Subtract off the middle peak
for (int i = t_row-pwidth; i <= t_row+pwidth; ++i) {
for (int j = t_col-pwidth; j <= t_col+pwidth; ++j) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_donut -= c[index];
Edistvalue_donut -= d[abs(i+diff-j)];
}
}
}
}
//Subtract off the cross hairs left side
process_masks_lr(t_row-wsize, t_row-pwidth, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v);
//Subtract off the cross hairs right side
process_masks_lr(t_row+pwidth+1, t_row+wsize+1, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v);
//Subtract off the cross hairs top side
process_masks_tb(t_col-wsize, t_col-pwidth, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h);
//Subtract off the cross hairs bottom side
process_masks_tb(t_col+pwidth+1, t_col+wsize+1, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h);
e_bl = ((Evalue_bl*d[diagDist])/Edistvalue_bl)*kr1[t_row]*kr2[t_col];
e_donut = ((Evalue_donut*d[diagDist])/Edistvalue_donut)*kr1[t_row]*kr2[t_col];
e_h = ((Evalue_h*d[diagDist])/Edistvalue_h)*kr1[t_row]*kr2[t_col];
e_v = ((Evalue_v*d[diagDist])/Edistvalue_v)*kr1[t_row]*kr2[t_col];
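        // Bin the expected values on a log scale with base 2^(1/3) (each bin spans a ~1.26x range);
        // the bin indices select entries from the threshold arrays tbl/td/th/tv below.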
float lognorm = logf(powf(2.0,.33));
ensure_appropriate_values(e_bl, lognorm, &bvalue_bl);
ensure_appropriate_values(e_donut, lognorm, &bvalue_donut);
ensure_appropriate_values(e_h, lognorm, &bvalue_h);
ensure_appropriate_values(e_v, lognorm, &bvalue_v);
// Write the matrix to device memory;
// each thread writes one element
int val_index = t_row * msize + t_col;
expectedbl[val_index] = e_bl;
expecteddonut[val_index] = e_donut;
expectedh[val_index] = e_h;
expectedv[val_index] = e_v;
o = roundf(c[val_index]*kr1[t_row]*kr2[t_col]);
observed[val_index] = o;
b_bl[val_index] = bvalue_bl;
b_donut[val_index] = bvalue_donut;
b_h[val_index] = bvalue_h;
b_v[val_index] = bvalue_v;
sbtrkt = fmaxf(tbl[(int) bvalue_bl],td[(int) bvalue_donut]);
sbtrkt = fmaxf(sbtrkt, th[(int) bvalue_h]);
sbtrkt = fmaxf(sbtrkt, tv[(int) bvalue_v]);
p[val_index] = o-sbtrkt;
}
}
|
08362a65d844081a8a608de93b7d8b8d983a88bb.cu
|
__device__ void ensure_appropriate_values(float e_value, float lognormval, float* bvalue){
if (!isnan(e_value) && !isinf(e_value)) {
if (e_value<=1) {
*bvalue = 0;
}
else {
*bvalue = floorf(logf(e_value)/lognormval);
}
}
*bvalue = (float) min((int) *bvalue, HiCCUPS_W1_MAX_INDX );
}
__device__ void process_masks_lr(int i_start, int i_max_p1, int msize, int t_col, float *c,float *d, int diff,
float* evalue_d, float* evalue_dist_d, float* evalue_v, float* evalue_dist_v){
for (int i = i_start; i < i_max_p1; i++) {
int index = i * msize + t_col;
if (!isnan(c[index])) {
*evalue_d -= c[index];
*evalue_dist_d -= d[abs(i+diff-t_col)];
}
for (int j = -1; j < 2; j++) {
*evalue_v += c[index + j];
*evalue_dist_v += d[abs(i+diff-t_col-j)];
}
}
}
__device__ void process_masks_tb(int j_start, int j_max_p1, int msize, int t_row, float *c,float *d, int diff,
float* evalue_d, float* evalue_dist_d, float* evalue_h, float* evalue_dist_h){
for (int j = j_start; j < j_max_p1; j++) {
int index = t_row * msize + j;
if (!isnan(c[index])) {
*evalue_d -= c[index];
*evalue_dist_d -= d[abs(t_row+diff-j)];
}
for (int i = -1; i < 2; i++) {
*evalue_h += c[(t_row+i) * msize + j];
*evalue_dist_h += d[abs(t_row+i+diff-j)];
}
}
}
extern "C"
__global__ void BasicPeakCallingKernel(float *c, float *expectedbl, float *expecteddonut, float *expectedh,
float *expectedv, float *observed, float *b_bl, float *b_donut, float *b_h, float *b_v, float *p,
float *tbl, float *td, float *th, float *tv, float *d, float *kr1, float *kr2, float *bound1, float *bound3)
{
// 2D Thread ID
int t_col = threadIdx.x + blockIdx.x * blockDim.x;
int t_row = threadIdx.y + blockIdx.y * blockDim.y;
// Evalue is used to store the element of the matrix
// that is computed by the thread
float Evalue_bl = 0;
float Edistvalue_bl = 0;
float Evalue_donut = 0;
float Edistvalue_donut = 0;
float Evalue_h = 0;
float Edistvalue_h = 0;
float Evalue_v = 0;
float Edistvalue_v = 0;
float e_bl = 0;
float e_donut = 0;
float e_h = 0;
float e_v = 0;
float o = 0;
float sbtrkt = 0;
float bvalue_bl = 0;
float bvalue_donut = 0;
float bvalue_h = 0;
float bvalue_v = 0;
int wsize = HiCCUPS_WINDOW;
int msize = HiCCUPS_MATRIX_SIZE;
int pwidth = HiCCUPS_PEAK_WIDTH;
int buffer_width = HiCCUPS_REGION_MARGIN;
int diff = bound1[0] - bound3[0];
int diagDist = abs(t_row+diff-t_col);
int maxIndex = msize-buffer_width;
wsize = min(wsize, (abs(t_row+diff-t_col)-1)/2);
if (wsize <= pwidth) {
wsize = pwidth + 1;
}
wsize = min(wsize, buffer_width);
// only run if within central window (not in data buffer margins)
if (t_row >= buffer_width && t_row<maxIndex && t_col>= buffer_width && t_col<maxIndex) {
// calculate initial bottom left box
for (int i = t_row+1; i <= t_row+wsize; i++) {
for (int j = t_col-wsize; j < t_col; j++) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_bl += c[index];
Edistvalue_bl += d[abs(i+diff-j)];
}
}
}
}
//Subtract off the middle peak
for (int i = t_row+1; i <= t_row+pwidth; i++) {
for (int j = t_col-pwidth; j < t_col; j++) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_bl -= c[index];
Edistvalue_bl -= d[abs(i+diff-j)];
}
}
}
}
//fix box dimensions
while (Evalue_bl<16) {
Evalue_bl =0;
Edistvalue_bl =0;
wsize+=1;
for (int i = t_row+1; i <= t_row+wsize; i++) {
for (int j = t_col-wsize; j < t_col; j++) {
int index = i * msize + j;
if (!isnan(c[index]) && i+diff-j<0) {
Evalue_bl += c[index];
Edistvalue_bl += d[abs(i+diff-j)];
if (i > t_row && i < t_row+pwidth+1 && j > t_col-pwidth-1 && j < t_col) {
Evalue_bl -= c[index];
Edistvalue_bl -= d[abs(i+diff-j)];
}
}
}
}
if (wsize >= buffer_width) {
break;
}
if (2*wsize>= abs(t_row+diff-t_col)) {
break;
}
}
// calculate donut
for (int i = t_row-wsize; i <= t_row+wsize; ++i) {
for (int j = t_col-wsize; j <= t_col+wsize; ++j) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_donut += c[index];
Edistvalue_donut += d[abs(i+diff-j)];
}
}
}
}
//Subtract off the middle peak
for (int i = t_row-pwidth; i <= t_row+pwidth; ++i) {
for (int j = t_col-pwidth; j <= t_col+pwidth; ++j) {
int index = i * msize + j;
if (!isnan(c[index])) {
if (i+diff-j<0) {
Evalue_donut -= c[index];
Edistvalue_donut -= d[abs(i+diff-j)];
}
}
}
}
//Subtract off the cross hairs left side
process_masks_lr(t_row-wsize, t_row-pwidth, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v);
//Subtract off the cross hairs right side
process_masks_lr(t_row+pwidth+1, t_row+wsize+1, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v);
//Subtract off the cross hairs top side
process_masks_tb(t_col-wsize, t_col-pwidth, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h);
//Subtract off the cross hairs bottom side
process_masks_tb(t_col+pwidth+1, t_col+wsize+1, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h);
e_bl = ((Evalue_bl*d[diagDist])/Edistvalue_bl)*kr1[t_row]*kr2[t_col];
e_donut = ((Evalue_donut*d[diagDist])/Edistvalue_donut)*kr1[t_row]*kr2[t_col];
e_h = ((Evalue_h*d[diagDist])/Edistvalue_h)*kr1[t_row]*kr2[t_col];
e_v = ((Evalue_v*d[diagDist])/Edistvalue_v)*kr1[t_row]*kr2[t_col];
float lognorm = logf(powf(2.0,.33));
ensure_appropriate_values(e_bl, lognorm, &bvalue_bl);
ensure_appropriate_values(e_donut, lognorm, &bvalue_donut);
ensure_appropriate_values(e_h, lognorm, &bvalue_h);
ensure_appropriate_values(e_v, lognorm, &bvalue_v);
// Write the matrix to device memory;
// each thread writes one element
int val_index = t_row * msize + t_col;
expectedbl[val_index] = e_bl;
expecteddonut[val_index] = e_donut;
expectedh[val_index] = e_h;
expectedv[val_index] = e_v;
o = roundf(c[val_index]*kr1[t_row]*kr2[t_col]);
observed[val_index] = o;
b_bl[val_index] = bvalue_bl;
b_donut[val_index] = bvalue_donut;
b_h[val_index] = bvalue_h;
b_v[val_index] = bvalue_v;
sbtrkt = fmaxf(tbl[(int) bvalue_bl],td[(int) bvalue_donut]);
sbtrkt = fmaxf(sbtrkt, th[(int) bvalue_h]);
sbtrkt = fmaxf(sbtrkt, tv[(int) bvalue_v]);
p[val_index] = o-sbtrkt;
}
}
|
0366289da02417d7ed29908a25a844dc094b0515.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
// When the grid is empty, output zeros == 0/1, instead of NaN.
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
namespace mydl {
at::Tensor ROIAlign_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
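  // Launch at most 4096 blocks of 512 threads; the grid-stride CUDA_1D_KERNEL_LOOP covers any remaining elements.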
dim3 grid(::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
aligned);
});
hipDeviceSynchronize();
AT_CUDA_CHECK(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>(),
aligned);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace mydl
|
0366289da02417d7ed29908a25a844dc094b0515.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
// When the grid is empty, output zeros == 0/1, instead of NaN.
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
namespace mydl {
at::Tensor ROIAlign_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
aligned);
});
cudaDeviceSynchronize();
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>(),
aligned);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace mydl
|
04939592a165d57f8b92e3495f2841fb7775c7ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <thrust/scan.h>
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \
    ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
typedef unsigned long long int size_int;
using namespace std;
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, hiprandState_t* states) {
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
__global__ void randoms(hiprandState_t* states, float* numbers) {
/* hiprand works like rand - except that it takes a state as a parameter */
numbers[blockIdx.x] = hiprand_uniform(&states[blockIdx.x]);
}
__global__ void binary_search_id(size_int *sample_idx, float *numbers, float *prefix_sum, unsigned int N, size_int n){
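    // Lower-bound binary search: return the first index whose cumulative weight is >= the drawn value.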
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
int l = 0;
int r = n - 1;
float k = numbers[tid];
int mid;
while (l < r){
mid = (l + r) / 2;
if(prefix_sum[mid] < k)
l = mid + 1;
else
r = mid;
}
sample_idx[tid] = r;
}
}
/*
void random_generator(unsigned int N, float *cpu_nums)
{
//CUDA's random number library uses hiprandState_t to keep track of the seed value we will store a random state for every thread
hiprandState_t* states;
// allocate space on the GPU for the random states
hipMalloc((void**) &states, N * sizeof(hiprandState_t));
// invoke the GPU to initialize all of the random states
hipLaunchKernelGGL(( init), dim3(N), dim3(1), 0, 0, time(0), states);
// allocate an array of unsigned ints on the CPU and GPU
float* gpu_nums;
hipMalloc((void**) &gpu_nums, N * sizeof(float));
// invoke the kernel to get some random numbers
hipLaunchKernelGGL(( randoms), dim3(N), dim3(1), 0, 0, states, gpu_nums, 100);
// copy the random numbers back
hipMemcpy(cpu_nums, gpu_nums, N * sizeof(float), hipMemcpyDeviceToHost);
// free the memory we allocated for the states and numbers
hipFree(states);
hipFree(gpu_nums);
}
*/
void random_weight_sample_cuda(unsigned int N, size_int *sample_idx, float *weights, size_int n){
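    // Weighted sampling by inverse CDF: build an inclusive prefix sum of the weights on the host,
    // draw N uniform (0,1] variates on the GPU, and binary-search each variate into the prefix sum.
    // Note: the draws span (0,1], so this appears to assume the weights are normalized to sum to 1.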
//Compute the prefix sum of weights
float prefix_sum_weights[n];
    thrust::inclusive_scan(weights, weights + n, prefix_sum_weights); // out-of-place scan
// Generate N random numbers, between (0,1]
hiprandState_t* states;
/* allocate space on the GPU for the random states */
hipMalloc((void**) &states, N * sizeof(hiprandState_t));
/* invoke the GPU to initialize all of the random states */
hipLaunchKernelGGL(( init), dim3(N), dim3(1), 0, 0, time(0), states);
/* allocate an array of unsigned ints on the CPU and GPU */
float* gpu_nums;
hipMalloc((void**) &gpu_nums, N * sizeof(float));
/* invoke the kernel to get some random numbers */
hipLaunchKernelGGL(( randoms), dim3(N), dim3(1), 0, 0, states, gpu_nums);
//allocate gpu array for d_weights and d_sample_idx
float* d_weights;
hipMalloc((void**) &d_weights, n * sizeof(float));
size_int* d_sample_idx;
hipMalloc((void**) &d_sample_idx, N * sizeof(size_int));
//copy weights array to d_weights
hipMemcpy(d_weights, prefix_sum_weights, sizeof(float) * n, hipMemcpyHostToDevice);
int block_size = 256;
    int grid_size = (N + block_size - 1)/block_size; // ensure that we launch enough threads
hipLaunchKernelGGL(( binary_search_id), dim3(grid_size), dim3(block_size), 0, 0, d_sample_idx, gpu_nums, d_weights, N, n);
//copy d_sample_idx back to CPU
hipMemcpy(sample_idx, d_sample_idx, N * sizeof(size_int), hipMemcpyDeviceToHost);
/* free the memory we allocated for the states and numbers */
hipFree(states);
hipFree(gpu_nums);
hipFree(d_weights);
hipFree(d_sample_idx);
}
|
04939592a165d57f8b92e3495f2841fb7775c7ae.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <thrust/scan.h>
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \
    ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
typedef unsigned long long int size_int;
using namespace std;
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, curandState_t* states) {
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
__global__ void randoms(curandState_t* states, float* numbers) {
/* curand works like rand - except that it takes a state as a parameter */
numbers[blockIdx.x] = curand_uniform(&states[blockIdx.x]);
}
__global__ void binary_search_id(size_int *sample_idx, float *numbers, float *prefix_sum, unsigned int N, size_int n){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
int l = 0;
int r = n - 1;
float k = numbers[tid];
int mid;
while (l < r){
mid = (l + r) / 2;
if(prefix_sum[mid] < k)
l = mid + 1;
else
r = mid;
}
sample_idx[tid] = r;
}
}
/*
void random_generator(unsigned int N, float *cpu_nums)
{
//CUDA's random number library uses curandState_t to keep track of the seed value we will store a random state for every thread
curandState_t* states;
// allocate space on the GPU for the random states
cudaMalloc((void**) &states, N * sizeof(curandState_t));
// invoke the GPU to initialize all of the random states
init<<<N, 1>>>(time(0), states);
// allocate an array of unsigned ints on the CPU and GPU
float* gpu_nums;
cudaMalloc((void**) &gpu_nums, N * sizeof(float));
// invoke the kernel to get some random numbers
randoms<<<N, 1>>>(states, gpu_nums, 100);
// copy the random numbers back
cudaMemcpy(cpu_nums, gpu_nums, N * sizeof(float), cudaMemcpyDeviceToHost);
// free the memory we allocated for the states and numbers
cudaFree(states);
cudaFree(gpu_nums);
}
*/
void random_weight_sample_cuda(unsigned int N, size_int *sample_idx, float *weights, size_int n){
//Compute the prefix sum of weights
float prefix_sum_weights[n];
    thrust::inclusive_scan(weights, weights + n, prefix_sum_weights); // out-of-place scan
// Generate N random numbers, between (0,1]
curandState_t* states;
/* allocate space on the GPU for the random states */
cudaMalloc((void**) &states, N * sizeof(curandState_t));
/* invoke the GPU to initialize all of the random states */
init<<<N, 1>>>(time(0), states);
/* allocate an array of unsigned ints on the CPU and GPU */
float* gpu_nums;
cudaMalloc((void**) &gpu_nums, N * sizeof(float));
/* invoke the kernel to get some random numbers */
randoms<<<N, 1>>>(states, gpu_nums);
//allocate gpu array for d_weights and d_sample_idx
float* d_weights;
cudaMalloc((void**) &d_weights, n * sizeof(float));
size_int* d_sample_idx;
cudaMalloc((void**) &d_sample_idx, N * sizeof(size_int));
//copy weights array to d_weights
cudaMemcpy(d_weights, prefix_sum_weights, sizeof(float) * n, cudaMemcpyHostToDevice);
int block_size = 256;
    int grid_size = (N + block_size - 1)/block_size; // ensure that we launch enough threads
binary_search_id<<<grid_size, block_size>>>(d_sample_idx, gpu_nums, d_weights, N, n);
//copy d_sample_idx back to CPU
cudaMemcpy(sample_idx, d_sample_idx, N * sizeof(size_int), cudaMemcpyDeviceToHost);
/* free the memory we allocated for the states and numbers */
cudaFree(states);
cudaFree(gpu_nums);
cudaFree(d_weights);
cudaFree(d_sample_idx);
}
|
9d778677591a72c8ff5d88a6a49e3eb02903fb3c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
 * A simple example of using a structure of arrays to store data on the device.
* This example is used to study the impact on performance of data layout on the
* GPU.
*
* SoA: contiguous reads for x and y
*/
#define LEN 1<<22
struct InnerArray
{
float x[LEN];
float y[LEN];
};
// functions for inner array outer struct
void initialInnerArray(InnerArray *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip->x[i] = (float)( rand() & 0xFF ) / 100.0f;
ip->y[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
printf("printout idx %d: x %f y %f\n", idx, C->x[idx], C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef->x[i] - gpuRef->x[i]) > epsilon)
{
match = 0;
printf("different on x %dth element: host %f gpu %f\n", i,
hostRef->x[i], gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i]) > epsilon)
{
match = 0;
printf("different on y %dth element: host %f gpu %f\n", i,
hostRef->y[i], gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup2(InnerArray *data, InnerArray * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// test for struct of arrays
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A, nElem);
testInnerArrayHost(h_A, hostRef, nElem);
// allocate device memory
InnerArray *d_A, *d_C;
CHECK(hipMalloc((InnerArray**)&d_A, nBytes));
CHECK(hipMalloc((InnerArray**)&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
// set up offset for summary
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1:
double iStart = seconds();
hipLaunchKernelGGL(( warmup2), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup2 <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
iStart = seconds();
hipLaunchKernelGGL(( testInnerArray), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("innerarray <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
|
9d778677591a72c8ff5d88a6a49e3eb02903fb3c.cu
|
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
 * A simple example of using a structure of arrays to store data on the device.
 * This example is used to study the impact of data layout on performance on
 * the GPU.
*
* SoA: contiguous reads for x and y
*/
#define LEN (1<<22)
struct InnerArray
{
float x[LEN];
float y[LEN];
};
// functions for inner array outer struct
void initialInnerArray(InnerArray *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip->x[i] = (float)( rand() & 0xFF ) / 100.0f;
ip->y[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
printf("printout idx %d: x %f y %f\n", idx, C->x[idx], C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef->x[i] - gpuRef->x[i]) > epsilon)
{
match = 0;
printf("different on x %dth element: host %f gpu %f\n", i,
hostRef->x[i], gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i]) > epsilon)
{
match = 0;
printf("different on y %dth element: host %f gpu %f\n", i,
hostRef->y[i], gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup2(InnerArray *data, InnerArray * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// test for struct of arrays
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A, nElem);
testInnerArrayHost(h_A, hostRef, nElem);
// allocate device memory
InnerArray *d_A, *d_C;
CHECK(cudaMalloc((InnerArray**)&d_A, nBytes));
CHECK(cudaMalloc((InnerArray**)&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
// set up offset for summary
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
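    // Worked example of the ceiling division above (illustrative values): with
    // nElem = 1 << 22 = 4194304 and the default blocksize = 128,
    // grid.x = (4194304 + 127) / 128 = 32768, so every element gets one thread.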
// kernel 1:
double iStart = seconds();
warmup2<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup2 <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
iStart = seconds();
testInnerArray<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("innerarray <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
5b954384be0f12dad856903701ba2abe34e449ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template<typename T>
__device__ void
tearKernel(void *vx, Nd4jLong const* xShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
__shared__ Nd4jLong tadLength;
__shared__ int tadEWS;
__shared__ int zEWS;
// __shared__ int tadRank;
__shared__ Nd4jLong numTads;
// __shared__ int zRank;
// __shared__ Nd4jLong *tadShape;
// __shared__ Nd4jLong *tadStride;
// __shared__ Nd4jLong const* zShape;
// __shared__ Nd4jLong const* zStride;
__shared__ T* x;
if (threadIdx.x == 0) {
tadLength = shape::length(tadShapeInfo);
tadEWS = shape::elementWiseStride(tadShapeInfo);
zEWS = shape::elementWiseStride(zShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
x = static_cast<T *>(vx);
}
__syncthreads();
for (Nd4jLong r = blockIdx.x; r < numTads; r += gridDim.x) {
T *z = (T *) targets[r];
T *s = x + tadOffsets[r];
if (zEWS > 0 && tadEWS > 0) {
for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x)
z[i * zEWS] = s[i * tadEWS];
} else {
for (Nd4jLong j = threadIdx.x; j < tadLength; j += blockDim.x) {
auto xOffset = shape::getIndexOffset(j, tadShapeInfo);
auto zOffset = shape::getIndexOffset(j, zShapeInfo);
z[zOffset] = s[xOffset];
}
}
}
}
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void
execTearKernel(void *vx, Nd4jLong const* xShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
tearKernel<T>(vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}
////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void tearKernelGeneric(dim3 &launchDims, hipStream_t *stream,
void *vx, Nd4jLong const* xShapeInfo,
Nd4jPointer *targets, Nd4jLong const* zShapeInfo,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) {
hipLaunchKernelGGL(( execTearKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "tear(...) failed");
}
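// Commented-out usage sketch; every name below (stream, input, shapeInfo,
// outputPointers, zShapeInfo, tadShapeInfo, tadOffsets) is a hypothetical
// placeholder rather than part of this file's API:
//
//   dim3 launchDims(256, 512, 8192);                 // grid, block, shared memory
//   tearKernelGeneric<float>(launchDims, &stream,
//                            input, shapeInfo,       // source buffer and its shape info
//                            outputPointers, zShapeInfo,
//                            tadShapeInfo, tadOffsets);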
BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL tearKernelGeneric, (dim3 & launchDims, hipStream_t * stream, void * vx, Nd4jLong const* xShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets), LIBND4J_TYPES);
}
|
5b954384be0f12dad856903701ba2abe34e449ce.cu
|
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template<typename T>
__device__ void
tearKernel(void *vx, Nd4jLong const* xShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
__shared__ Nd4jLong tadLength;
__shared__ int tadEWS;
__shared__ int zEWS;
// __shared__ int tadRank;
__shared__ Nd4jLong numTads;
// __shared__ int zRank;
// __shared__ Nd4jLong *tadShape;
// __shared__ Nd4jLong *tadStride;
// __shared__ Nd4jLong const* zShape;
// __shared__ Nd4jLong const* zStride;
__shared__ T* x;
if (threadIdx.x == 0) {
tadLength = shape::length(tadShapeInfo);
tadEWS = shape::elementWiseStride(tadShapeInfo);
zEWS = shape::elementWiseStride(zShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
x = static_cast<T *>(vx);
}
__syncthreads();
for (Nd4jLong r = blockIdx.x; r < numTads; r += gridDim.x) {
T *z = (T *) targets[r];
T *s = x + tadOffsets[r];
if (zEWS > 0 && tadEWS > 0) {
for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x)
z[i * zEWS] = s[i * tadEWS];
} else {
for (Nd4jLong j = threadIdx.x; j < tadLength; j += blockDim.x) {
auto xOffset = shape::getIndexOffset(j, tadShapeInfo);
auto zOffset = shape::getIndexOffset(j, zShapeInfo);
z[zOffset] = s[xOffset];
}
}
}
}
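// Illustrative sizing example for the split above (numbers are hypothetical):
// tearing a 4 x 3 x 5 array along its last dimension gives tadLength = 5 and
// numTads = 60 / 5 = 12, so each of the 12 TADs is copied into targets[r].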
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void
execTearKernel(void *vx, Nd4jLong const* xShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
tearKernel<T>(vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}
////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void tearKernelGeneric(dim3 &launchDims, cudaStream_t *stream,
void *vx, Nd4jLong const* xShapeInfo,
Nd4jPointer *targets, Nd4jLong const* zShapeInfo,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) {
execTearKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "tear(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL tearKernelGeneric, (dim3 & launchDims, cudaStream_t * stream, void * vx, Nd4jLong const* xShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets), LIBND4J_TYPES);
}
|
a2f3dacb77d7f281bfd272d46c17fd1993c7f726.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
#include "texture_binder.hpp"
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y);
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
hipLaunchKernelGGL(( clear_volume_kernel), dim3(grid), dim3(block), 0, 0, volume);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, hipFilterModePoint, hipAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
            float3 vc = vol2cam * vx; // transform from the volume coordinate frame to the camera frame
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
            // this is actually a workaround for Kepler: it doesn't return 0.f for texture
            // fetches at out-of-border coordinates even with the border address mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min (weight_prev + 1, volume.max_weight);
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
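        // The update above is the usual running weighted average of the TSDF:
        //   tsdf_new = (tsdf_prev * weight_prev + tsdf) / (weight_prev + 1)
        //   weight_new = min(weight_prev + 1, max_weight)
        // For example, tsdf_prev = 0.4 with weight_prev = 3 and a new sample
        // tsdf = 0.8 gives tsdf_new = (0.4 * 3 + 0.8) / 4 = 0.5.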
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist;
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block), 0, 0, ti, volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
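        // The eight terms above form standard trilinear interpolation: with
        // fractional offsets (a, b, c) inside the cell at g, the corner (i, j, k)
        // is weighted by (i ? a : 1 - a) * (j ? b : 1 - b) * (k ? c : 1 - c),
        // and the eight weights sum to 1.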
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
                // We subtract the voxel size to minimize checks afterwards.
                // Note: the origin of the volume coordinate frame is placed
                // at the center of voxel (0,0,0), not at the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
                // We subtract the voxel size to minimize checks afterwards.
                // Note: the origin of the volume coordinate frame is placed
                // at the center of voxel (0,0,0), not at the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (hipGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (hipGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
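        // Example of the warp scan above: if all 32 lanes hold 1, the inclusive
        // result is 1, 2, ..., 32 and the exclusive result is 0, 1, ..., 31,
        // i.e. each lane's offset into a warp-wide output buffer.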
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
                /// note: count how many points this warp produced in the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
                    t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
hipLaunchKernelGGL(( extract_kernel), dim3(grid), dim3(block), 0, 0, fs, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
hipLaunchKernelGGL(( extract_normals_kernel), dim3(grid), dim3(block), 0, 0, en, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
|
a2f3dacb77d7f281bfd272d46c17fd1993c7f726.cu
|
#include "device.hpp"
#include "texture_binder.hpp"
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y);
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
clear_volume_kernel<<<grid, block>>>(volume);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
            float3 vc = vol2cam * vx; // transform from the volume coordinate frame to the camera frame
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
            // this is actually a workaround for Kepler: it doesn't return 0.f for texture
            // fetches at out-of-border coordinates even with the border address mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min (weight_prev + 1, volume.max_weight);
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist;
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
                // We subtract the voxel size to minimize checks afterwards.
                // Note: the origin of the volume coordinate frame is placed
                // at the center of voxel (0,0,0), not at the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
                // We subtract the voxel size to minimize checks afterwards.
                // Note: the origin of the volume coordinate frame is placed
                // at the center of voxel (0,0,0), not at the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
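        // The surface point above is found by linearly interpolating the zero
        // crossing between two bracketing samples: with F(tcurr) = Ft > 0 and
        // F(tcurr + time_step) = Ftdt < 0, solving the linear model for F = 0
        // gives Ts = tcurr - time_step * Ft / (Ftdt - Ft), the expression used
        // in both operator() overloads.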
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (cudaGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (cudaGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
                /// note: count how many points this warp produced in the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
                    t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
extract_kernel<<<grid, block>>>(fs, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
extract_normals_kernel<<<grid, block>>>(en, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
b0b8354a3e1ac3d5e39fdc8d109290a0ec0cd506.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Author: Prasun Gera [email protected]
#include <cugraph.h>
#include <rmm_utils.h>
#include <algorithm>
#include "traversal_common.cuh"
#include "sssp.cuh"
#include "sssp_kernels_hip.cuh"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::setup() {
// Working data
// Each vertex can be in the frontier at most once
ALLOC_TRY(&frontier, n * sizeof(IndexType), nullptr);
ALLOC_TRY(&new_frontier, n * sizeof(IndexType), nullptr);
// size of bitmaps for vertices
vertices_bmap_size = (n / (8 * sizeof(int)) + 1);
// size of bitmaps for edges
edges_bmap_size = (nnz / (8 * sizeof(int)) + 1);
// ith bit of isolated_bmap is set <=> degree of ith vertex = 0
ALLOC_TRY(&isolated_bmap, sizeof(int) * vertices_bmap_size, nullptr);
// Allocate buffer for data that need to be reset every iteration
iter_buffer_size =
sizeof(int) * (edges_bmap_size + vertices_bmap_size) + sizeof(IndexType);
ALLOC_TRY(&iter_buffer, iter_buffer_size, nullptr);
// ith bit of relaxed_edges_bmap <=> ith edge was relaxed
relaxed_edges_bmap = (int*)iter_buffer;
// ith bit of next_frontier_bmap <=> vertex is active in the next frontier
next_frontier_bmap = (int*)iter_buffer + edges_bmap_size;
// num vertices in the next frontier
d_new_frontier_cnt = next_frontier_bmap + vertices_bmap_size;
// vertices_degree[i] = degree of vertex i
ALLOC_TRY(&vertex_degree, sizeof(IndexType) * n, nullptr);
// Cub working data
traversal::cub_exclusive_sum_alloc(
n + 1, d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes);
// frontier_vertex_degree[i] is the degree of vertex frontier[i]
ALLOC_TRY(&frontier_vertex_degree, n * sizeof(IndexType), nullptr);
// exclusive sum of frontier_vertex_degree
ALLOC_TRY(&exclusive_sum_frontier_vertex_degree,
(n + 1) * sizeof(IndexType),
nullptr);
// We use buckets of edges (32 edges per bucket for now, see exact macro in
// sssp_kernels). frontier_vertex_degree_buckets_offsets[i] is the index k
// such as frontier[k] is the source of the first edge of the bucket
// See top down kernels for more details
size_t bucket_off_size =
((nnz / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2) *
sizeof(IndexType);
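// Worst case: up to (nnz / TOP_DOWN_EXPAND_DIMX + 1) blocks, NBUCKETS_PER_BLOCK
// bucket offsets per block, plus two sentinel entries.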
ALLOC_TRY(&exclusive_sum_frontier_vertex_buckets_offsets,
bucket_off_size,
nullptr);
// Repurpose d_new_frontier_cnt temporarily
IndexType* d_nisolated = d_new_frontier_cnt;
hipMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream);
// Computing isolated_bmap
// Only dependent on graph - not source vertex - done once
traversal::flag_isolated_vertices(
n, isolated_bmap, row_offsets, vertex_degree, d_nisolated, stream);
hipMemcpyAsync(&nisolated,
d_nisolated,
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
// We need nisolated to be ready to use
// nisolated is the number of isolated (zero out-degree) vertices
hipStreamSynchronize(stream);
}
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::configure(DistType* _distances,
IndexType* _predecessors,
int* _edge_mask) {
distances = _distances;
predecessors = _predecessors;
edge_mask = _edge_mask;
useEdgeMask = (edge_mask != NULL);
computeDistances = (distances != NULL);
computePredecessors = (predecessors != NULL);
// We need distances for SSSP even if the caller doesn't need them
if (!computeDistances)
ALLOC_TRY(&distances, n * sizeof(DistType), nullptr);
// Need next_distances in either case
ALLOC_TRY(&next_distances, n * sizeof(DistType), nullptr);
}
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::traverse(IndexType source_vertex) {
// Init distances to infinities
traversal::fill_vec(distances, n, traversal::vec_t<DistType>::max, stream);
traversal::fill_vec(
next_distances, n, traversal::vec_t<DistType>::max, stream);
// If needed, set all predecessors to non-existent (-1)
if (computePredecessors) {
hipMemsetAsync(predecessors, -1, n * sizeof(IndexType), stream);
}
//
// Initial frontier
//
hipMemsetAsync(&distances[source_vertex], 0, sizeof(DistType), stream);
hipMemsetAsync(&next_distances[source_vertex], 0, sizeof(DistType), stream);
int current_isolated_bmap_source_vert = 0;
hipMemcpyAsync(&current_isolated_bmap_source_vert,
&isolated_bmap[source_vertex / INT_SIZE],
sizeof(int),
hipMemcpyDeviceToHost);
// We need current_isolated_bmap_source_vert
hipStreamSynchronize(stream);
int m = (1 << (source_vertex % INT_SIZE));
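// m selects source_vertex's bit within its INT_SIZE-bit word of the isolated bitmap.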
// If source is isolated (zero outdegree), we are done
if ((m & current_isolated_bmap_source_vert)) {
// Init distances and predecessors are done; stream is synchronized
}
// Adding source_vertex to init frontier
hipMemcpyAsync(&frontier[0],
&source_vertex,
sizeof(IndexType),
hipMemcpyHostToDevice,
stream);
// Number of vertices in the frontier and number of out-edges from the
// frontier
IndexType mf, nf;
nf = 1;
int iters = 0;
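// Frontier-based relaxation (Bellman-Ford style): each iteration expands all
// out-edges of the current frontier, writes improved tentative distances into
// next_distances, and gathers the improved vertices into new_frontier. With no
// negative cycles this terminates within n iterations, hence the bail-out below.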
while (nf > 0) {
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, frontier, vertex_degree, nf, stream);
traversal::exclusive_sum(d_cub_exclusive_sum_storage,
cub_exclusive_sum_storage_bytes,
frontier_vertex_degree,
exclusive_sum_frontier_vertex_degree,
nf + 1,
stream);
hipMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
// We need mf to know the next kernel's launch dims
hipStreamSynchronize(stream);
traversal::compute_bucket_offsets(
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets,
nf,
mf,
stream);
// Reset the transient structures to 0
hipMemsetAsync(iter_buffer, 0, iter_buffer_size, stream);
sssp_kernels::frontier_expand(
row_offsets,
col_indices,
edge_weights,
frontier,
nf,
mf,
new_frontier,
d_new_frontier_cnt,
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets,
distances,
next_distances,
predecessors,
edge_mask,
next_frontier_bmap,
relaxed_edges_bmap,
isolated_bmap,
stream);
hipMemcpyAsync(&nf,
d_new_frontier_cnt,
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
// Copy next_distances to distances
hipMemcpyAsync(distances,
next_distances,
n * sizeof(DistType),
hipMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
// We need nf for the loop
hipStreamSynchronize(stream);
// Swap frontiers
IndexType* tmp = frontier;
frontier = new_frontier;
new_frontier = tmp;
iters++;
if (iters > n) {
// Bail out. Got a graph with a negative cycle
CUGRAPH_FAIL("ERROR: Max iterations exceeded. Check the graph for negative weight cycles");
}
}
}
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::clean() {
// the vectors have a destructor that takes care of cleaning
ALLOC_FREE_TRY(frontier, nullptr);
ALLOC_FREE_TRY(new_frontier, nullptr);
ALLOC_FREE_TRY(isolated_bmap, nullptr);
ALLOC_FREE_TRY(vertex_degree, nullptr);
ALLOC_FREE_TRY(d_cub_exclusive_sum_storage, nullptr);
ALLOC_FREE_TRY(frontier_vertex_degree, nullptr);
ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_degree, nullptr);
ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_buckets_offsets, nullptr);
ALLOC_FREE_TRY(iter_buffer, nullptr);
// Distances were working data
if (!computeDistances)
ALLOC_FREE_TRY(distances, nullptr);
// next_distances were working data
ALLOC_FREE_TRY(next_distances, nullptr);
}
} // namespace detail
/**
* ---------------------------------------------------------------------------*
* @brief Native sssp with predecessors
*
* @file sssp.cu
* --------------------------------------------------------------------------*/
void sssp(Graph* gdf_G,
gdf_column* sssp_distances,
gdf_column* predecessors,
const int source_vert) {
CUGRAPH_EXPECTS(gdf_G->adjList != nullptr, "Invalid API parameter");
void *sssp_dist_ptr, *pred_ptr;
// NOTE: gdf_column struct doesn't have a default constructor. So we can get
// garbage values for member fields. Right now, it's the caller's
// responsibility to ensure that the fields are initialised if the gdf_column
// ptr is not null
sssp_dist_ptr = (sssp_distances && sssp_distances->size)
? sssp_distances->data
: nullptr;
pred_ptr =
(predecessors && predecessors->size) ? predecessors->data : nullptr;
CUGRAPH_EXPECTS(sssp_dist_ptr || pred_ptr, "Invalid API parameter");
if (sssp_dist_ptr) {
CUGRAPH_EXPECTS(!sssp_distances->valid, "Column must be valid");
// Integral types are possible, but we don't want to deal with overflow
// conditions right now
CUGRAPH_EXPECTS(sssp_distances->dtype == GDF_FLOAT32 ||
sssp_distances->dtype == GDF_FLOAT64,
"Invalid API parameter");
}
CUGRAPH_EXPECTS(gdf_G->adjList->offsets->dtype == GDF_INT32,
"Unsupported data type");
CUGRAPH_EXPECTS(gdf_G->adjList->indices->dtype == GDF_INT32,
"Unsupported data type");
if (pred_ptr)
CUGRAPH_EXPECTS(predecessors->dtype == gdf_G->adjList->indices->dtype,
"Unsupported data type");
if (sssp_dist_ptr)
CUGRAPH_EXPECTS(gdf_G->adjList->offsets->size - 1 <= sssp_distances->size,
"Invalid API parameter");
if (!gdf_G->adjList->edge_data) {
// Generate unit weights
// TODO: This should fallback to BFS, but for now it'll go through the
// SSSP path since BFS needs the directed flag, which should not be
// necessary for the SSSP API. We can pass directed to the BFS call, but
// BFS also does only integer distances right now whereas we need float or
// double
void* d_edge_data;
gdf_G->adjList->edge_data = new gdf_column;
hipStream_t stream{nullptr};
// If distances array is given and is double, generate the weights in
// double
if (sssp_dist_ptr && sssp_distances->dtype == GDF_FLOAT64) {
std::vector<double> h_edge_data(gdf_G->adjList->indices->size, 1.0);
size_t edge_data_size = sizeof(double) * h_edge_data.size();
ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream);
CUDA_TRY(hipMemcpy(d_edge_data,
&h_edge_data[0],
edge_data_size,
hipMemcpyHostToDevice));
gdf_column_view(gdf_G->adjList->edge_data,
d_edge_data,
nullptr,
gdf_G->adjList->indices->size,
GDF_FLOAT64);
} else {
// Else generate float
std::vector<float> h_edge_data(gdf_G->adjList->indices->size, 1.0);
size_t edge_data_size = sizeof(float) * h_edge_data.size();
ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream);
CUDA_TRY(hipMemcpy(d_edge_data,
&h_edge_data[0],
edge_data_size,
hipMemcpyHostToDevice));
gdf_column_view(gdf_G->adjList->edge_data,
d_edge_data,
nullptr,
gdf_G->adjList->indices->size,
GDF_FLOAT32);
}
} else {
// Got weighted graph
CUGRAPH_EXPECTS(
gdf_G->adjList->edge_data->size == gdf_G->adjList->indices->size,
"Invalid API parameter");
CUGRAPH_EXPECTS(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 ||
gdf_G->adjList->edge_data->dtype == GDF_FLOAT64,
"Invalid API parameter");
if (sssp_dist_ptr)
CUGRAPH_EXPECTS(gdf_G->adjList->edge_data->dtype == sssp_distances->dtype,
"Unsupported data type");
// SSSP is not defined for graphs with negative weight cycles
// Warn user about any negative edges
if (gdf_G->prop && gdf_G->prop->has_negative_edges == GDF_PROP_TRUE)
std::cerr << "WARN: The graph has negative weight edges. SSSP will not "
"converge if the graph has negative weight cycles\n";
}
int n = gdf_G->adjList->offsets->size - 1;
int e = gdf_G->adjList->indices->size;
int* offsets_ptr = (int*)gdf_G->adjList->offsets->data;
int* indices_ptr = (int*)gdf_G->adjList->indices->data;
void* edge_weights_ptr = static_cast<void*>(gdf_G->adjList->edge_data->data);
if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT32) {
cugraph::detail::SSSP<int, float> sssp(
n, e, offsets_ptr, indices_ptr, static_cast<float*>(edge_weights_ptr));
sssp.configure(static_cast<float*>(sssp_dist_ptr),
static_cast<int*>(pred_ptr),
nullptr);
sssp.traverse(source_vert);
} else if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT64) {
cugraph::detail::SSSP<int, double> sssp(n,
e,
offsets_ptr,
indices_ptr,
static_cast<double*>(edge_weights_ptr));
sssp.configure(static_cast<double*>(sssp_dist_ptr),
static_cast<int*>(pred_ptr),
nullptr);
sssp.traverse(source_vert);
} else {
CUGRAPH_EXPECTS(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 ||
gdf_G->adjList->edge_data->dtype == GDF_FLOAT64,
"Invalid API parameter");
}
}
} // namespace cugraph
|
b0b8354a3e1ac3d5e39fdc8d109290a0ec0cd506.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Author: Prasun Gera [email protected]
#include <cugraph.h>
#include <rmm_utils.h>
#include <algorithm>
#include "traversal_common.cuh"
#include "sssp.cuh"
#include "sssp_kernels.cuh"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::setup() {
// Working data
// Each vertex can be in the frontier at most once
ALLOC_TRY(&frontier, n * sizeof(IndexType), nullptr);
ALLOC_TRY(&new_frontier, n * sizeof(IndexType), nullptr);
// size of bitmaps for vertices
vertices_bmap_size = (n / (8 * sizeof(int)) + 1);
// size of bitmaps for edges
edges_bmap_size = (nnz / (8 * sizeof(int)) + 1);
// ith bit of isolated_bmap is set <=> degree of ith vertex = 0
ALLOC_TRY(&isolated_bmap, sizeof(int) * vertices_bmap_size, nullptr);
// Allocate buffer for data that need to be reset every iteration
iter_buffer_size =
sizeof(int) * (edges_bmap_size + vertices_bmap_size) + sizeof(IndexType);
ALLOC_TRY(&iter_buffer, iter_buffer_size, nullptr);
// ith bit of relaxed_edges_bmap <=> ith edge was relaxed
relaxed_edges_bmap = (int*)iter_buffer;
// ith bit of next_frontier_bmap <=> vertex is active in the next frontier
next_frontier_bmap = (int*)iter_buffer + edges_bmap_size;
// num vertices in the next frontier
d_new_frontier_cnt = next_frontier_bmap + vertices_bmap_size;
// vertices_degree[i] = degree of vertex i
ALLOC_TRY(&vertex_degree, sizeof(IndexType) * n, nullptr);
// Cub working data
traversal::cub_exclusive_sum_alloc(
n + 1, d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes);
// frontier_vertex_degree[i] is the degree of vertex frontier[i]
ALLOC_TRY(&frontier_vertex_degree, n * sizeof(IndexType), nullptr);
// exclusive sum of frontier_vertex_degree
ALLOC_TRY(&exclusive_sum_frontier_vertex_degree,
(n + 1) * sizeof(IndexType),
nullptr);
// We use buckets of edges (32 edges per bucket for now, see exact macro in
// sssp_kernels). frontier_vertex_degree_buckets_offsets[i] is the index k
// such as frontier[k] is the source of the first edge of the bucket
// See top down kernels for more details
size_t bucket_off_size =
((nnz / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2) *
sizeof(IndexType);
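// Worst case: up to (nnz / TOP_DOWN_EXPAND_DIMX + 1) blocks, NBUCKETS_PER_BLOCK
// bucket offsets per block, plus two sentinel entries.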
ALLOC_TRY(&exclusive_sum_frontier_vertex_buckets_offsets,
bucket_off_size,
nullptr);
// Repurpose d_new_frontier_cnt temporarily
IndexType* d_nisolated = d_new_frontier_cnt;
cudaMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream);
// Computing isolated_bmap
// Only dependent on graph - not source vertex - done once
traversal::flag_isolated_vertices(
n, isolated_bmap, row_offsets, vertex_degree, d_nisolated, stream);
cudaMemcpyAsync(&nisolated,
d_nisolated,
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
// We need nisolated to be ready to use
// nisolated is the number of isolated (zero out-degree) vertices
cudaStreamSynchronize(stream);
}
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::configure(DistType* _distances,
IndexType* _predecessors,
int* _edge_mask) {
distances = _distances;
predecessors = _predecessors;
edge_mask = _edge_mask;
useEdgeMask = (edge_mask != NULL);
computeDistances = (distances != NULL);
computePredecessors = (predecessors != NULL);
// We need distances for SSSP even if the caller doesn't need them
if (!computeDistances)
ALLOC_TRY(&distances, n * sizeof(DistType), nullptr);
// Need next_distances in either case
ALLOC_TRY(&next_distances, n * sizeof(DistType), nullptr);
}
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::traverse(IndexType source_vertex) {
// Init distances to infinities
traversal::fill_vec(distances, n, traversal::vec_t<DistType>::max, stream);
traversal::fill_vec(
next_distances, n, traversal::vec_t<DistType>::max, stream);
// If needed, set all predecessors to non-existent (-1)
if (computePredecessors) {
cudaMemsetAsync(predecessors, -1, n * sizeof(IndexType), stream);
}
//
// Initial frontier
//
cudaMemsetAsync(&distances[source_vertex], 0, sizeof(DistType), stream);
cudaMemsetAsync(&next_distances[source_vertex], 0, sizeof(DistType), stream);
int current_isolated_bmap_source_vert = 0;
cudaMemcpyAsync(&current_isolated_bmap_source_vert,
&isolated_bmap[source_vertex / INT_SIZE],
sizeof(int),
cudaMemcpyDeviceToHost);
// We need current_isolated_bmap_source_vert
cudaStreamSynchronize(stream);
int m = (1 << (source_vertex % INT_SIZE));
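// m selects source_vertex's bit within its INT_SIZE-bit word of the isolated bitmap.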
// If source is isolated (zero outdegree), we are done
if ((m & current_isolated_bmap_source_vert)) {
// Init distances and predecessors are done; stream is synchronized
}
// Adding source_vertex to init frontier
cudaMemcpyAsync(&frontier[0],
&source_vertex,
sizeof(IndexType),
cudaMemcpyHostToDevice,
stream);
// Number of vertices in the frontier and number of out-edges from the
// frontier
IndexType mf, nf;
nf = 1;
int iters = 0;
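// Frontier-based relaxation (Bellman-Ford style): each iteration expands all
// out-edges of the current frontier, writes improved tentative distances into
// next_distances, and gathers the improved vertices into new_frontier. With no
// negative cycles this terminates within n iterations, hence the bail-out below.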
while (nf > 0) {
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, frontier, vertex_degree, nf, stream);
traversal::exclusive_sum(d_cub_exclusive_sum_storage,
cub_exclusive_sum_storage_bytes,
frontier_vertex_degree,
exclusive_sum_frontier_vertex_degree,
nf + 1,
stream);
cudaMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
// We need mf to know the next kernel's launch dims
cudaStreamSynchronize(stream);
traversal::compute_bucket_offsets(
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets,
nf,
mf,
stream);
// Reset the transient structures to 0
cudaMemsetAsync(iter_buffer, 0, iter_buffer_size, stream);
sssp_kernels::frontier_expand(
row_offsets,
col_indices,
edge_weights,
frontier,
nf,
mf,
new_frontier,
d_new_frontier_cnt,
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets,
distances,
next_distances,
predecessors,
edge_mask,
next_frontier_bmap,
relaxed_edges_bmap,
isolated_bmap,
stream);
cudaMemcpyAsync(&nf,
d_new_frontier_cnt,
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
// Copy next_distances to distances
cudaMemcpyAsync(distances,
next_distances,
n * sizeof(DistType),
cudaMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
// We need nf for the loop
cudaStreamSynchronize(stream);
// Swap frontiers
IndexType* tmp = frontier;
frontier = new_frontier;
new_frontier = tmp;
iters++;
if (iters > n) {
// Bail out. Got a graph with a negative cycle
CUGRAPH_FAIL("ERROR: Max iterations exceeded. Check the graph for negative weight cycles");
}
}
}
template <typename IndexType, typename DistType>
void SSSP<IndexType, DistType>::clean() {
// the vectors have a destructor that takes care of cleaning
ALLOC_FREE_TRY(frontier, nullptr);
ALLOC_FREE_TRY(new_frontier, nullptr);
ALLOC_FREE_TRY(isolated_bmap, nullptr);
ALLOC_FREE_TRY(vertex_degree, nullptr);
ALLOC_FREE_TRY(d_cub_exclusive_sum_storage, nullptr);
ALLOC_FREE_TRY(frontier_vertex_degree, nullptr);
ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_degree, nullptr);
ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_buckets_offsets, nullptr);
ALLOC_FREE_TRY(iter_buffer, nullptr);
// Distances were working data
if (!computeDistances)
ALLOC_FREE_TRY(distances, nullptr);
// next_distances were working data
ALLOC_FREE_TRY(next_distances, nullptr);
}
} // namespace detail
/**
* ---------------------------------------------------------------------------*
* @brief Native sssp with predecessors
*
* @file sssp.cu
* --------------------------------------------------------------------------*/
void sssp(Graph* gdf_G,
gdf_column* sssp_distances,
gdf_column* predecessors,
const int source_vert) {
CUGRAPH_EXPECTS(gdf_G->adjList != nullptr, "Invalid API parameter");
void *sssp_dist_ptr, *pred_ptr;
// NOTE: gdf_column struct doesn't have a default constructor. So we can get
// garbage values for member fields. Right now, it's the caller's
// responsibility to ensure that the fields are initialised if the gdf_column
// ptr is not null
sssp_dist_ptr = (sssp_distances && sssp_distances->size)
? sssp_distances->data
: nullptr;
pred_ptr =
(predecessors && predecessors->size) ? predecessors->data : nullptr;
CUGRAPH_EXPECTS(sssp_dist_ptr || pred_ptr, "Invalid API parameter");
if (sssp_dist_ptr) {
CUGRAPH_EXPECTS(!sssp_distances->valid, "Column must be valid");
// Integral types are possible, but we don't want to deal with overflow
// conditions right now
CUGRAPH_EXPECTS(sssp_distances->dtype == GDF_FLOAT32 ||
sssp_distances->dtype == GDF_FLOAT64,
"Invalid API parameter");
}
CUGRAPH_EXPECTS(gdf_G->adjList->offsets->dtype == GDF_INT32,
"Unsupported data type");
CUGRAPH_EXPECTS(gdf_G->adjList->indices->dtype == GDF_INT32,
"Unsupported data type");
if (pred_ptr)
CUGRAPH_EXPECTS(predecessors->dtype == gdf_G->adjList->indices->dtype,
"Unsupported data type");
if (sssp_dist_ptr)
CUGRAPH_EXPECTS(gdf_G->adjList->offsets->size - 1 <= sssp_distances->size,
"Invalid API parameter");
if (!gdf_G->adjList->edge_data) {
// Generate unit weights
// TODO: This should fallback to BFS, but for now it'll go through the
// SSSP path since BFS needs the directed flag, which should not be
// necessary for the SSSP API. We can pass directed to the BFS call, but
// BFS also does only integer distances right now whereas we need float or
// double
void* d_edge_data;
gdf_G->adjList->edge_data = new gdf_column;
cudaStream_t stream{nullptr};
// If distances array is given and is double, generate the weights in
// double
if (sssp_dist_ptr && sssp_distances->dtype == GDF_FLOAT64) {
std::vector<double> h_edge_data(gdf_G->adjList->indices->size, 1.0);
size_t edge_data_size = sizeof(double) * h_edge_data.size();
ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream);
CUDA_TRY(cudaMemcpy(d_edge_data,
&h_edge_data[0],
edge_data_size,
cudaMemcpyHostToDevice));
gdf_column_view(gdf_G->adjList->edge_data,
d_edge_data,
nullptr,
gdf_G->adjList->indices->size,
GDF_FLOAT64);
} else {
// Else generate float
std::vector<float> h_edge_data(gdf_G->adjList->indices->size, 1.0);
size_t edge_data_size = sizeof(float) * h_edge_data.size();
ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream);
CUDA_TRY(cudaMemcpy(d_edge_data,
&h_edge_data[0],
edge_data_size,
cudaMemcpyHostToDevice));
gdf_column_view(gdf_G->adjList->edge_data,
d_edge_data,
nullptr,
gdf_G->adjList->indices->size,
GDF_FLOAT32);
}
} else {
// Got weighted graph
CUGRAPH_EXPECTS(
gdf_G->adjList->edge_data->size == gdf_G->adjList->indices->size,
"Invalid API parameter");
CUGRAPH_EXPECTS(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 ||
gdf_G->adjList->edge_data->dtype == GDF_FLOAT64,
"Invalid API parameter");
if (sssp_dist_ptr)
CUGRAPH_EXPECTS(gdf_G->adjList->edge_data->dtype == sssp_distances->dtype,
"Unsupported data type");
// SSSP is not defined for graphs with negative weight cycles
// Warn user about any negative edges
if (gdf_G->prop && gdf_G->prop->has_negative_edges == GDF_PROP_TRUE)
std::cerr << "WARN: The graph has negative weight edges. SSSP will not "
"converge if the graph has negative weight cycles\n";
}
int n = gdf_G->adjList->offsets->size - 1;
int e = gdf_G->adjList->indices->size;
int* offsets_ptr = (int*)gdf_G->adjList->offsets->data;
int* indices_ptr = (int*)gdf_G->adjList->indices->data;
void* edge_weights_ptr = static_cast<void*>(gdf_G->adjList->edge_data->data);
if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT32) {
cugraph::detail::SSSP<int, float> sssp(
n, e, offsets_ptr, indices_ptr, static_cast<float*>(edge_weights_ptr));
sssp.configure(static_cast<float*>(sssp_dist_ptr),
static_cast<int*>(pred_ptr),
nullptr);
sssp.traverse(source_vert);
} else if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT64) {
cugraph::detail::SSSP<int, double> sssp(n,
e,
offsets_ptr,
indices_ptr,
static_cast<double*>(edge_weights_ptr));
sssp.configure(static_cast<double*>(sssp_dist_ptr),
static_cast<int*>(pred_ptr),
nullptr);
sssp.traverse(source_vert);
} else {
CUGRAPH_EXPECTS(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 ||
gdf_G->adjList->edge_data->dtype == GDF_FLOAT64,
"Invalid API parameter");
}
}
} // namespace cugraph
|