hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M) |
---|---|---|---|
57cfdadf09e7ed08cd06bb7e5c70107292132597.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/remove.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
namespace phi {
namespace sparse {
template <typename T>
inline __device__ bool DevIsZero(const T* data, const int64_t cols) {
const T zero = static_cast<T>(0);
// TODO(zhangkaihuo): check the data is zero or not in parallel when cols > 1
for (int64_t i = 0; i < cols; i++) {
if (data[i] != zero) {
return false;
}
}
return true;
}
template <typename T>
__global__ void GetNonZeroNums(const T* dense_data,
const int rows,
const int cols,
int* non_zero_num,
int* temp_indexs) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int counter;
if (threadIdx.x == 0) counter = 0;
__syncthreads();
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
int index = -1;
// TODO(zhangkaihuo): when cols=1, vectorization can be used
if (!DevIsZero(dense_data + i * cols, cols)) {
// use reductions?
atomicAdd(&counter, 1);
index = i;
}
temp_indexs[i] = index;
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(non_zero_num, counter);
}
}
template <typename T>
__global__ void GetNonZeroElementsAndIndices(const T* dense_data,
const int64_t sparse_dim,
const int64_t cols,
const int64_t* x_dims,
const int non_zero_num,
const int* indexs,
int64_t* indices,
T* sparse_data) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t sparse_index = indexs[i];
int64_t x_index = sparse_index;
for (int64_t j = sparse_dim - 1; j >= 0; j--) {
indices[j * non_zero_num + i] = sparse_index % x_dims[j];
sparse_index /= x_dims[j];
}
for (int j = 0; j < cols; j++) {
sparse_data[i * cols + j] = dense_data[x_index * cols + j];
}
}
}
template <typename T, typename Context>
void DenseToSparseCooKernel(const Context& dev_ctx,
const DenseTensor& x,
const int64_t sparse_dim,
SparseCooTensor* out) {
const T* x_data = x.data<T>();
const auto& x_dims = x.dims();
auto dims_2d = flatten_to_2d(x_dims, sparse_dim);
const int rows = dims_2d[0];
const int cols = dims_2d[1];
auto nums_meta =
phi::DenseTensorMeta(DataType::INT32, {1}, phi::DataLayout::NCHW);
DenseTensor nums = phi::Empty(dev_ctx, std::move(nums_meta));
auto x_dims_meta = phi::DenseTensorMeta(DataType::INT64,
{static_cast<int64_t>(x_dims.size())},
phi::DataLayout::NCHW);
DenseTensor d_x_dims = phi::Empty(dev_ctx, std::move(x_dims_meta));
const auto place = dev_ctx.GetPlace();
// 1. get numbers of non zero elements, and get the index of non zero elements
int* nums_ptr = nums.mutable_data<int>(place);
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(nums_ptr, 0, sizeof(int), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(nums_ptr, 0, sizeof(int), dev_ctx.stream()));
#endif
int grid_size = 1, block_size = 1;
GetGpuLaunchConfig1D(dev_ctx, rows, &grid_size, &block_size);
auto temp_indexs_meta =
phi::DenseTensorMeta(DataType::INT32, {rows}, phi::DataLayout::NCHW);
DenseTensor temp_indexs = phi::Empty(dev_ctx, std::move(temp_indexs_meta));
int* temp_indexs_ptr = temp_indexs.mutable_data<int>(place);
hipLaunchKernelGGL(( GetNonZeroNums), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
x_data, rows, cols, nums_ptr, temp_indexs_ptr);
#ifdef PADDLE_WITH_HIP
thrust::remove(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::remove(thrust::hip::par.on(dev_ctx.stream()),
#endif
temp_indexs_ptr,
temp_indexs_ptr + rows,
-1);
// 2. copy non_zero_num to host, copy x_dims to device
int non_zero_num = 0;
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&non_zero_num,
nums_ptr,
sizeof(int),
hipMemcpyDeviceToHost,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&non_zero_num,
nums_ptr,
sizeof(int),
hipMemcpyDeviceToHost,
dev_ctx.stream()));
#endif
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(d_x_dims.mutable_data<int64_t>(place),
x_dims.Get(),
x_dims.size() * sizeof(x_dims[0]),
hipMemcpyHostToDevice,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(d_x_dims.mutable_data<int64_t>(place),
x_dims.Get(),
x_dims.size() * sizeof(x_dims[0]),
hipMemcpyHostToDevice,
dev_ctx.stream()));
#endif
dev_ctx.Wait(); // wait the copy
const auto values_dims = InferDenseDims(x_dims, sparse_dim, non_zero_num);
DenseTensorMeta indices_meta(DataType::INT64,
{sparse_dim, static_cast<int64_t>(non_zero_num)},
DataLayout::NCHW);
DenseTensorMeta values_meta(x.meta().dtype, values_dims, x.meta().layout);
phi::DenseTensor indices(
phi::make_intrusive<paddle::experimental::SharedStorage>(
dev_ctx.GetPlace()),
std::move(indices_meta));
phi::DenseTensor values(
phi::make_intrusive<paddle::experimental::SharedStorage>(
dev_ctx.GetPlace()),
std::move(values_meta));
int64_t* indices_data = indices.mutable_data<int64_t>(place);
T* sparse_data = values.mutable_data<T>(place);
// 3. calc indices by indexs and get values by indexs
GetGpuLaunchConfig1D(dev_ctx, non_zero_num, &grid_size, &block_size);
hipLaunchKernelGGL(( GetNonZeroElementsAndIndices), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
x_data,
sparse_dim,
cols,
d_x_dims.data<int64_t>(),
non_zero_num,
temp_indexs_ptr,
indices_data,
sparse_data);
out->SetMember(indices, values, x_dims, true);
}
__global__ void GetBatchSizes(const int64_t* crows,
const int rows,
const int batchs,
int* batch_sizes) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < batchs) {
batch_sizes[tid] = crows[tid * (rows + 1) + rows];
}
}
__global__ void ConvertCsrCrowsToCooRows(const int64_t* crows_ptr,
const int* crows_offsets,
int64_t* rows_ptr,
int64_t* batch_ptr,
const int rows) {
const int b = blockIdx.y;
const int64_t offset = crows_offsets ? crows_offsets[b] : 0;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
for (int j = crows_ptr[b * (rows + 1) + i];
j < crows_ptr[b * (rows + 1) + i + 1];
j++) {
rows_ptr[offset + j] = i;
if (batch_ptr) {
batch_ptr[offset + j] = b;
}
}
}
}
template <typename T, typename Context>
void SparseCsrToCooKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
SparseCooTensor* out) {
const DDim& x_dims = x.dims();
const int64_t non_zero_num = x.non_zero_cols().numel();
const auto& csr_crows = x.non_zero_crows();
const auto& csr_cols = x.non_zero_cols();
const auto& csr_values = x.non_zero_elements();
const int64_t* csr_crows_data = csr_crows.data<int64_t>();
const int64_t* csr_cols_data = csr_cols.data<int64_t>();
const T* csr_values_data = csr_values.data<T>();
int64_t sparse_dim = 2;
if (x_dims.size() == 3) {
sparse_dim = 3;
}
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
const auto place = dev_ctx.GetPlace();
DenseTensorMeta indices_meta(
DataType::INT64, {sparse_dim, non_zero_num}, DataLayout::NCHW);
DenseTensorMeta values_meta(x.dtype(), {non_zero_num}, x.layout());
DenseTensorMeta offsets_meta(DataType::INT32, {batchs}, DataLayout::NCHW);
DenseTensor indices = phi::Empty(dev_ctx, std::move(indices_meta));
DenseTensor values = phi::Empty(dev_ctx, std::move(values_meta));
DenseTensor offsets = phi::Empty(dev_ctx, std::move(offsets_meta));
int64_t* coo_indices = indices.mutable_data<int64_t>(place);
int64_t* batch_ptr = x_dims.size() == 2 ? nullptr : coo_indices;
int64_t* coo_rows_data =
x_dims.size() == 2 ? coo_indices : batch_ptr + non_zero_num;
int64_t* coo_cols_data = coo_rows_data + non_zero_num;
int* offsets_ptr = batchs == 1 ? nullptr : offsets.mutable_data<int>(place);
T* coo_values_data = values.mutable_data<T>(place);
int grid_size = 1, block_size = 1;
if (batchs > 1) {
GetGpuLaunchConfig1D(dev_ctx, batchs, &grid_size, &block_size);
hipLaunchKernelGGL(( GetBatchSizes), dim3(grid_size), dim3(block_size), 0, 0,
csr_crows_data, rows, batchs, offsets_ptr);
#ifdef PADDLE_WITH_HIP
thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()),
#endif
offsets_ptr,
offsets_ptr + batchs,
offsets_ptr);
}
GetGpuLaunchConfig1D(dev_ctx, rows, &grid_size, &block_size);
dim3 grids(grid_size, batchs, 1);
hipLaunchKernelGGL(( ConvertCsrCrowsToCooRows), dim3(grids), dim3(block_size), 0, 0,
csr_crows_data, offsets_ptr, coo_rows_data, batch_ptr, rows);
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(coo_cols_data,
csr_cols_data,
sizeof(int64_t) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(coo_values_data,
csr_values_data,
sizeof(T) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(coo_cols_data,
csr_cols_data,
sizeof(int64_t) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(coo_values_data,
csr_values_data,
sizeof(T) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
#endif
out->SetMember(indices, values, x_dims, true);
}
__global__ void GetBatchsOffset(const int64_t* batchs_ptr,
const int non_zero_num,
int64_t* batchs_offset) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
if (i == non_zero_num - 1 || batchs_ptr[i] != batchs_ptr[i + 1]) {
batchs_offset[batchs_ptr[i]] = i + 1;
}
}
}
__global__ void ConvertCooRowsToCsrCrows(
const int64_t* batchs_offset, // can be null if batchs = 1
const int64_t* coo_rows_data,
int64_t* csr_crows_data,
const int rows,
const int64_t non_zero_num) {
const int b = blockIdx.y;
int batch_non_zero_num =
batchs_offset == nullptr ? non_zero_num : batchs_offset[b];
if (batch_non_zero_num == 0) return;
int batch_start = 0;
if (b > 0) {
batch_start = batchs_offset[b - 1];
batch_non_zero_num -= batch_start;
}
auto* coo_rows_ptr = coo_rows_data + batch_start;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < batch_non_zero_num; i += gridDim.x * blockDim.x) {
if (i == 0) {
for (int j = 0; j <= coo_rows_ptr[0]; j++) {
csr_crows_data[b * (rows + 1) + j] = 0;
}
} else {
for (int j = coo_rows_ptr[i - 1]; j < coo_rows_ptr[i]; j++) {
csr_crows_data[b * (rows + 1) + j + 1] = i;
}
}
if (i == batch_non_zero_num - 1) {
for (int64_t i = coo_rows_ptr[batch_non_zero_num - 1] + 1; i < rows + 1;
i++) {
csr_crows_data[b * (rows + 1) + i] = batch_non_zero_num;
}
}
}
}
template <typename T, typename Context>
void SparseCooToCsrKernel(const Context& dev_ctx,
const SparseCooTensor& x,
SparseCsrTensor* out) {
const auto& x_dims = x.dims();
bool valid = x_dims.size() == 2 || x_dims.size() == 3;
PADDLE_ENFORCE_EQ(valid,
true,
phi::errors::InvalidArgument(
"SparseCsrTensor only support 2-D or 3-D matrix"));
const int64_t non_zero_num = x.nnz();
if (non_zero_num <= 0) return;
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
const auto place = dev_ctx.GetPlace();
DenseTensorMeta crows_meta(
DataType::INT64, {batchs * (rows + 1)}, DataLayout::NCHW);
DenseTensorMeta cols_meta(DataType::INT64, {non_zero_num}, DataLayout::NCHW);
DenseTensorMeta values_meta(x.dtype(), {non_zero_num}, x.layout());
phi::DenseTensor non_zero_crows(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(crows_meta));
phi::DenseTensor non_zero_cols(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(cols_meta));
phi::DenseTensor non_zero_elements(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(values_meta));
int64_t* csr_crows_data = non_zero_crows.mutable_data<int64_t>(place);
int64_t* csr_cols_data = non_zero_cols.mutable_data<int64_t>(place);
T* csr_values_data = non_zero_elements.mutable_data<T>(place);
const auto& coo_indices = x.non_zero_indices();
const auto& coo_values = x.non_zero_elements();
const int64_t* batchs_ptr = coo_indices.data<int64_t>();
const int64_t* coo_rows_data =
batchs == 1 ? batchs_ptr : batchs_ptr + non_zero_num;
const int64_t* coo_cols_data = coo_rows_data + non_zero_num;
const T* coo_values_data = coo_values.data<T>();
if (!x.coalesced()) {
// TODO(zhangkahuo): call coalesced() to distinct and sort the indices
}
int grid_size = 1, block_size = 1;
GetGpuLaunchConfig1D(dev_ctx, batchs, &grid_size, &block_size);
if (batchs > 1) {
DenseTensorMeta batchs_meta(DataType::INT64, {batchs}, DataLayout::NCHW);
phi::DenseTensor batchs_offset(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(batchs_meta));
int64_t* batchs_offset_ptr = batchs_offset.mutable_data<int64_t>(place);
hipLaunchKernelGGL(( GetBatchsOffset), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
batchs_ptr, non_zero_num, batchs_offset_ptr);
dim3 grids(grid_size, batchs, 1);
hipLaunchKernelGGL(( ConvertCooRowsToCsrCrows), dim3(grids), dim3(block_size), 0, dev_ctx.stream(),
batchs_offset_ptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
} else {
hipLaunchKernelGGL(( ConvertCooRowsToCsrCrows), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
nullptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
}
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(csr_cols_data,
coo_cols_data,
sizeof(int64_t) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(csr_values_data,
coo_values_data,
sizeof(T) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(csr_cols_data,
coo_cols_data,
sizeof(int64_t) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(csr_values_data,
coo_values_data,
sizeof(T) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
#endif
out->SetMember(non_zero_crows, non_zero_cols, non_zero_elements, x_dims);
}
template <typename ValueT, typename IndicesT>
__global__ void KernelSparseCooToDense(const IndicesT* indices,
const IndicesT* sparse_offsets,
const ValueT* data,
ValueT* dense_data,
const IndicesT non_zero_num,
const int64_t base_offset,
const int64_t sparse_dim) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t index = 0;
for (int j = 0; j < sparse_dim; j++) {
index += indices[j * non_zero_num + i] * sparse_offsets[j];
}
for (int j = 0; j < base_offset; j++) {
dense_data[index * base_offset + j] = data[i * base_offset + j];
}
}
}
template <typename T, typename Context>
void SparseCooToDenseKernel(const Context& dev_ctx,
const SparseCooTensor& x,
DenseTensor* out) {
const auto non_zero_num = x.nnz();
const auto dense_dims = x.dims();
const auto indices = x.non_zero_indices();
const auto values = x.non_zero_elements();
const auto indices_dims = indices.dims();
int64_t sparse_dim = indices_dims[0];
if (indices_dims.size() == 1) {
sparse_dim = 1;
}
const int64_t dense_dim = values.dims().size() - 1;
const auto place = dev_ctx.GetPlace();
const T* x_data = values.data<T>();
T* out_data = out->mutable_data<T>(place);
int64_t base_offset = 1;
for (int64_t i = 0; i < dense_dim; i++) {
base_offset *= dense_dims[sparse_dim + i];
}
std::vector<int64_t> sparse_offsets(sparse_dim);
int64_t offset = 1;
for (int i = sparse_dim - 1; i >= 0; i--) {
sparse_offsets[i] = offset;
offset *= dense_dims[i];
}
auto sparse_offset_meta = phi::DenseTensorMeta(
DataType::INT64, {sparse_dim}, phi::DataLayout::NCHW);
DenseTensor d_sparse_offsets = Empty(dev_ctx, std::move(sparse_offset_meta));
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(d_sparse_offsets.mutable_data<int64_t>(place),
sparse_offsets.data(),
sparse_dim * sizeof(int64_t),
hipMemcpyHostToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(out_data, 0, sizeof(T) * out->numel(), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(d_sparse_offsets.mutable_data<int64_t>(place),
sparse_offsets.data(),
sparse_dim * sizeof(int64_t),
hipMemcpyHostToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(out_data, 0, sizeof(T) * out->numel(), dev_ctx.stream()));
#endif
int grid_size = 1, block_size = 1;
GetGpuLaunchConfig1D(dev_ctx, non_zero_num, &grid_size, &block_size);
hipLaunchKernelGGL(( KernelSparseCooToDense<
T,
int64_t>), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
indices.data<int64_t>(),
d_sparse_offsets.data<int64_t>(),
x_data,
out_data,
non_zero_num,
base_offset,
sparse_dim);
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(dense_to_sparse_coo,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(dense_to_sparse_csr,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
| 57cfdadf09e7ed08cd06bb7e5c70107292132597.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/remove.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
namespace phi {
namespace sparse {
template <typename T>
inline __device__ bool DevIsZero(const T* data, const int64_t cols) {
const T zero = static_cast<T>(0);
// TODO(zhangkaihuo): check the data is zero or not in parallel when cols > 1
for (int64_t i = 0; i < cols; i++) {
if (data[i] != zero) {
return false;
}
}
return true;
}
template <typename T>
__global__ void GetNonZeroNums(const T* dense_data,
const int rows,
const int cols,
int* non_zero_num,
int* temp_indexs) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int counter;
if (threadIdx.x == 0) counter = 0;
__syncthreads();
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
int index = -1;
// TODO(zhangkaihuo): when cols=1, vectorization can be used
if (!DevIsZero(dense_data + i * cols, cols)) {
// use reductions?
atomicAdd(&counter, 1);
index = i;
}
temp_indexs[i] = index;
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(non_zero_num, counter);
}
}
template <typename T>
__global__ void GetNonZeroElementsAndIndices(const T* dense_data,
const int64_t sparse_dim,
const int64_t cols,
const int64_t* x_dims,
const int non_zero_num,
const int* indexs,
int64_t* indices,
T* sparse_data) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t sparse_index = indexs[i];
int64_t x_index = sparse_index;
for (int64_t j = sparse_dim - 1; j >= 0; j--) {
indices[j * non_zero_num + i] = sparse_index % x_dims[j];
sparse_index /= x_dims[j];
}
for (int j = 0; j < cols; j++) {
sparse_data[i * cols + j] = dense_data[x_index * cols + j];
}
}
}
template <typename T, typename Context>
void DenseToSparseCooKernel(const Context& dev_ctx,
const DenseTensor& x,
const int64_t sparse_dim,
SparseCooTensor* out) {
const T* x_data = x.data<T>();
const auto& x_dims = x.dims();
auto dims_2d = flatten_to_2d(x_dims, sparse_dim);
const int rows = dims_2d[0];
const int cols = dims_2d[1];
auto nums_meta =
phi::DenseTensorMeta(DataType::INT32, {1}, phi::DataLayout::NCHW);
DenseTensor nums = phi::Empty(dev_ctx, std::move(nums_meta));
auto x_dims_meta = phi::DenseTensorMeta(DataType::INT64,
{static_cast<int64_t>(x_dims.size())},
phi::DataLayout::NCHW);
DenseTensor d_x_dims = phi::Empty(dev_ctx, std::move(x_dims_meta));
const auto place = dev_ctx.GetPlace();
// 1. get numbers of non zero elements, and get the index of non zero elements
int* nums_ptr = nums.mutable_data<int>(place);
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(nums_ptr, 0, sizeof(int), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemsetAsync(nums_ptr, 0, sizeof(int), dev_ctx.stream()));
#endif
int grid_size = 1, block_size = 1;
GetGpuLaunchConfig1D(dev_ctx, rows, &grid_size, &block_size);
auto temp_indexs_meta =
phi::DenseTensorMeta(DataType::INT32, {rows}, phi::DataLayout::NCHW);
DenseTensor temp_indexs = phi::Empty(dev_ctx, std::move(temp_indexs_meta));
int* temp_indexs_ptr = temp_indexs.mutable_data<int>(place);
GetNonZeroNums<<<grid_size, block_size, 0, dev_ctx.stream()>>>(
x_data, rows, cols, nums_ptr, temp_indexs_ptr);
#ifdef PADDLE_WITH_HIP
thrust::remove(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::remove(thrust::cuda::par.on(dev_ctx.stream()),
#endif
temp_indexs_ptr,
temp_indexs_ptr + rows,
-1);
// 2. copy non_zero_num to host, copy x_dims to device
int non_zero_num = 0;
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&non_zero_num,
nums_ptr,
sizeof(int),
hipMemcpyDeviceToHost,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(&non_zero_num,
nums_ptr,
sizeof(int),
cudaMemcpyDeviceToHost,
dev_ctx.stream()));
#endif
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(d_x_dims.mutable_data<int64_t>(place),
x_dims.Get(),
x_dims.size() * sizeof(x_dims[0]),
hipMemcpyHostToDevice,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemcpyAsync(d_x_dims.mutable_data<int64_t>(place),
x_dims.Get(),
x_dims.size() * sizeof(x_dims[0]),
cudaMemcpyHostToDevice,
dev_ctx.stream()));
#endif
dev_ctx.Wait(); // wait the copy
const auto values_dims = InferDenseDims(x_dims, sparse_dim, non_zero_num);
DenseTensorMeta indices_meta(DataType::INT64,
{sparse_dim, static_cast<int64_t>(non_zero_num)},
DataLayout::NCHW);
DenseTensorMeta values_meta(x.meta().dtype, values_dims, x.meta().layout);
phi::DenseTensor indices(
phi::make_intrusive<paddle::experimental::SharedStorage>(
dev_ctx.GetPlace()),
std::move(indices_meta));
phi::DenseTensor values(
phi::make_intrusive<paddle::experimental::SharedStorage>(
dev_ctx.GetPlace()),
std::move(values_meta));
int64_t* indices_data = indices.mutable_data<int64_t>(place);
T* sparse_data = values.mutable_data<T>(place);
// 3. calc indices by indexs and get values by indexs
GetGpuLaunchConfig1D(dev_ctx, non_zero_num, &grid_size, &block_size);
GetNonZeroElementsAndIndices<<<grid_size, block_size, 0, dev_ctx.stream()>>>(
x_data,
sparse_dim,
cols,
d_x_dims.data<int64_t>(),
non_zero_num,
temp_indexs_ptr,
indices_data,
sparse_data);
out->SetMember(indices, values, x_dims, true);
}
__global__ void GetBatchSizes(const int64_t* crows,
const int rows,
const int batchs,
int* batch_sizes) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < batchs) {
batch_sizes[tid] = crows[tid * (rows + 1) + rows];
}
}
__global__ void ConvertCsrCrowsToCooRows(const int64_t* crows_ptr,
const int* crows_offsets,
int64_t* rows_ptr,
int64_t* batch_ptr,
const int rows) {
const int b = blockIdx.y;
const int64_t offset = crows_offsets ? crows_offsets[b] : 0;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < rows; i += gridDim.x * blockDim.x) {
for (int j = crows_ptr[b * (rows + 1) + i];
j < crows_ptr[b * (rows + 1) + i + 1];
j++) {
rows_ptr[offset + j] = i;
if (batch_ptr) {
batch_ptr[offset + j] = b;
}
}
}
}
template <typename T, typename Context>
void SparseCsrToCooKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
SparseCooTensor* out) {
const DDim& x_dims = x.dims();
const int64_t non_zero_num = x.non_zero_cols().numel();
const auto& csr_crows = x.non_zero_crows();
const auto& csr_cols = x.non_zero_cols();
const auto& csr_values = x.non_zero_elements();
const int64_t* csr_crows_data = csr_crows.data<int64_t>();
const int64_t* csr_cols_data = csr_cols.data<int64_t>();
const T* csr_values_data = csr_values.data<T>();
int64_t sparse_dim = 2;
if (x_dims.size() == 3) {
sparse_dim = 3;
}
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
const auto place = dev_ctx.GetPlace();
DenseTensorMeta indices_meta(
DataType::INT64, {sparse_dim, non_zero_num}, DataLayout::NCHW);
DenseTensorMeta values_meta(x.dtype(), {non_zero_num}, x.layout());
DenseTensorMeta offsets_meta(DataType::INT32, {batchs}, DataLayout::NCHW);
DenseTensor indices = phi::Empty(dev_ctx, std::move(indices_meta));
DenseTensor values = phi::Empty(dev_ctx, std::move(values_meta));
DenseTensor offsets = phi::Empty(dev_ctx, std::move(offsets_meta));
int64_t* coo_indices = indices.mutable_data<int64_t>(place);
int64_t* batch_ptr = x_dims.size() == 2 ? nullptr : coo_indices;
int64_t* coo_rows_data =
x_dims.size() == 2 ? coo_indices : batch_ptr + non_zero_num;
int64_t* coo_cols_data = coo_rows_data + non_zero_num;
int* offsets_ptr = batchs == 1 ? nullptr : offsets.mutable_data<int>(place);
T* coo_values_data = values.mutable_data<T>(place);
int grid_size = 1, block_size = 1;
if (batchs > 1) {
GetGpuLaunchConfig1D(dev_ctx, batchs, &grid_size, &block_size);
GetBatchSizes<<<grid_size, block_size>>>(
csr_crows_data, rows, batchs, offsets_ptr);
#ifdef PADDLE_WITH_HIP
thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()),
#else
thrust::exclusive_scan(thrust::cuda::par.on(dev_ctx.stream()),
#endif
offsets_ptr,
offsets_ptr + batchs,
offsets_ptr);
}
GetGpuLaunchConfig1D(dev_ctx, rows, &grid_size, &block_size);
dim3 grids(grid_size, batchs, 1);
ConvertCsrCrowsToCooRows<<<grids, block_size>>>(
csr_crows_data, offsets_ptr, coo_rows_data, batch_ptr, rows);
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(coo_cols_data,
csr_cols_data,
sizeof(int64_t) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(coo_values_data,
csr_values_data,
sizeof(T) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(coo_cols_data,
csr_cols_data,
sizeof(int64_t) * non_zero_num,
cudaMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(coo_values_data,
csr_values_data,
sizeof(T) * non_zero_num,
cudaMemcpyDeviceToDevice,
dev_ctx.stream()));
#endif
out->SetMember(indices, values, x_dims, true);
}
__global__ void GetBatchsOffset(const int64_t* batchs_ptr,
const int non_zero_num,
int64_t* batchs_offset) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
if (i == non_zero_num - 1 || batchs_ptr[i] != batchs_ptr[i + 1]) {
batchs_offset[batchs_ptr[i]] = i + 1;
}
}
}
__global__ void ConvertCooRowsToCsrCrows(
const int64_t* batchs_offset, // can be null if batchs = 1
const int64_t* coo_rows_data,
int64_t* csr_crows_data,
const int rows,
const int64_t non_zero_num) {
const int b = blockIdx.y;
int batch_non_zero_num =
batchs_offset == nullptr ? non_zero_num : batchs_offset[b];
if (batch_non_zero_num == 0) return;
int batch_start = 0;
if (b > 0) {
batch_start = batchs_offset[b - 1];
batch_non_zero_num -= batch_start;
}
auto* coo_rows_ptr = coo_rows_data + batch_start;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < batch_non_zero_num; i += gridDim.x * blockDim.x) {
if (i == 0) {
for (int j = 0; j <= coo_rows_ptr[0]; j++) {
csr_crows_data[b * (rows + 1) + j] = 0;
}
} else {
for (int j = coo_rows_ptr[i - 1]; j < coo_rows_ptr[i]; j++) {
csr_crows_data[b * (rows + 1) + j + 1] = i;
}
}
if (i == batch_non_zero_num - 1) {
for (int64_t i = coo_rows_ptr[batch_non_zero_num - 1] + 1; i < rows + 1;
i++) {
csr_crows_data[b * (rows + 1) + i] = batch_non_zero_num;
}
}
}
}
template <typename T, typename Context>
void SparseCooToCsrKernel(const Context& dev_ctx,
const SparseCooTensor& x,
SparseCsrTensor* out) {
const auto& x_dims = x.dims();
bool valid = x_dims.size() == 2 || x_dims.size() == 3;
PADDLE_ENFORCE_EQ(valid,
true,
phi::errors::InvalidArgument(
"SparseCsrTensor only support 2-D or 3-D matrix"));
const int64_t non_zero_num = x.nnz();
if (non_zero_num <= 0) return;
int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
const auto place = dev_ctx.GetPlace();
DenseTensorMeta crows_meta(
DataType::INT64, {batchs * (rows + 1)}, DataLayout::NCHW);
DenseTensorMeta cols_meta(DataType::INT64, {non_zero_num}, DataLayout::NCHW);
DenseTensorMeta values_meta(x.dtype(), {non_zero_num}, x.layout());
phi::DenseTensor non_zero_crows(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(crows_meta));
phi::DenseTensor non_zero_cols(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(cols_meta));
phi::DenseTensor non_zero_elements(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(values_meta));
int64_t* csr_crows_data = non_zero_crows.mutable_data<int64_t>(place);
int64_t* csr_cols_data = non_zero_cols.mutable_data<int64_t>(place);
T* csr_values_data = non_zero_elements.mutable_data<T>(place);
const auto& coo_indices = x.non_zero_indices();
const auto& coo_values = x.non_zero_elements();
const int64_t* batchs_ptr = coo_indices.data<int64_t>();
const int64_t* coo_rows_data =
batchs == 1 ? batchs_ptr : batchs_ptr + non_zero_num;
const int64_t* coo_cols_data = coo_rows_data + non_zero_num;
const T* coo_values_data = coo_values.data<T>();
if (!x.coalesced()) {
// TODO(zhangkahuo): call coalesced() to distinct and sort the indices
}
int grid_size = 1, block_size = 1;
GetGpuLaunchConfig1D(dev_ctx, batchs, &grid_size, &block_size);
if (batchs > 1) {
DenseTensorMeta batchs_meta(DataType::INT64, {batchs}, DataLayout::NCHW);
phi::DenseTensor batchs_offset(
phi::make_intrusive<paddle::experimental::SharedStorage>(place),
std::move(batchs_meta));
int64_t* batchs_offset_ptr = batchs_offset.mutable_data<int64_t>(place);
GetBatchsOffset<<<grid_size, block_size, 0, dev_ctx.stream()>>>(
batchs_ptr, non_zero_num, batchs_offset_ptr);
dim3 grids(grid_size, batchs, 1);
ConvertCooRowsToCsrCrows<<<grids, block_size, 0, dev_ctx.stream()>>>(
batchs_offset_ptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
} else {
ConvertCooRowsToCsrCrows<<<grid_size, block_size, 0, dev_ctx.stream()>>>(
nullptr, coo_rows_data, csr_crows_data, rows, non_zero_num);
}
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(csr_cols_data,
coo_cols_data,
sizeof(int64_t) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(csr_values_data,
coo_values_data,
sizeof(T) * non_zero_num,
hipMemcpyDeviceToDevice,
dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(csr_cols_data,
coo_cols_data,
sizeof(int64_t) * non_zero_num,
cudaMemcpyDeviceToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(csr_values_data,
coo_values_data,
sizeof(T) * non_zero_num,
cudaMemcpyDeviceToDevice,
dev_ctx.stream()));
#endif
out->SetMember(non_zero_crows, non_zero_cols, non_zero_elements, x_dims);
}
template <typename ValueT, typename IndicesT>
__global__ void KernelSparseCooToDense(const IndicesT* indices,
const IndicesT* sparse_offsets,
const ValueT* data,
ValueT* dense_data,
const IndicesT non_zero_num,
const int64_t base_offset,
const int64_t sparse_dim) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) {
int64_t index = 0;
for (int j = 0; j < sparse_dim; j++) {
index += indices[j * non_zero_num + i] * sparse_offsets[j];
}
for (int j = 0; j < base_offset; j++) {
dense_data[index * base_offset + j] = data[i * base_offset + j];
}
}
}
template <typename T, typename Context>
void SparseCooToDenseKernel(const Context& dev_ctx,
const SparseCooTensor& x,
DenseTensor* out) {
const auto non_zero_num = x.nnz();
const auto dense_dims = x.dims();
const auto indices = x.non_zero_indices();
const auto values = x.non_zero_elements();
const auto indices_dims = indices.dims();
int64_t sparse_dim = indices_dims[0];
if (indices_dims.size() == 1) {
sparse_dim = 1;
}
const int64_t dense_dim = values.dims().size() - 1;
const auto place = dev_ctx.GetPlace();
const T* x_data = values.data<T>();
T* out_data = out->mutable_data<T>(place);
int64_t base_offset = 1;
for (int64_t i = 0; i < dense_dim; i++) {
base_offset *= dense_dims[sparse_dim + i];
}
std::vector<int64_t> sparse_offsets(sparse_dim);
int64_t offset = 1;
for (int i = sparse_dim - 1; i >= 0; i--) {
sparse_offsets[i] = offset;
offset *= dense_dims[i];
}
auto sparse_offset_meta = phi::DenseTensorMeta(
DataType::INT64, {sparse_dim}, phi::DataLayout::NCHW);
DenseTensor d_sparse_offsets = Empty(dev_ctx, std::move(sparse_offset_meta));
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(d_sparse_offsets.mutable_data<int64_t>(place),
sparse_offsets.data(),
sparse_dim * sizeof(int64_t),
hipMemcpyHostToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(out_data, 0, sizeof(T) * out->numel(), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemcpyAsync(d_sparse_offsets.mutable_data<int64_t>(place),
sparse_offsets.data(),
sparse_dim * sizeof(int64_t),
cudaMemcpyHostToDevice,
dev_ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemsetAsync(out_data, 0, sizeof(T) * out->numel(), dev_ctx.stream()));
#endif
int grid_size = 1, block_size = 1;
GetGpuLaunchConfig1D(dev_ctx, non_zero_num, &grid_size, &block_size);
KernelSparseCooToDense<
T,
int64_t><<<grid_size, block_size, 0, dev_ctx.stream()>>>(
indices.data<int64_t>(),
d_sparse_offsets.data<int64_t>(),
x_data,
out_data,
non_zero_num,
base_offset,
sparse_dim);
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(dense_to_sparse_coo,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToCooKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(dense_to_sparse_csr,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCsrKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_coo_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(sparse_csr_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToDenseKernel,
float,
double,
phi::dtype::float16,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
|
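The pair above shows the main rewrites hipify applies when producing the .hip column from the .cu column: a generated banner and an include of hip/hip_runtime.h are prepended, triple-chevron kernel launches become hipLaunchKernelGGL, cuda* runtime calls and enums become their hip* counterparts, and thrust::cuda::par becomes thrust::hip::par. The sketch below condenses that mapping into a toy example; the kernel name scale and its arguments are invented for illustration and do not appear in the dataset.
// Illustrative sketch only (not taken from the files above).
__global__ void scale(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}
// CUDA spelling, as in the *.cu column:
//   scale<<<grid, block, 0, stream>>>(ptr, 2.0f, n);
//   cudaMemcpyAsync(dst, src, bytes, cudaMemcpyDeviceToDevice, stream);
// HIP spelling after hipify, as in the *.hip column:
//   hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, stream, ptr, 2.0f, n);
//   hipMemcpyAsync(dst, src, bytes, hipMemcpyDeviceToDevice, stream);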
427a59b97d7bd981acbe45711388ad288ea7f6d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialReflectionPadding.cu"
#else
void THNN_(SpatialReflectionPadding_updateOutput)(THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
"non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 4) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputH = THCTensor_(size)(state, input, dimh);
int inputW = THCTensor_(size)(state, input, dimw);
THArgCheck(padL < inputW && padR < inputW, 4,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padL, padR, dimw, THCTensor_(sizeDesc)(state, input).str);
THArgCheck(padT < inputH && padB < inputH, 6,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padT, padB, dimh, THCTensor_(sizeDesc)(state, input).str);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1 || outputH >= 1, 2,
"input (H: %d, W: %d)is too small."
" Calculated output H: %d W: %d",
inputH, inputW, outputH, outputW);
THCDeviceTensor<real, 4> devInput;
THCDeviceTensor<real, 4> devOutput;
if (numInputDims == 3) {
THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<real, 3>(state, output).upcastOuter<4>();
} else {
THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 4>(state, input);
devOutput = toDeviceTensor<real, 4>(state, output);
}
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( SpatialReflectionPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devInput, devOutput, padT, padB, padL, padR);
THCudaCheck(hipGetLastError());
}
void THNN_(SpatialReflectionPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input->size[dimh];
int iwidth = input->size[dimw];
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THCTensor_(size)(state, gradOutput, dimh));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 4> devGradInput;
THCDeviceTensor<real, 4> devGradOutput;
if (numInputDims == 3) {
devGradInput = toDeviceTensor<real, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<real, 4>(state, gradInput);
devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( SpatialReflectionPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devGradInput, devGradOutput, padT, padB, padL, padR);
THCudaCheck(hipGetLastError());
}
#endif
| 427a59b97d7bd981acbe45711388ad288ea7f6d4.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialReflectionPadding.cu"
#else
void THNN_(SpatialReflectionPadding_updateOutput)(THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
"non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 4) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputH = THCTensor_(size)(state, input, dimh);
int inputW = THCTensor_(size)(state, input, dimw);
THArgCheck(padL < inputW && padR < inputW, 4,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padL, padR, dimw, THCTensor_(sizeDesc)(state, input).str);
THArgCheck(padT < inputH && padB < inputH, 6,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padT, padB, dimh, THCTensor_(sizeDesc)(state, input).str);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1 || outputH >= 1, 2,
"input (H: %d, W: %d)is too small."
" Calculated output H: %d W: %d",
inputH, inputW, outputH, outputW);
THCDeviceTensor<real, 4> devInput;
THCDeviceTensor<real, 4> devOutput;
if (numInputDims == 3) {
THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<real, 3>(state, output).upcastOuter<4>();
} else {
THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 4>(state, input);
devOutput = toDeviceTensor<real, 4>(state, output);
}
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
SpatialReflectionPadding_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devInput, devOutput, padT, padB, padL, padR);
THCudaCheck(cudaGetLastError());
}
void THNN_(SpatialReflectionPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input->size[dimh];
int iwidth = input->size[dimw];
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THCTensor_(size)(state, gradOutput, dimh));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 4> devGradInput;
THCDeviceTensor<real, 4> devGradOutput;
if (numInputDims == 3) {
devGradInput = toDeviceTensor<real, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<real, 4>(state, gradInput);
devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
SpatialReflectionPadding_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devGradInput, devGradOutput, padT, padB, padL, padR);
THCudaCheck(cudaGetLastError());
}
#endif
|
c3a8e0557fe64afd1ce7b35e6b7bb849e8ea78e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template<typename U>
void Internal::UP::operator()(U& vector){
std::reverse(vector.begin(),vector.end());
};
template<typename U>
void Internal::DOWN::operator()(U& vector){};
//***************************************************
template<typename D,typename V,typename T,typename L>
void Internal::shift_functions<D,V,T,L>::set(int n){
increment=n;
};
template<typename D,typename V,typename T,typename L>
int Internal::shift_functions<D,V,T,L>::next_size(Internal::shift_functions<D,V,T,L>::cordinate p){
int x=p.offset();
int a=p.width()-x;
int b=p.width()-op(x,increment)%p.width();
return ::min(a,b);
};
template<typename D,typename V,typename T,typename L>
Internal::shift_functions<D,V,T,L>::cordinate
Internal::shift_functions<D,V,T,L>::next(
Internal::shift_functions<D,V,T,L>::cordinate p)
{
return p+=next_size(p);
};
template<typename D,typename V,typename T,typename L>
void Internal::shift_functions<D,V,T,L>::adjust(V& vector){
direction(vector);
};
template<typename pointer,unsigned int blockSize>
__global__ void Internal::tree_leave_equality( pointer p_1,
pointer p_2,
bool* result,
int size){
extern __shared__ bool sdata[];
//load shared data
unsigned int tid=threadIdx.x;
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int gridSize=blockSize*2*gridDim.x;
sdata[tid]=true;
while(i<size){
bool comp_1=(p_1[i]==p_2[i]);
bool comp_2=(p_1[i+blockDim.x]==p_2[i+blockDim.x]);
sdata[tid]=(comp_1 && comp_2 );
i+=gridSize;
}
__syncthreads();
//reduce in shared mem
if(blockSize>=512){
if(tid<256){
sdata[tid]&=sdata[tid+256];
}
__syncthreads();
}
if(blockSize>=256){
if(tid<128){
sdata[tid]&=sdata[tid+128];
}
__syncthreads();
}
if(blockSize>=128){
if(tid<64){
sdata[tid]&=sdata[tid+64];
}
__syncthreads();
}
if(tid<32){
if(blockSize>=64)sdata[tid]&=sdata[tid+32];
if(blockSize>=32)sdata[tid]&=sdata[tid+16];
if(blockSize>=16)sdata[tid]&=sdata[tid+8];
if(blockSize>=8)sdata[tid]&=sdata[tid+4];
if(blockSize>=4)sdata[tid]&=sdata[tid+2];
if(blockSize>=2)sdata[tid]&=sdata[tid+1];
}
//write result to global mem
if (tid==0) result[blockIdx.x]=sdata[0];
};
/************************************equality operator******************/
template<typename A,typename B,typename C,typename D>
bool Internal::Equality_false<A,B,C,D>::operator()( const Tree<A,B>& tree_1,
const Tree<C,D>& tree_2){
return false;
};
template<typename T,typename L>
bool Internal::Equality_device<T,L>::operator()( const Tree<T,L>& tree_1,
const Tree<T,L>& tree_2){
int width=tree_1.width();
typedef typename Tree<T,L>::pointer pointer;
typedef Memory::location<Memory::Region::device> Location;
Location location;
if(tree_1.width()==tree_2.width()){
int grid=tree_1._cudaMingridSize;
int block=tree_1._cudaBlockSize;
bool result=true;
bool* result_temp=static_cast<bool*>(location.New(sizeof(bool)*grid));
bool result_a[grid];
int width=tree_1.width();
for(int i=0; i<width; i++){
pointer ptr_1=tree_1.getbranch(i);
pointer ptr_2=tree_2.getbranch(i);
#define kernel(x) case x: \
hipLaunchKernelGGL(( Internal::tree_leave_equality<pointer,x>) \
, dim3(grid),dim3(block),block, 0, ptr_1,ptr_2,result_temp,width); \
break;
if(ptr_1 && ptr_2){
switch(block){
kernel(512)
kernel(256)
kernel(128)
kernel(64)
kernel(32)
kernel(16)
kernel(8)
kernel(4)
kernel(2)
kernel(1)
}
#undef kernel
hipDeviceSynchronize();
location.MemCopy(result_temp,result_a,sizeof(bool)*grid);
result=std::any_of(result_a,result_a+grid, [](bool b){return ~b;} );
}else{
bool p_1=ptr_1;
bool p_2=ptr_2;
if(p_1 xor p_2)
result=false;
else
result=true;
};
if(!result)
break;
}
location.Delete(result_temp);
return result;
}else{
return false;
}
};
template<typename T,typename L>
bool Internal::Equality_host<T,L>::operator()( const Tree<T,L>& tree_1,
const Tree<T,L>& tree_2){
typedef typename Tree<T,L>::pointer pointer;
if(tree_1.width()==tree_2.width()){
int width=tree_2.width();
bool result=true;
for(int i=0; i<width; i++){
pointer ptr_1=tree_1.getbranch(i);
pointer ptr_2=tree_2.getbranch(i);
if(ptr_1 && ptr_2){
result=std::equal( ptr_1,
ptr_1+width,
ptr_2
);
}else{
bool p_1=ptr_1;
bool p_2=ptr_2;
if(p_1 xor p_2)
result=false;
else
result=true;
}
if(!result)
break;
}
return result;
}else{
return false;
}
};
| c3a8e0557fe64afd1ce7b35e6b7bb849e8ea78e6.cu | template<typename U>
void Internal::UP::operator()(U& vector){
std::reverse(vector.begin(),vector.end());
};
template<typename U>
void Internal::DOWN::operator()(U& vector){};
//***************************************************
template<typename D,typename V,typename T,typename L>
void Internal::shift_functions<D,V,T,L>::set(int n){
increment=n;
};
template<typename D,typename V,typename T,typename L>
int Internal::shift_functions<D,V,T,L>::next_size(Internal::shift_functions<D,V,T,L>::cordinate p){
int x=p.offset();
int a=p.width()-x;
int b=p.width()-op(x,increment)%p.width();
return std::min(a,b);
};
template<typename D,typename V,typename T,typename L>
Internal::shift_functions<D,V,T,L>::cordinate
Internal::shift_functions<D,V,T,L>::next(
Internal::shift_functions<D,V,T,L>::cordinate p)
{
return p+=next_size(p);
};
template<typename D,typename V,typename T,typename L>
void Internal::shift_functions<D,V,T,L>::adjust(V& vector){
direction(vector);
};
template<typename pointer,unsigned int blockSize>
__global__ void Internal::tree_leave_equality( pointer p_1,
pointer p_2,
bool* result,
int size){
extern __shared__ bool sdata[];
//load shared data
unsigned int tid=threadIdx.x;
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int gridSize=blockSize*2*gridDim.x;
sdata[tid]=true;
while(i<size){
bool comp_1=(p_1[i]==p_2[i]);
bool comp_2=(p_1[i+blockDim.x]==p_2[i+blockDim.x]);
sdata[tid]=(comp_1 && comp_2 );
i+=gridSize;
}
__syncthreads();
//reduce in shared mem
if(blockSize>=512){
if(tid<256){
sdata[tid]&=sdata[tid+256];
}
__syncthreads();
}
if(blockSize>=256){
if(tid<128){
sdata[tid]&=sdata[tid+128];
}
__syncthreads();
}
if(blockSize>=128){
if(tid<64){
sdata[tid]&=sdata[tid+64];
}
__syncthreads();
}
if(tid<32){
if(blockSize>=64)sdata[tid]&=sdata[tid+32];
if(blockSize>=32)sdata[tid]&=sdata[tid+16];
if(blockSize>=16)sdata[tid]&=sdata[tid+8];
if(blockSize>=8)sdata[tid]&=sdata[tid+4];
if(blockSize>=4)sdata[tid]&=sdata[tid+2];
if(blockSize>=2)sdata[tid]&=sdata[tid+1];
}
//write result to global mem
if (tid==0) result[blockIdx.x]=sdata[0];
};
/************************************equality operator******************/
template<typename A,typename B,typename C,typename D>
bool Internal::Equality_false<A,B,C,D>::operator()( const Tree<A,B>& tree_1,
const Tree<C,D>& tree_2){
return false;
};
template<typename T,typename L>
bool Internal::Equality_device<T,L>::operator()( const Tree<T,L>& tree_1,
const Tree<T,L>& tree_2){
int width=tree_1.width();
typedef typename Tree<T,L>::pointer pointer;
typedef Memory::location<Memory::Region::device> Location;
Location location;
if(tree_1.width()==tree_2.width()){
int grid=tree_1._cudaMingridSize;
int block=tree_1._cudaBlockSize;
bool result=true;
bool* result_temp=static_cast<bool*>(location.New(sizeof(bool)*grid));
bool result_a[grid];
int width=tree_1.width();
for(int i=0; i<width; i++){
pointer ptr_1=tree_1.getbranch(i);
pointer ptr_2=tree_2.getbranch(i);
#define kernel(x) case x: \
Internal::tree_leave_equality<pointer,x> \
<<<grid,block,block>>>(ptr_1,ptr_2,result_temp,width); \
break;
if(ptr_1 && ptr_2){
switch(block){
kernel(512)
kernel(256)
kernel(128)
kernel(64)
kernel(32)
kernel(16)
kernel(8)
kernel(4)
kernel(2)
kernel(1)
}
#undef kernel
cudaDeviceSynchronize();
location.MemCopy(result_temp,result_a,sizeof(bool)*grid);
result=std::any_of(result_a,result_a+grid, [](bool b){return ~b;} );
}else{
bool p_1=ptr_1;
bool p_2=ptr_2;
if(p_1 xor p_2)
result=false;
else
result=true;
};
if(!result)
break;
}
location.Delete(result_temp);
return result;
}else{
return false;
}
};
template<typename T,typename L>
bool Internal::Equality_host<T,L>::operator()( const Tree<T,L>& tree_1,
const Tree<T,L>& tree_2){
typedef typename Tree<T,L>::pointer pointer;
if(tree_1.width()==tree_2.width()){
int width=tree_2.width();
bool result=true;
for(int i=0; i<width; i++){
pointer ptr_1=tree_1.getbranch(i);
pointer ptr_2=tree_2.getbranch(i);
if(ptr_1 && ptr_2){
result=std::equal( ptr_1,
ptr_1+width,
ptr_2
);
}else{
bool p_1=ptr_1;
bool p_2=ptr_2;
if(p_1 xor p_2)
result=false;
else
result=true;
}
if(!result)
break;
}
return result;
}else{
return false;
}
};
|
f82fbf3879223beffa9eb327980c7fe28dd6eba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for fast math: fast_divmod.
*/
#include <cstdlib>
#include <ctime>
#include "../common/cutlass_unit_test.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conversion template
template <typename data_t>
__global__ void divmod_kernel(data_t* src, int* div, int* quo, data_t* rem,
int N) {
unsigned int mul, shr;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = idx; i < N; i += stride) {
cutlass::find_divisor(mul, shr, div[i]);
cutlass::fast_divmod(quo[i], rem[i], src[i], div[i], mul, shr);
}
}
/// Conversion template
template <typename data_t>
void divmod_host(data_t* src, int* div, int* quo, data_t* rem, int N) {
for (int i = 0; i < N; i += 1) {
unsigned int mul, shr;
cutlass::find_divisor(mul, shr, div[i]);
cutlass::fast_divmod(quo[i], rem[i], src[i], div[i], mul, shr);
}
}
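// A minimal usage sketch of the API exercised above: find_divisor precomputes
// a multiplier/shift pair that depends only on the divisor, and fast_divmod
// then computes quotient and remainder without an integer divide, e.g.
//
//   unsigned int mul, shr;
//   cutlass::find_divisor(mul, shr, 3);              // once per divisor
//   int quo, rem;
//   cutlass::fast_divmod(quo, rem, 10, 3, mul, shr); // quo == 3, rem == 1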
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
void CalculateDivMod_device() {
using ITensor = cutlass::HostTensor<int, cutlass::layout::RowMajor>;
using ETensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>;
static const int Length = (1 << 20);
static const int Extent = (1 << 20);
srand((unsigned)time(NULL));
ETensor Src({1, Length});
ITensor Div({1, Length});
ITensor Quo({1, Length});
ETensor Rem({1, Length});
ITensor Quo_gold({1, Length});
ETensor Rem_gold({1, Length});
for (int i = 0; i < Length; ++i) {
Src.host_data()[i] = (Element)(rand() % Extent);
Div.host_data()[i] = (rand() % Extent + 1);
Quo.host_data()[i] = 0;
Rem.host_data()[i] = (Element)0;
Quo_gold.host_data()[i] = int(Src.host_data()[i] / Div.host_data()[i]);
Rem_gold.host_data()[i] = Src.host_data()[i] -
Quo_gold.host_data()[i] * Div.host_data()[i];
}
Rem.sync_device();
Quo.sync_device();
Div.sync_device();
Src.sync_device();
int block = 256;
int grid = (Length + block - 1) / block > 256
? 256
: (Length + block - 1) / block;
hipLaunchKernelGGL(( test::core::kernel::divmod_kernel<Element>), dim3(grid), dim3(block), 0, 0,
reinterpret_cast<Element*>(Src.device_data()),
reinterpret_cast<int*>(Div.device_data()),
reinterpret_cast<int*>(Quo.device_data()),
reinterpret_cast<Element*>(Rem.device_data()), Length);
Quo.sync_host();
Rem.sync_host();
for (int i = 0; i < Length; ++i) {
int quo_gold = Quo_gold.host_data()[i];
Element rem_gold = Rem_gold.host_data()[i];
int quo = Quo.host_data()[i];
Element rem = Rem.host_data()[i];
EXPECT_TRUE(quo_gold == quo);
EXPECT_TRUE(rem_gold == rem);
}
}
TEST(CalculateDivMod_device, int) {
CalculateDivMod_device<int>();
}
TEST(CalculateDivMod_device, int64_t) {
CalculateDivMod_device<int64_t>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
void CalculateDivMod_host() {
using ITensor = cutlass::HostTensor<int, cutlass::layout::RowMajor>;
using ETensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>;
static const int Length = (1 << 10);
static const int Extent = (1 << 20);
srand((unsigned)time(NULL));
ETensor Src({1, Length});
ITensor Div({1, Length});
ITensor Quo({1, Length});
ETensor Rem({1, Length});
ITensor Quo_gold({1, Length});
ETensor Rem_gold({1, Length});
for (int i = 0; i < Length; ++i) {
Src.host_data()[i] = (Element)(rand() % Extent);
Div.host_data()[i] = (rand() % Extent + 1);
Quo.host_data()[i] = 0;
Rem.host_data()[i] = (Element)0;
Quo_gold.host_data()[i] = int(Src.host_data()[i] / Div.host_data()[i]);
Rem_gold.host_data()[i] = Src.host_data()[i] -
Quo_gold.host_data()[i] * Div.host_data()[i];
}
test::core::kernel::divmod_host<Element>(
reinterpret_cast<Element*>(Src.host_data()),
reinterpret_cast<int*>(Div.host_data()),
reinterpret_cast<int*>(Quo.host_data()),
reinterpret_cast<Element*>(Rem.host_data()), Length);
for (int i = 0; i < Length; ++i) {
int quo_gold = Quo_gold.host_data()[i];
Element rem_gold = Rem_gold.host_data()[i];
int quo = Quo.host_data()[i];
Element rem = Rem.host_data()[i];
EXPECT_TRUE(quo_gold == quo);
EXPECT_TRUE(rem_gold == rem);
}
}
TEST(CalculateDivMod_host, int) {
CalculateDivMod_host<int>();
}
TEST(CalculateDivMod_host, int64_t) {
CalculateDivMod_host<int64_t>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| f82fbf3879223beffa9eb327980c7fe28dd6eba1.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for fast math: fast_divmod.
*/
#include <cstdlib>
#include <ctime>
#include "../common/cutlass_unit_test.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conversion template
template <typename data_t>
__global__ void divmod_kernel(data_t* src, int* div, int* quo, data_t* rem,
int N) {
unsigned int mul, shr;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = idx; i < N; i += stride) {
cutlass::find_divisor(mul, shr, div[i]);
cutlass::fast_divmod(quo[i], rem[i], src[i], div[i], mul, shr);
}
}
/// Conversion template
template <typename data_t>
void divmod_host(data_t* src, int* div, int* quo, data_t* rem, int N) {
for (int i = 0; i < N; i += 1) {
unsigned int mul, shr;
cutlass::find_divisor(mul, shr, div[i]);
cutlass::fast_divmod(quo[i], rem[i], src[i], div[i], mul, shr);
}
}
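// A minimal usage sketch of the API exercised above: find_divisor precomputes
// a multiplier/shift pair that depends only on the divisor, and fast_divmod
// then computes quotient and remainder without an integer divide, e.g.
//
//   unsigned int mul, shr;
//   cutlass::find_divisor(mul, shr, 3);              // once per divisor
//   int quo, rem;
//   cutlass::fast_divmod(quo, rem, 10, 3, mul, shr); // quo == 3, rem == 1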
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
void CalculateDivMod_device() {
using ITensor = cutlass::HostTensor<int, cutlass::layout::RowMajor>;
using ETensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>;
static const int Length = (1 << 20);
static const int Extent = (1 << 20);
srand((unsigned)time(NULL));
ETensor Src({1, Length});
ITensor Div({1, Length});
ITensor Quo({1, Length});
ETensor Rem({1, Length});
ITensor Quo_gold({1, Length});
ETensor Rem_gold({1, Length});
for (int i = 0; i < Length; ++i) {
Src.host_data()[i] = (Element)(rand() % Extent);
Div.host_data()[i] = (rand() % Extent + 1);
Quo.host_data()[i] = 0;
Rem.host_data()[i] = (Element)0;
Quo_gold.host_data()[i] = int(Src.host_data()[i] / Div.host_data()[i]);
Rem_gold.host_data()[i] = Src.host_data()[i] -
Quo_gold.host_data()[i] * Div.host_data()[i];
}
Rem.sync_device();
Quo.sync_device();
Div.sync_device();
Src.sync_device();
int block = 256;
int grid = (Length + block - 1) / block > 256
? 256
: (Length + block - 1) / block;
test::core::kernel::divmod_kernel<Element><<<grid, block>>>(
reinterpret_cast<Element*>(Src.device_data()),
reinterpret_cast<int*>(Div.device_data()),
reinterpret_cast<int*>(Quo.device_data()),
reinterpret_cast<Element*>(Rem.device_data()), Length);
Quo.sync_host();
Rem.sync_host();
for (int i = 0; i < Length; ++i) {
int quo_gold = Quo_gold.host_data()[i];
Element rem_gold = Rem_gold.host_data()[i];
int quo = Quo.host_data()[i];
Element rem = Rem.host_data()[i];
EXPECT_TRUE(quo_gold == quo);
EXPECT_TRUE(rem_gold == rem);
}
}
TEST(CalculateDivMod_device, int) {
CalculateDivMod_device<int>();
}
TEST(CalculateDivMod_device, int64_t) {
CalculateDivMod_device<int64_t>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
void CalculateDivMod_host() {
using ITensor = cutlass::HostTensor<int, cutlass::layout::RowMajor>;
using ETensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>;
static const int Length = (1 << 10);
static const int Extent = (1 << 20);
srand((unsigned)time(NULL));
ETensor Src({1, Length});
ITensor Div({1, Length});
ITensor Quo({1, Length});
ETensor Rem({1, Length});
ITensor Quo_gold({1, Length});
ETensor Rem_gold({1, Length});
for (int i = 0; i < Length; ++i) {
Src.host_data()[i] = (Element)(rand() % Extent);
Div.host_data()[i] = (rand() % Extent + 1);
Quo.host_data()[i] = 0;
Rem.host_data()[i] = (Element)0;
Quo_gold.host_data()[i] = int(Src.host_data()[i] / Div.host_data()[i]);
Rem_gold.host_data()[i] = Src.host_data()[i] -
Quo_gold.host_data()[i] * Div.host_data()[i];
}
test::core::kernel::divmod_host<Element>(
reinterpret_cast<Element*>(Src.host_data()),
reinterpret_cast<int*>(Div.host_data()),
reinterpret_cast<int*>(Quo.host_data()),
reinterpret_cast<Element*>(Rem.host_data()), Length);
for (int i = 0; i < Length; ++i) {
int quo_gold = Quo_gold.host_data()[i];
Element rem_gold = Rem_gold.host_data()[i];
int quo = Quo.host_data()[i];
Element rem = Rem.host_data()[i];
EXPECT_TRUE(quo_gold == quo);
EXPECT_TRUE(rem_gold == rem);
}
}
TEST(CalculateDivMod_host, int) {
CalculateDivMod_host<int>();
}
TEST(CalculateDivMod_host, int64_t) {
CalculateDivMod_host<int64_t>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
07a3ec4594385a8ce3267b3d0a58242808109da0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copy_mem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *source = NULL;
hipMalloc(&source, XSIZE*YSIZE);
unsigned char *render = NULL;
hipMalloc(&render, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((copy_mem), dim3(gridBlock), dim3(threadBlock), 0, 0, source, render);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((copy_mem), dim3(gridBlock), dim3(threadBlock), 0, 0, source, render);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((copy_mem), dim3(gridBlock), dim3(threadBlock), 0, 0, source, render);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 07a3ec4594385a8ce3267b3d0a58242808109da0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copy_mem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *source = NULL;
cudaMalloc(&source, XSIZE*YSIZE);
unsigned char *render = NULL;
cudaMalloc(&render, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
copy_mem<<<gridBlock,threadBlock>>>(source,render);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copy_mem<<<gridBlock,threadBlock>>>(source,render);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copy_mem<<<gridBlock,threadBlock>>>(source,render);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e85d8a4a7f30c075a01b4112c106d2d400f654c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GraphCuts.h"
int GraphCuts::graphCutsInit(int widthGrid, int heightGrid, int labels)
{
deviceCount = checkDevice();
printf("No. of devices %d\n", deviceCount);
if (deviceCount < 1)
return -1;
int cuda_device = 0;
hipSetDevice(cuda_device);
hipDeviceProp_t device_properties;
(hipGetDeviceProperties(&device_properties, cuda_device));
if ((3 <= device_properties.major) && (device_properties.minor < 1))
deviceCheck = 2;
else
if ((3 <= device_properties.major) && (device_properties.minor >= 1))
deviceCheck = 1;
else
deviceCheck = 0;
width = widthGrid;
height = heightGrid;
num_Labels = labels;
blocks_x = 1;
blocks_y = 1;
num_of_blocks = 1;
num_of_threads_per_block = 256;
threads_x = 32;
threads_y = 8;
width1 = threads_x * ((int)ceil((float)width / (float)threads_x));
height1 = threads_y * ((int)ceil((float)height / (float)threads_y));
graph_size = width * height;
graph_size1 = width1 * height1;
size_int = sizeof(int)* graph_size1;
blocks_x = (int)((ceil)((float)width1 / (float)threads_x));
blocks_y = (int)((ceil)((float)height1 / (float)threads_y));
num_of_blocks = (int)((ceil)((float)graph_size1 / (float)num_of_threads_per_block));
h_mem_init();
d_mem_init();
cueValues = 0;
return deviceCheck;
}
int GraphCuts::checkDevice()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0)
{
return -1;
}
return deviceCount;
}
void GraphCuts::h_mem_init()
{
h_reset_mem = (int*)malloc(sizeof(int)* graph_size1);
h_graph_height = (int*)malloc(size_int);
pixelLabel = (int*)malloc(size_int);
h_pixel_mask = (bool*)malloc(sizeof(bool)* graph_size1);
for (int i = 0; i < graph_size1; i++)
{
pixelLabel[i] = 0;
h_graph_height[i] = 0;
}
for (int i = 0; i < graph_size1; i++)
{
h_reset_mem[i] = 0;
}
}
void GraphCuts::d_mem_init()
{
hipMalloc((void**)&d_left_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&d_right_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&d_down_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&d_up_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&d_push_reser, sizeof(int)* graph_size1);
hipMalloc((void**)&d_sink_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&s_left_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&s_right_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&s_down_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&s_up_weight, sizeof(int)* graph_size1);
hipMalloc((void**)&s_push_reser, sizeof(int)* graph_size1);
hipMalloc((void**)&s_sink_weight, sizeof(int)* graph_size1);
(hipMalloc((void**)&d_stochastic, sizeof(int)* num_of_blocks));
(hipMalloc((void**)&d_stochastic_pixel, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_terminate, sizeof(int)* num_of_blocks));
(hipMalloc((void**)&d_pull_left, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_pull_right, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_pull_down, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_pull_up, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_graph_heightr, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_graph_heightw, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_relabel_mask, sizeof(int)* graph_size1));
(hipMalloc((void**)&d_pixel_mask, sizeof(bool)*graph_size1));
(hipMalloc((void**)&d_over, sizeof(bool)* 1));
(hipMalloc((void**)&d_counter, sizeof(int)));
(hipMalloc((void **)&dPixelLabel, sizeof(int)* width1 * height1));
(hipMemcpy(d_left_weight, h_reset_mem, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
(hipMemcpy(d_right_weight, h_reset_mem, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
(hipMemcpy(d_down_weight, h_reset_mem, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
(hipMemcpy(d_up_weight, h_reset_mem, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
(hipMemcpy(d_push_reser, h_reset_mem, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
(hipMemcpy(d_sink_weight, h_reset_mem, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
h_relabel_mask = (int*)malloc(sizeof(int)*width1*height1);
h_stochastic = (int *)malloc(sizeof(int)* num_of_blocks);
h_stochastic_pixel = (int *)malloc(sizeof(int)* graph_size1);
for (int i = 0; i < graph_size1; i++)
h_relabel_mask[i] = 1;
(hipMemcpy(d_relabel_mask, h_relabel_mask, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
int *dpixlab = (int*)malloc(sizeof(int)*width1*height1);
for (int i = 0; i < width1 * height1; i++)
{
dpixlab[i] = 0;
h_stochastic_pixel[i] = 1;
}
for (int i = 0; i < num_of_blocks; i++)
{
h_stochastic[i] = 1;
}
(hipMemcpy(d_stochastic, h_stochastic, sizeof(int)* num_of_blocks, hipMemcpyHostToDevice));
(hipMemcpy(d_terminate, h_stochastic, sizeof(int)* num_of_blocks, hipMemcpyHostToDevice));
(hipMemcpy(d_stochastic_pixel, h_stochastic_pixel, sizeof(int)* graph_size1, hipMemcpyHostToDevice));
(hipMemcpy(dPixelLabel, dpixlab, sizeof(int)* width1 * height1, hipMemcpyHostToDevice));
free(dpixlab);
}
int GraphCuts::graphCutsSetupDataTerm()
{
if (deviceCheck < 1)
return -1;
(hipMalloc((void **)&dDataTerm, sizeof(int)* width * height * num_Labels));
(hipMemcpy(dDataTerm, dataTerm, sizeof(int)* width * height * num_Labels, hipMemcpyHostToDevice));
return 0;
}
int GraphCuts::graphCutsSetupSmoothTerm()
{
if (deviceCheck < 1)
return -1;
(hipMalloc((void **)&dSmoothTerm, sizeof(int)* num_Labels * num_Labels));
(hipMemcpy(dSmoothTerm, smoothTerm, sizeof(int)* num_Labels * num_Labels, hipMemcpyHostToDevice));
return 0;
}
int GraphCuts::graphCutsSetupHCue()
{
if (deviceCheck < 1)
return -1;
(hipMalloc((void **)&dHcue, sizeof(int)* width * height));
(hipMemcpy(dHcue, hCue, sizeof(int)* width * height, hipMemcpyHostToDevice));
cueValues = 1;
return 0;
}
int GraphCuts::graphCutsSetupVCue()
{
if (deviceCheck < 1)
return -1;
(hipMalloc((void **)&dVcue, sizeof(int)* width * height));
(hipMemcpy(dVcue, vCue, sizeof(int)* width * height, hipMemcpyHostToDevice));
return 0;
}
int GraphCuts::graphCutsSetupGraph()
{
if (deviceCheck < 1)
return -1;
int alpha_label = 1;
for (int i = 0; i < graph_size1; i++)
{
h_reset_mem[i] = 0;
h_graph_height[i] = 0;
}
int blockEdge = (int)((ceil)((float)(width * height) / (float)256));
dim3 block_weight(256, 1, 1);
dim3 grid_weight(blockEdge, 1, 1);
if (cueValues == 1)
{
hipLaunchKernelGGL(( CudaWeightCue) , dim3(grid_weight), dim3(block_weight) , 0, 0, alpha_label, d_left_weight, d_right_weight, d_down_weight,
d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm,
dSmoothTerm, dHcue, dVcue, width, height, 2);
}
else
{
hipLaunchKernelGGL(( CudaWeight) , dim3(grid_weight), dim3(block_weight) , 0, 0, alpha_label, d_left_weight, d_right_weight, d_down_weight,
d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm,
dSmoothTerm, width, height, 2);
}
int *temp_left_weight, *temp_right_weight, *temp_down_weight, *temp_up_weight, *temp_source_weight, *temp_terminal_weight;
(hipMalloc((void **)&temp_left_weight, sizeof(int)* graph_size1));
(hipMalloc((void **)&temp_right_weight, sizeof(int)* graph_size1));
(hipMalloc((void **)&temp_down_weight, sizeof(int)* graph_size1));
(hipMalloc((void **)&temp_up_weight, sizeof(int)* graph_size1));
(hipMalloc((void **)&temp_source_weight, sizeof(int)* graph_size1));
(hipMalloc((void **)&temp_terminal_weight, sizeof(int)* graph_size1));
int blockEdge1 = (int)((ceil)((float)(width1 * height1) / (float)256));
dim3 block_weight1(256, 1, 1);
dim3 grid_weight1(blockEdge1, 1, 1);
hipLaunchKernelGGL(( adjustedgeweight) , dim3(grid_weight1), dim3(block_weight1) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser,
d_sink_weight, temp_left_weight, temp_right_weight, temp_down_weight, temp_up_weight,
temp_source_weight, temp_terminal_weight, width, height, graph_size, width1,
height1, graph_size1);
hipLaunchKernelGGL(( copyedgeweight) , dim3(grid_weight1), dim3(block_weight1) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight,
temp_left_weight, temp_right_weight, temp_down_weight, temp_up_weight, temp_source_weight,
temp_terminal_weight, d_pull_left, d_pull_right, d_pull_down, d_pull_up, d_relabel_mask,
d_graph_heightr, d_graph_heightw, width, height, graph_size, width1, height1, graph_size1);
(hipFree(temp_left_weight));
(hipFree(temp_right_weight));
(hipFree(temp_up_weight));
(hipFree(temp_down_weight));
(hipFree(temp_source_weight));
(hipFree(temp_terminal_weight));
return 0;
}
int GraphCuts::graphCutsAtomicOptimize()
{
if (deviceCheck < 1)
{
return -1;
}
graphCutsAtomic();
bfsLabeling();
return 0;
}
int GraphCuts::graphCutsStochasticOptimize()
{
if (deviceCheck < 1)
{
return -1;
}
graphCutsStochastic();
bfsLabeling();
return 0;
}
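// Overview of the two solvers below: after one kernel_push1_start_* pass, the
// host loop runs a push kernel followed by a relabel kernel each iteration,
// swapping the roles of d_graph_heightr and d_graph_heightw as the read/write
// height buffers. Every 10th iteration d_push_reser is checked against the
// s_push_reser snapshot (kernel_push_stochastic1) to update a convergence
// flag, and every 11th iteration the per-block activity/termination flags are
// refreshed (kernel_push_*2 followed by kernel_End). The atomic variant stops
// once the convergence check has fired twice; the stochastic variant stops on
// the first firing (outside every 121st iteration) or after 500 iterations.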
void GraphCuts::graphCutsAtomic()
{
dim3 block_push(threads_x, threads_y, 1);
dim3 grid_push(blocks_x, blocks_y, 1);
dim3 d_block(num_of_threads_per_block, 1, 1);
dim3 d_grid(num_of_blocks, 1, 1);
bool finish = true;
counter = num_of_blocks;
int numThreadsEnd = 256, numBlocksEnd = 1;
if (numThreadsEnd > counter)
{
numBlocksEnd = 1;
numThreadsEnd = counter;
}
else
{
numBlocksEnd = (int)ceil(counter / (double)numThreadsEnd);
}
dim3 End_block(numThreadsEnd, 1, 1);
dim3 End_grid(numBlocksEnd, 1, 1);
int *d_counter;
bool *d_finish;
for (int i = 0; i < num_of_blocks; i++)
{
h_stochastic[i] = 0;
}
(hipMalloc((void**)&d_counter, sizeof(int)));
(hipMalloc((void**)&d_finish, sizeof(bool)));
(hipMemcpy(d_counter, &counter, sizeof(int), hipMemcpyHostToDevice));
counter = 0;
int *d_relabel;
(hipMalloc((void**)&d_relabel, sizeof(int)));
int h_relabel = 0;
int block_num = width1 / 32;
int *d_block_num;
(hipMalloc((void**)&d_block_num, sizeof(int)));
(hipMemcpy(d_block_num, &block_num, sizeof(int), hipMemcpyHostToDevice));
int h_count_blocks = num_of_blocks;
int *d_count_blocks;
(hipMalloc((void**)&d_count_blocks, sizeof(int)));
(hipMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), hipMemcpyHostToDevice));
h_count_blocks = 0;
(hipMemcpy(d_relabel, &h_relabel, sizeof(int), hipMemcpyHostToDevice));
counter = 1;
hipLaunchKernelGGL(( kernel_push1_start_atomic) , dim3(grid_push), dim3(block_push) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_relabel, d_stochastic, d_counter, d_finish);
int h_terminate_condition = 0;
(hipDeviceSynchronize());
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
do
{
if (counter % 10 == 0)
{
finish = true;
(hipMemcpy(d_finish, &finish, sizeof(bool), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_push_stochastic1) , dim3(grid_push), dim3(block_push) , 0, 0, d_push_reser, s_push_reser, d_count_blocks, d_finish, d_block_num, width1);
(hipMemcpy(&finish, d_finish, sizeof(bool), hipMemcpyDeviceToHost));
if (finish == false)
h_terminate_condition++;
}
if (counter % 11 == 0)
{
(hipMemset(d_terminate, 0, sizeof(int)*num_of_blocks));
h_count_blocks = 0;
(hipMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_push_atomic2) , dim3(grid_push), dim3(block_push) , 0, 0, d_terminate, d_push_reser, s_push_reser, d_block_num, width1);
hipLaunchKernelGGL(( kernel_End) , dim3(End_grid), dim3(End_block) , 0, 0, d_terminate, d_count_blocks, d_counter);
}
if (counter % 2 == 0)
{
hipLaunchKernelGGL(( kernel_push1_atomic) , dim3(grid_push), dim3(block_push) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1);
hipLaunchKernelGGL(( kernel_relabel_atomic) , dim3(grid_push), dim3(block_push) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1);
}
else
{
hipLaunchKernelGGL(( kernel_push1_atomic) , dim3(grid_push), dim3(block_push) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1);
hipLaunchKernelGGL(( kernel_relabel_atomic) , dim3(grid_push), dim3(block_push) , 0, 0, d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1);
}
counter++;
} while (h_terminate_condition != 2);
(hipEventRecord(stop, 0));
(hipEventSynchronize(stop));
}
void GraphCuts::graphCutsStochastic()
{
dim3 block_push(threads_x, threads_y, 1);
dim3 grid_push(blocks_x, blocks_y, 1);
dim3 d_block(num_of_threads_per_block, 1, 1);
dim3 d_grid(num_of_blocks, 1, 1);
bool finish = true;
counter = num_of_blocks;
int numThreadsEnd = 256, numBlocksEnd = 1;
if (numThreadsEnd > counter)
{
numBlocksEnd = 1;
numThreadsEnd = counter;
}
else
{
numBlocksEnd = (int)ceil(counter / (double)numThreadsEnd);
}
dim3 End_block(numThreadsEnd, 1, 1);
dim3 End_grid(numBlocksEnd, 1, 1);
bool *d_finish;
for (int i = 0; i < num_of_blocks; i++)
{
h_stochastic[i] = 0;
}
(hipMalloc((void**)&d_counter, sizeof(int)));
(hipMalloc((void**)&d_finish, sizeof(bool)));
(hipMemcpy(d_counter, &counter, sizeof(int), hipMemcpyHostToDevice));
counter = 0;
int *d_relabel;
(hipMalloc((void**)&d_relabel, sizeof(int)));
int h_relabel = 0;
int block_num = width1 / 32;
int *d_block_num;
(hipMalloc((void**)&d_block_num, sizeof(int)));
(hipMemcpy(d_block_num, &block_num, sizeof(int), hipMemcpyHostToDevice));
int h_count_blocks = num_of_blocks;
int *d_count_blocks;
(hipMalloc((void**)&d_count_blocks, sizeof(int)));
(hipMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), hipMemcpyHostToDevice));
h_count_blocks = 0;
(hipMemcpy(d_relabel, &h_relabel, sizeof(int), hipMemcpyHostToDevice));
counter = 1;
kernel_push1_start_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_relabel, d_stochastic, d_counter, d_finish);
int h_terminate_condition = 0;
(hipDeviceSynchronize());
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
do
{
if (counter % 10 == 0)
{
finish = true;
(hipMemcpy(d_finish, &finish, sizeof(bool), hipMemcpyHostToDevice));
kernel_push_stochastic1 << <grid_push, block_push >> >(d_push_reser, s_push_reser, d_count_blocks, d_finish, d_block_num, width1);
(hipMemcpy(&finish, d_finish, sizeof(bool), hipMemcpyDeviceToHost));
}
if (counter % 11 == 0)
{
(hipMemset(d_stochastic, 0, sizeof(int)*num_of_blocks));
(hipMemset(d_terminate, 0, sizeof(int)*num_of_blocks));
h_count_blocks = 0;
(hipMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), hipMemcpyHostToDevice));
kernel_push_stochastic2 << <grid_push, block_push >> >(d_terminate, d_relabel_mask, d_push_reser, s_push_reser, d_stochastic, d_block_num, width1);
kernel_End << <End_grid, End_block >> >(d_terminate, d_count_blocks, d_counter);
if (finish == false && counter % 121 != 0 && counter > 0)
h_terminate_condition++;
}
if (counter % 2 == 0)
{
kernel_push1_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
kernel_relabel_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
}
else
{
kernel_push1_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
kernel_relabel_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
}
counter++;
} while (h_terminate_condition == 0 && counter < 500);
(hipEventRecord(stop, 0));
(hipEventSynchronize(stop));
float time;
(hipEventElapsedTime(&time, start, stop));
printf("TT Cuts :: %f ms\n", time);
}
void GraphCuts::bfsLabeling()
{
dim3 block_push(threads_x, threads_y, 1);
dim3 grid_push(blocks_x, blocks_y, 1);
dim3 d_block(num_of_threads_per_block, 1, 1);
dim3 d_grid(num_of_blocks, 1, 1);
(hipMemcpy(d_graph_heightr, h_graph_height, size_int, hipMemcpyHostToDevice));
for (int i = 0; i < graph_size; i++)
h_pixel_mask[i] = true;
(hipMemcpy(d_pixel_mask, h_pixel_mask, sizeof(bool)* graph_size1, hipMemcpyHostToDevice));
kernel_bfs_t << <d_grid, d_block, 0 >> >(d_push_reser, d_sink_weight, d_graph_heightr, d_pixel_mask, graph_size, width, height, graph_size1, width1, height1);
counter = 1;
(hipMemcpy(d_counter, &counter, sizeof(int), hipMemcpyHostToDevice));
do
{
h_over = false;
(hipMemcpy(d_over, &h_over, sizeof(bool), hipMemcpyHostToDevice));
kernel_bfs << < d_grid, d_block, 0 >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_graph_heightr, d_pixel_mask,
graph_size, width, height, graph_size1, width1, height1, d_over, d_counter);
(hipMemcpy(&h_over, d_over, sizeof(bool), hipMemcpyDeviceToHost));
counter++;
(hipMemcpy(d_counter, &counter, sizeof(int), hipMemcpyHostToDevice));
} while (h_over);
(hipMemcpy(h_graph_height, d_graph_heightr, size_int, hipMemcpyDeviceToHost));
}
int GraphCuts::graphCutsGetResult()
{
if (deviceCheck < 1)
return -1;
int alpha = 1;
for (int i = 0; i < graph_size1; i++)
{
int row_here = i / width1, col_here = i % width1;
if (h_graph_height[i]>0 && row_here < height && row_here > 0 && col_here < width && col_here > 0) {
pixelLabel[i] = alpha;
}
}
return 0;
}
void GraphCuts::graphCutsFreeMem()
{
free(h_reset_mem);
free(h_graph_height);
free(pixelLabel);
free(h_pixel_mask);
free(h_relabel_mask);
free(h_stochastic);
free(h_stochastic_pixel);
free(hCue);
free(vCue);
free(dataTerm);
free(smoothTerm);
(hipFree(d_left_weight));
(hipFree(d_right_weight));
(hipFree(d_down_weight));
(hipFree(d_up_weight));
(hipFree(d_sink_weight));
(hipFree(d_push_reser));
(hipFree(d_pull_left));
(hipFree(d_pull_right));
(hipFree(d_pull_down));
(hipFree(d_pull_up));
(hipFree(d_graph_heightr));
(hipFree(d_graph_heightw));
(hipFree(s_left_weight));
(hipFree(s_right_weight));
(hipFree(s_down_weight));
(hipFree(s_up_weight));
(hipFree(s_push_reser));
(hipFree(s_sink_weight));
(hipFree(d_stochastic));
(hipFree(d_stochastic_pixel));
(hipFree(d_terminate));
(hipFree(d_relabel_mask));
(hipFree(d_pixel_mask));
(hipFree(d_over));
(hipFree(d_counter));
(hipFree(dPixelLabel));
}
| e85d8a4a7f30c075a01b4112c106d2d400f654c3.cu | #include "GraphCuts.h"
int GraphCuts::graphCutsInit(int widthGrid, int heightGrid, int labels)
{
deviceCount = checkDevice();
printf("No. of devices %d\n", deviceCount);
if (deviceCount < 1)
return -1;
int cuda_device = 0;
cudaSetDevice(cuda_device);
cudaDeviceProp device_properties;
(cudaGetDeviceProperties(&device_properties, cuda_device));
if ((3 <= device_properties.major) && (device_properties.minor < 1))
deviceCheck = 2;
else
if ((3 <= device_properties.major) && (device_properties.minor >= 1))
deviceCheck = 1;
else
deviceCheck = 0;
width = widthGrid;
height = heightGrid;
num_Labels = labels;
blocks_x = 1;
blocks_y = 1;
num_of_blocks = 1;
num_of_threads_per_block = 256;
threads_x = 32;
threads_y = 8;
width1 = threads_x * ((int)ceil((float)width / (float)threads_x));
height1 = threads_y * ((int)ceil((float)height / (float)threads_y));
graph_size = width * height;
graph_size1 = width1 * height1;
size_int = sizeof(int)* graph_size1;
blocks_x = (int)((ceil)((float)width1 / (float)threads_x));
blocks_y = (int)((ceil)((float)height1 / (float)threads_y));
num_of_blocks = (int)((ceil)((float)graph_size1 / (float)num_of_threads_per_block));
h_mem_init();
d_mem_init();
cueValues = 0;
return deviceCheck;
}
int GraphCuts::checkDevice()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
{
return -1;
}
return deviceCount;
}
void GraphCuts::h_mem_init()
{
h_reset_mem = (int*)malloc(sizeof(int)* graph_size1);
h_graph_height = (int*)malloc(size_int);
pixelLabel = (int*)malloc(size_int);
h_pixel_mask = (bool*)malloc(sizeof(bool)* graph_size1);
for (int i = 0; i < graph_size1; i++)
{
pixelLabel[i] = 0;
h_graph_height[i] = 0;
}
for (int i = 0; i < graph_size1; i++)
{
h_reset_mem[i] = 0;
}
}
void GraphCuts::d_mem_init()
{
cudaMalloc((void**)&d_left_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&d_right_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&d_down_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&d_up_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&d_push_reser, sizeof(int)* graph_size1);
cudaMalloc((void**)&d_sink_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&s_left_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&s_right_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&s_down_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&s_up_weight, sizeof(int)* graph_size1);
cudaMalloc((void**)&s_push_reser, sizeof(int)* graph_size1);
cudaMalloc((void**)&s_sink_weight, sizeof(int)* graph_size1);
(cudaMalloc((void**)&d_stochastic, sizeof(int)* num_of_blocks));
(cudaMalloc((void**)&d_stochastic_pixel, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_terminate, sizeof(int)* num_of_blocks));
(cudaMalloc((void**)&d_pull_left, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_pull_right, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_pull_down, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_pull_up, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_graph_heightr, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_graph_heightw, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_relabel_mask, sizeof(int)* graph_size1));
(cudaMalloc((void**)&d_pixel_mask, sizeof(bool)*graph_size1));
(cudaMalloc((void**)&d_over, sizeof(bool)* 1));
(cudaMalloc((void**)&d_counter, sizeof(int)));
(cudaMalloc((void **)&dPixelLabel, sizeof(int)* width1 * height1));
(cudaMemcpy(d_left_weight, h_reset_mem, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
(cudaMemcpy(d_right_weight, h_reset_mem, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
(cudaMemcpy(d_down_weight, h_reset_mem, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
(cudaMemcpy(d_up_weight, h_reset_mem, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
(cudaMemcpy(d_push_reser, h_reset_mem, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
(cudaMemcpy(d_sink_weight, h_reset_mem, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
h_relabel_mask = (int*)malloc(sizeof(int)*width1*height1);
h_stochastic = (int *)malloc(sizeof(int)* num_of_blocks);
h_stochastic_pixel = (int *)malloc(sizeof(int)* graph_size1);
for (int i = 0; i < graph_size1; i++)
h_relabel_mask[i] = 1;
(cudaMemcpy(d_relabel_mask, h_relabel_mask, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
int *dpixlab = (int*)malloc(sizeof(int)*width1*height1);
for (int i = 0; i < width1 * height1; i++)
{
dpixlab[i] = 0;
h_stochastic_pixel[i] = 1;
}
for (int i = 0; i < num_of_blocks; i++)
{
h_stochastic[i] = 1;
}
(cudaMemcpy(d_stochastic, h_stochastic, sizeof(int)* num_of_blocks, cudaMemcpyHostToDevice));
(cudaMemcpy(d_terminate, h_stochastic, sizeof(int)* num_of_blocks, cudaMemcpyHostToDevice));
(cudaMemcpy(d_stochastic_pixel, h_stochastic_pixel, sizeof(int)* graph_size1, cudaMemcpyHostToDevice));
(cudaMemcpy(dPixelLabel, dpixlab, sizeof(int)* width1 * height1, cudaMemcpyHostToDevice));
free(dpixlab);
}
int GraphCuts::graphCutsSetupDataTerm()
{
if (deviceCheck < 1)
return -1;
(cudaMalloc((void **)&dDataTerm, sizeof(int)* width * height * num_Labels));
(cudaMemcpy(dDataTerm, dataTerm, sizeof(int)* width * height * num_Labels, cudaMemcpyHostToDevice));
return 0;
}
int GraphCuts::graphCutsSetupSmoothTerm()
{
if (deviceCheck < 1)
return -1;
(cudaMalloc((void **)&dSmoothTerm, sizeof(int)* num_Labels * num_Labels));
(cudaMemcpy(dSmoothTerm, smoothTerm, sizeof(int)* num_Labels * num_Labels, cudaMemcpyHostToDevice));
return 0;
}
int GraphCuts::graphCutsSetupHCue()
{
if (deviceCheck < 1)
return -1;
(cudaMalloc((void **)&dHcue, sizeof(int)* width * height));
(cudaMemcpy(dHcue, hCue, sizeof(int)* width * height, cudaMemcpyHostToDevice));
cueValues = 1;
return 0;
}
int GraphCuts::graphCutsSetupVCue()
{
if (deviceCheck < 1)
return -1;
(cudaMalloc((void **)&dVcue, sizeof(int)* width * height));
(cudaMemcpy(dVcue, vCue, sizeof(int)* width * height, cudaMemcpyHostToDevice));
return 0;
}
int GraphCuts::graphCutsSetupGraph()
{
if (deviceCheck < 1)
return -1;
int alpha_label = 1;
for (int i = 0; i < graph_size1; i++)
{
h_reset_mem[i] = 0;
h_graph_height[i] = 0;
}
int blockEdge = (int)((ceil)((float)(width * height) / (float)256));
dim3 block_weight(256, 1, 1);
dim3 grid_weight(blockEdge, 1, 1);
if (cueValues == 1)
{
CudaWeightCue <<< grid_weight, block_weight >>>(alpha_label, d_left_weight, d_right_weight, d_down_weight,
d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm,
dSmoothTerm, dHcue, dVcue, width, height, 2);
}
else
{
CudaWeight <<< grid_weight, block_weight >>>(alpha_label, d_left_weight, d_right_weight, d_down_weight,
d_up_weight, d_push_reser, d_sink_weight, dPixelLabel, dDataTerm,
dSmoothTerm, width, height, 2);
}
int *temp_left_weight, *temp_right_weight, *temp_down_weight, *temp_up_weight, *temp_source_weight, *temp_terminal_weight;
(cudaMalloc((void **)&temp_left_weight, sizeof(int)* graph_size1));
(cudaMalloc((void **)&temp_right_weight, sizeof(int)* graph_size1));
(cudaMalloc((void **)&temp_down_weight, sizeof(int)* graph_size1));
(cudaMalloc((void **)&temp_up_weight, sizeof(int)* graph_size1));
(cudaMalloc((void **)&temp_source_weight, sizeof(int)* graph_size1));
(cudaMalloc((void **)&temp_terminal_weight, sizeof(int)* graph_size1));
int blockEdge1 = (int)((ceil)((float)(width1 * height1) / (float)256));
dim3 block_weight1(256, 1, 1);
dim3 grid_weight1(blockEdge1, 1, 1);
adjustedgeweight <<<grid_weight1, block_weight1 >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser,
d_sink_weight, temp_left_weight, temp_right_weight, temp_down_weight, temp_up_weight,
temp_source_weight, temp_terminal_weight, width, height, graph_size, width1,
height1, graph_size1);
copyedgeweight <<<grid_weight1, block_weight1 >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_push_reser, d_sink_weight,
temp_left_weight, temp_right_weight, temp_down_weight, temp_up_weight, temp_source_weight,
temp_terminal_weight, d_pull_left, d_pull_right, d_pull_down, d_pull_up, d_relabel_mask,
d_graph_heightr, d_graph_heightw, width, height, graph_size, width1, height1, graph_size1);
(cudaFree(temp_left_weight));
(cudaFree(temp_right_weight));
(cudaFree(temp_up_weight));
(cudaFree(temp_down_weight));
(cudaFree(temp_source_weight));
(cudaFree(temp_terminal_weight));
return 0;
}
int GraphCuts::graphCutsAtomicOptimize()
{
if (deviceCheck < 1)
{
return -1;
}
graphCutsAtomic();
bfsLabeling();
return 0;
}
int GraphCuts::graphCutsStochasticOptimize()
{
if (deviceCheck < 1)
{
return -1;
}
graphCutsStochastic();
bfsLabeling();
return 0;
}
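// Overview of the two solvers below: after one kernel_push1_start_* pass, the
// host loop runs a push kernel followed by a relabel kernel each iteration,
// swapping the roles of d_graph_heightr and d_graph_heightw as the read/write
// height buffers. Every 10th iteration d_push_reser is checked against the
// s_push_reser snapshot (kernel_push_stochastic1) to update a convergence
// flag, and every 11th iteration the per-block activity/termination flags are
// refreshed (kernel_push_*2 followed by kernel_End). The atomic variant stops
// once the convergence check has fired twice; the stochastic variant stops on
// the first firing (outside every 121st iteration) or after 500 iterations.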
void GraphCuts::graphCutsAtomic()
{
dim3 block_push(threads_x, threads_y, 1);
dim3 grid_push(blocks_x, blocks_y, 1);
dim3 d_block(num_of_threads_per_block, 1, 1);
dim3 d_grid(num_of_blocks, 1, 1);
bool finish = true;
counter = num_of_blocks;
int numThreadsEnd = 256, numBlocksEnd = 1;
if (numThreadsEnd > counter)
{
numBlocksEnd = 1;
numThreadsEnd = counter;
}
else
{
numBlocksEnd = (int)ceil(counter / (double)numThreadsEnd);
}
dim3 End_block(numThreadsEnd, 1, 1);
dim3 End_grid(numBlocksEnd, 1, 1);
int *d_counter;
bool *d_finish;
for (int i = 0; i < num_of_blocks; i++)
{
h_stochastic[i] = 0;
}
(cudaMalloc((void**)&d_counter, sizeof(int)));
(cudaMalloc((void**)&d_finish, sizeof(bool)));
(cudaMemcpy(d_counter, &counter, sizeof(int), cudaMemcpyHostToDevice));
counter = 0;
int *d_relabel;
(cudaMalloc((void**)&d_relabel, sizeof(int)));
int h_relabel = 0;
int block_num = width1 / 32;
int *d_block_num;
(cudaMalloc((void**)&d_block_num, sizeof(int)));
(cudaMemcpy(d_block_num, &block_num, sizeof(int), cudaMemcpyHostToDevice));
int h_count_blocks = num_of_blocks;
int *d_count_blocks;
(cudaMalloc((void**)&d_count_blocks, sizeof(int)));
(cudaMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), cudaMemcpyHostToDevice));
h_count_blocks = 0;
(cudaMemcpy(d_relabel, &h_relabel, sizeof(int), cudaMemcpyHostToDevice));
counter = 1;
kernel_push1_start_atomic <<<grid_push, block_push >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_relabel, d_stochastic, d_counter, d_finish);
int h_terminate_condition = 0;
(cudaDeviceSynchronize());
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
do
{
if (counter % 10 == 0)
{
finish = true;
(cudaMemcpy(d_finish, &finish, sizeof(bool), cudaMemcpyHostToDevice));
kernel_push_stochastic1 <<<grid_push, block_push >>>(d_push_reser, s_push_reser, d_count_blocks, d_finish, d_block_num, width1);
(cudaMemcpy(&finish, d_finish, sizeof(bool), cudaMemcpyDeviceToHost));
if (finish == false)
h_terminate_condition++;
}
if (counter % 11 == 0)
{
(cudaMemset(d_terminate, 0, sizeof(int)*num_of_blocks));
h_count_blocks = 0;
(cudaMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), cudaMemcpyHostToDevice));
kernel_push_atomic2 <<<grid_push, block_push >>>(d_terminate, d_push_reser, s_push_reser, d_block_num, width1);
kernel_End <<<End_grid, End_block >>>(d_terminate, d_count_blocks, d_counter);
}
if (counter % 2 == 0)
{
kernel_push1_atomic <<<grid_push, block_push >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1);
kernel_relabel_atomic <<<grid_push, block_push >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1);
}
else
{
kernel_push1_atomic <<<grid_push, block_push >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1);
kernel_relabel_atomic <<<grid_push, block_push >>>(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser, d_pull_left, d_pull_right, d_pull_down, d_pull_up,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1);
}
counter++;
} while (h_terminate_condition != 2);
(cudaEventRecord(stop, 0));
(cudaEventSynchronize(stop));
}
void GraphCuts::graphCutsStochastic()
{
dim3 block_push(threads_x, threads_y, 1);
dim3 grid_push(blocks_x, blocks_y, 1);
dim3 d_block(num_of_threads_per_block, 1, 1);
dim3 d_grid(num_of_blocks, 1, 1);
bool finish = true;
counter = num_of_blocks;
int numThreadsEnd = 256, numBlocksEnd = 1;
if (numThreadsEnd > counter)
{
numBlocksEnd = 1;
numThreadsEnd = counter;
}
else
{
numBlocksEnd = (int)ceil(counter / (double)numThreadsEnd);
}
dim3 End_block(numThreadsEnd, 1, 1);
dim3 End_grid(numBlocksEnd, 1, 1);
bool *d_finish;
for (int i = 0; i < num_of_blocks; i++)
{
h_stochastic[i] = 0;
}
(cudaMalloc((void**)&d_counter, sizeof(int)));
(cudaMalloc((void**)&d_finish, sizeof(bool)));
(cudaMemcpy(d_counter, &counter, sizeof(int), cudaMemcpyHostToDevice));
counter = 0;
int *d_relabel;
(cudaMalloc((void**)&d_relabel, sizeof(int)));
int h_relabel = 0;
int block_num = width1 / 32;
int *d_block_num;
(cudaMalloc((void**)&d_block_num, sizeof(int)));
(cudaMemcpy(d_block_num, &block_num, sizeof(int), cudaMemcpyHostToDevice));
int h_count_blocks = num_of_blocks;
int *d_count_blocks;
(cudaMalloc((void**)&d_count_blocks, sizeof(int)));
(cudaMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), cudaMemcpyHostToDevice));
h_count_blocks = 0;
(cudaMemcpy(d_relabel, &h_relabel, sizeof(int), cudaMemcpyHostToDevice));
counter = 1;
kernel_push1_start_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_relabel, d_stochastic, d_counter, d_finish);
int h_terminate_condition = 0;
(cudaDeviceSynchronize());
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
do
{
if (counter % 10 == 0)
{
finish = true;
(cudaMemcpy(d_finish, &finish, sizeof(bool), cudaMemcpyHostToDevice));
kernel_push_stochastic1 << <grid_push, block_push >> >(d_push_reser, s_push_reser, d_count_blocks, d_finish, d_block_num, width1);
(cudaMemcpy(&finish, d_finish, sizeof(bool), cudaMemcpyDeviceToHost));
}
if (counter % 11 == 0)
{
(cudaMemset(d_stochastic, 0, sizeof(int)*num_of_blocks));
(cudaMemset(d_terminate, 0, sizeof(int)*num_of_blocks));
h_count_blocks = 0;
(cudaMemcpy(d_count_blocks, &h_count_blocks, sizeof(int), cudaMemcpyHostToDevice));
kernel_push_stochastic2 << <grid_push, block_push >> >(d_terminate, d_relabel_mask, d_push_reser, s_push_reser, d_stochastic, d_block_num, width1);
kernel_End << <End_grid, End_block >> >(d_terminate, d_count_blocks, d_counter);
if (finish == false && counter % 121 != 0 && counter > 0)
h_terminate_condition++;
}
if (counter % 2 == 0)
{
kernel_push1_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
kernel_relabel_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightr, d_graph_heightw, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
}
else
{
kernel_push1_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
kernel_relabel_stochastic << <grid_push, block_push >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight,
d_sink_weight, d_push_reser,
d_relabel_mask, d_graph_heightw, d_graph_heightr, graph_size, width, height,
graph_size1, width1, height1, d_stochastic, d_block_num);
}
counter++;
} while (h_terminate_condition == 0 && counter < 500);
(cudaEventRecord(stop, 0));
(cudaEventSynchronize(stop));
float time;
(cudaEventElapsedTime(&time, start, stop));
printf("TT Cuts :: %f ms\n", time);
}
void GraphCuts::bfsLabeling()
{
dim3 block_push(threads_x, threads_y, 1);
dim3 grid_push(blocks_x, blocks_y, 1);
dim3 d_block(num_of_threads_per_block, 1, 1);
dim3 d_grid(num_of_blocks, 1, 1);
(cudaMemcpy(d_graph_heightr, h_graph_height, size_int, cudaMemcpyHostToDevice));
for (int i = 0; i < graph_size; i++)
h_pixel_mask[i] = true;
(cudaMemcpy(d_pixel_mask, h_pixel_mask, sizeof(bool)* graph_size1, cudaMemcpyHostToDevice));
kernel_bfs_t << <d_grid, d_block, 0 >> >(d_push_reser, d_sink_weight, d_graph_heightr, d_pixel_mask, graph_size, width, height, graph_size1, width1, height1);
counter = 1;
(cudaMemcpy(d_counter, &counter, sizeof(int), cudaMemcpyHostToDevice));
do
{
h_over = false;
(cudaMemcpy(d_over, &h_over, sizeof(bool), cudaMemcpyHostToDevice));
kernel_bfs << < d_grid, d_block, 0 >> >(d_left_weight, d_right_weight, d_down_weight, d_up_weight, d_graph_heightr, d_pixel_mask,
graph_size, width, height, graph_size1, width1, height1, d_over, d_counter);
(cudaMemcpy(&h_over, d_over, sizeof(bool), cudaMemcpyDeviceToHost));
counter++;
(cudaMemcpy(d_counter, &counter, sizeof(int), cudaMemcpyHostToDevice));
} while (h_over);
(cudaMemcpy(h_graph_height, d_graph_heightr, size_int, cudaMemcpyDeviceToHost));
}
int GraphCuts::graphCutsGetResult()
{
if (deviceCheck < 1)
return -1;
int alpha = 1;
for (int i = 0; i < graph_size1; i++)
{
int row_here = i / width1, col_here = i % width1;
if (h_graph_height[i]>0 && row_here < height && row_here > 0 && col_here < width && col_here > 0) {
pixelLabel[i] = alpha;
}
}
return 0;
}
void GraphCuts::graphCutsFreeMem()
{
free(h_reset_mem);
free(h_graph_height);
free(pixelLabel);
free(h_pixel_mask);
free(h_relabel_mask);
free(h_stochastic);
free(h_stochastic_pixel);
free(hCue);
free(vCue);
free(dataTerm);
free(smoothTerm);
(cudaFree(d_left_weight));
(cudaFree(d_right_weight));
(cudaFree(d_down_weight));
(cudaFree(d_up_weight));
(cudaFree(d_sink_weight));
(cudaFree(d_push_reser));
(cudaFree(d_pull_left));
(cudaFree(d_pull_right));
(cudaFree(d_pull_down));
(cudaFree(d_pull_up));
(cudaFree(d_graph_heightr));
(cudaFree(d_graph_heightw));
(cudaFree(s_left_weight));
(cudaFree(s_right_weight));
(cudaFree(s_down_weight));
(cudaFree(s_up_weight));
(cudaFree(s_push_reser));
(cudaFree(s_sink_weight));
(cudaFree(d_stochastic));
(cudaFree(d_stochastic_pixel));
(cudaFree(d_terminate));
(cudaFree(d_relabel_mask));
(cudaFree(d_pixel_mask));
(cudaFree(d_over));
(cudaFree(d_counter));
(cudaFree(dPixelLabel));
}
|
ee4ca7ccb09d6a9009702a62fbf6c0a83631e70a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
// number of threads per block.
// multiple of warp size, 32.
#define BLOCK_SIZE 1024
// Note: matrices are stored in column major order; i.e. the array elements in
// the (m x n) matrix C are stored in the sequence: {C_00, C_10, ..., C_m0,
// C_01, C_11, ..., C_m1, C_02, ..., C_0n, C_1n, ..., C_mn}
void MMult1(long m, long n, long k, double *a, double *b, double *c) {
// optimal loop ordering for column-major matrices
#pragma omp parallel for schedule(static)
for (long j = 0; j < n; j++) {
for (long p = 0; p < k; p++) {
double B_pj = b[p+j*k];
for (long i = 0; i < m; i++) {
double A_ip = a[i+p*m];
double C_ij = c[i+j*m];
C_ij = C_ij + A_ip * B_pj;
c[i+j*m] = C_ij;
}
}
}
}
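// Worked example of the layout described above: a (m=2 x n=3) matrix
//   [ c00 c01 c02 ]
//   [ c10 c11 c12 ]
// is stored as {c00, c10, c01, c11, c02, c12}, so element (i,j) lives at
// c[i + j*m]; e.g. (i=1, j=2) maps to c[1 + 2*2] = c[5], which holds c12.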
// compute the partial dot product of N-vectors x and y.
// sum stores terms of the dot product
__global__
void kernel_dot2(long N, double* dotprod, const double* x, const double* y){
__shared__ double shared_xy[BLOCK_SIZE]; // element-wise product of x and y
long idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) shared_xy[threadIdx.x] = x[idx] * y[idx];
else shared_xy[threadIdx.x] = 0;
__syncthreads();
// s >>= 1 means "set s to itself shifted by one bit to the right", i.e., a division by 2
// write to shared memory with threadIdx.x rather than the global idx
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s) {
shared_xy[threadIdx.x] += shared_xy[threadIdx.x + s];
}
__syncthreads();
}
// write to global memory
if (threadIdx.x == 0) dotprod[blockIdx.x] = shared_xy[threadIdx.x];
}
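// Walkthrough of the tree reduction above for a block of 8 threads with
// shared_xy = {a0, ..., a7}:
//   s = 4: threads 0-3 add shared_xy[t+4] -> {a0+a4, a1+a5, a2+a6, a3+a7, ...}
//   s = 2: threads 0-1 add shared_xy[t+2] -> {a0+a4+a2+a6, a1+a5+a3+a7, ...}
//   s = 1: thread 0 adds shared_xy[1], leaving the block's partial dot product
//          in shared_xy[0], which is then written to dotprod[blockIdx.x].
// The __syncthreads() after every halving keeps the reads and writes ordered.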
// the summation kernel from our class example gpu16.cu
__global__ void reduction_kernel2(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
// s >>= 1 means "set s to itself shifted by one bit to the right", i.e., a division by 2
// write to shared memory with threadIdx.x rather than the global idx
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s) {
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
// write to global memory
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
int main(int argc, char** argv) {
long N = (1UL<<25); // 2^25
double *x, *y, *device_dotprod;
double dotprod, dotprod_ref;
hipMallocManaged(&x, N * sizeof(double));
hipMallocManaged(&y, N * sizeof(double));
// initialize data and get reference solution
dotprod_ref = 0;
for(long i=0; i<N; i++) {
x[i] = drand48();
y[i] = drand48();
dotprod_ref += x[i] * y[i];
}
// scratch buffer large enough to hold the partial results of every level of the block-wise reduction
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
hipMalloc(&device_dotprod, N_work*sizeof(double));
// now check with GPU
long N_block = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( kernel_dot2), dim3(N_block),dim3(BLOCK_SIZE), 0, 0, N, device_dotprod, x, y);
while (N_block > 1) {
long N_reduce = N_block; // number of dot product terms to add up
N_block = (N_block+BLOCK_SIZE-1)/(BLOCK_SIZE); // number of blocks in this new, reduced vector
hipLaunchKernelGGL(( reduction_kernel2), dim3(N_block),dim3(BLOCK_SIZE), 0, 0, device_dotprod + N_reduce, device_dotprod, N_reduce); // reduce; store new terms shifted
device_dotprod += N_reduce; // trace/copy the shift
}
hipMemcpy(&dotprod, device_dotprod, sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// compute Error
double err = fabs(dotprod - dotprod_ref);
printf("Dot product test:\n");
printf("GPU Error = %1.5e\n", err);
hipFree(x);
hipFree(y);
hipFree(device_dotprod);
/*
Matrix multiplication on GPU
*/
// matrix A is MxN
long M = (1UL<<10);
N = (1UL<<10);
// compute the product y = A*x
double *A, *device_A, *device_x, *y_ref;
hipMalloc(&device_A, M*N * sizeof(double));
hipMalloc(&device_x, N * sizeof(double));
A = (double*) aligned_malloc(M*N * sizeof(double));
x = (double*) aligned_malloc( N * sizeof(double));
y = (double*) aligned_malloc(M * sizeof(double));
y_ref = (double*) aligned_malloc(M * sizeof(double));
// initialize A and x
for(long i=0; i<M*N; i++) A[i] = drand48();
for(long i=0; i<N; i++) x[i] = drand48();
// compute the reference Matvec on the CPU
Timer t;
t.tic();
for(long i=0; i<M; i++) y_ref[i] = 0.0;
MMult1(M, 1, N, A, x, y_ref);
double time_cpu = t.toc();
/*
compare with the GPU via M dot products
*/
// copy constant data to device
t.tic();
hipMemcpyAsync(device_A, A, M*N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(device_x, x, N * sizeof(double), hipMemcpyHostToDevice);
  // make a scratch buffer large enough for every level of the block-wise reduction
N_work = 1;
for(long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
hipMalloc(&device_dotprod, N_work*sizeof(double));
double *Arow;
hipMallocManaged(&Arow, N * sizeof(double));
for(long i=0; i<M; i++) {
// construct Arow
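    // A is stored column-major, so entry (i,j) sits at A[i + j*M]; the index i + j*N below is
    // equivalent only because M == N in this test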
for(long j=0; j<N; j++) Arow[j] = A[i+j*N];
// compute the inner product
long N_block = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
    hipLaunchKernelGGL(( kernel_dot2), dim3(N_block),dim3(BLOCK_SIZE), 0, 0, N, device_dotprod, Arow, device_x);
    dot_level = device_dotprod; // reset the walking pointer for this row so the buffer is reused
    while (N_block > 1) {
      long N_reduce = N_block; // number of partial sums still to be added up
      N_block = (N_block+BLOCK_SIZE-1)/(BLOCK_SIZE); // number of blocks in this new, reduced vector
      hipLaunchKernelGGL(( reduction_kernel2), dim3(N_block),dim3(BLOCK_SIZE), 0, 0, dot_level + N_reduce, dot_level, N_reduce); // reduce; store the new partial sums right after the current level
      dot_level += N_reduce; // advance past the terms just reduced
    }
    hipMemcpy(y+i, dot_level, sizeof(double), hipMemcpyDeviceToHost); // the dot product is y[i]
hipDeviceSynchronize();
}
double time_gpu = t.toc();
// compute Error
err = 0;
for(long i=0; i<M; i++) err += fabs(y[i] - y_ref[i]);
printf("Matvec computation:\n");
printf("GPU Error = %1.5e\n", err);
// compute Bandwidth
double band = 4*M*N; // M inner products
band *= sizeof(double) / 1e9;
printf("------------------------------------------\n");
printf(" CPU GPU\n");
printf("Bandwidth (GB/s) %1.3e %1.3e\n", band/time_cpu, band/time_gpu);
  aligned_free(A); // A, x, y come from aligned_malloc, not hipMalloc (assumes utils.h provides aligned_free)
  hipFree(Arow); // Arow was allocated with hipMallocManaged
  aligned_free(x);
  aligned_free(y);
hipFree(device_dotprod);
return 0;
}
| ee4ca7ccb09d6a9009702a62fbf6c0a83631e70a.cu |
#include <algorithm>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
// number of threads per block.
// multiple of warp size, 32.
#define BLOCK_SIZE 1024
// Note: matrices are stored in column major order; i.e. the array elements in
// the (m x n) matrix C are stored in the sequence: {C_00, C_10, ..., C_m0,
// C_01, C_11, ..., C_m1, C_02, ..., C_0n, C_1n, ..., C_mn}
void MMult1(long m, long n, long k, double *a, double *b, double *c) {
// optimal loop ordering for column-major matrices
#pragma omp parallel for schedule(static)
for (long j = 0; j < n; j++) {
for (long p = 0; p < k; p++) {
double B_pj = b[p+j*k];
for (long i = 0; i < m; i++) {
double A_ip = a[i+p*m];
double C_ij = c[i+j*m];
C_ij = C_ij + A_ip * B_pj;
c[i+j*m] = C_ij;
}
}
}
}
// compute the partial dot product of N-vectors x and y.
// sum stores terms of the dot product
__global__
void kernel_dot2(long N, double* dotprod, const double* x, const double* y){
__shared__ double shared_xy[BLOCK_SIZE]; // element-wise product of x and y
long idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) shared_xy[threadIdx.x] = x[idx] * y[idx];
else shared_xy[threadIdx.x] = 0;
__syncthreads();
  // s >>= 1 means "set s to itself shifted right by one bit", i.e., an integer division by 2
  // within the block, shared memory is indexed with threadIdx.x rather than the global idx
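  // tree reduction example with blockDim.x = 8:
  //   s=4: shared_xy[0..3] += shared_xy[4..7];  s=2: [0..1] += [2..3];  s=1: [0] += [1]
  // afterwards shared_xy[0] holds the block's partial sum (requires blockDim.x to be a power of two)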
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s) {
shared_xy[threadIdx.x] += shared_xy[threadIdx.x + s];
}
__syncthreads();
}
// write to global memory
if (threadIdx.x == 0) dotprod[blockIdx.x] = shared_xy[threadIdx.x];
}
// the summation kernel from our class example gpu16.cu
__global__ void reduction_kernel2(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
  // s >>= 1 means "set s to itself shifted right by one bit", i.e., an integer division by 2
  // within the block, shared memory is indexed with threadIdx.x rather than the global idx
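  // same tree reduction as in kernel_dot2 above, applied to an already-formed vector a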
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s) {
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
// write to global memory
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
int main(int argc, char** argv) {
long N = (1UL<<25); // 2^25
double *x, *y, *device_dotprod;
double dotprod, dotprod_ref;
cudaMallocManaged(&x, N * sizeof(double));
cudaMallocManaged(&y, N * sizeof(double));
// initialize data and get reference solution
dotprod_ref = 0;
for(long i=0; i<N; i++) {
x[i] = drand48();
y[i] = drand48();
dotprod_ref += x[i] * y[i];
}
  // make a scratch buffer large enough for every level of the block-wise reduction
long N_work = 1;
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
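  // N_work adds up the number of partial sums produced at every reduction level,
  // e.g. for N = 2^25 and BLOCK_SIZE = 1024: 32768 + 32 + 1 = 32801 doubles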
cudaMalloc(&device_dotprod, N_work*sizeof(double));
// now check with GPU
long N_block = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
  kernel_dot2<<<N_block,BLOCK_SIZE>>>(N, device_dotprod, x, y);
  double* dot_level = device_dotprod; // walking pointer, so device_dotprod itself stays valid for cudaFree below
  while (N_block > 1) {
    long N_reduce = N_block; // number of partial sums still to be added up
    N_block = (N_block+BLOCK_SIZE-1)/(BLOCK_SIZE); // number of blocks in this new, reduced vector
    reduction_kernel2<<<N_block,BLOCK_SIZE>>>(dot_level + N_reduce, dot_level, N_reduce); // reduce; store the new partial sums right after the current level
    dot_level += N_reduce; // advance past the terms just reduced
  }
  cudaMemcpy(&dotprod, dot_level, sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// compute Error
double err = fabs(dotprod - dotprod_ref);
printf("Dot product test:\n");
printf("GPU Error = %1.5e\n", err);
cudaFree(x);
cudaFree(y);
cudaFree(device_dotprod);
/*
Matrix multiplication on GPU
*/
// matrix A is MxN
long M = (1UL<<10);
N = (1UL<<10);
// compute the product y = A*x
double *A, *device_A, *device_x, *y_ref;
cudaMalloc(&device_A, M*N * sizeof(double));
cudaMalloc(&device_x, N * sizeof(double));
A = (double*) aligned_malloc(M*N * sizeof(double));
x = (double*) aligned_malloc( N * sizeof(double));
y = (double*) aligned_malloc(M * sizeof(double));
y_ref = (double*) aligned_malloc(M * sizeof(double));
// initialize A and x
for(long i=0; i<M*N; i++) A[i] = drand48();
for(long i=0; i<N; i++) x[i] = drand48();
// compute the reference Matvec on the CPU
Timer t;
t.tic();
for(long i=0; i<M; i++) y_ref[i] = 0.0;
MMult1(M, 1, N, A, x, y_ref);
double time_cpu = t.toc();
/*
compare with the GPU via M dot products
*/
// copy constant data to device
t.tic();
cudaMemcpyAsync(device_A, A, M*N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(device_x, x, N * sizeof(double), cudaMemcpyHostToDevice);
  // make a scratch buffer large enough for every level of the block-wise reduction
N_work = 1;
for(long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
cudaMalloc(&device_dotprod, N_work*sizeof(double));
double *Arow;
cudaMallocManaged(&Arow, N * sizeof(double));
for(long i=0; i<M; i++) {
// construct Arow
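    // A is stored column-major, so entry (i,j) sits at A[i + j*M]; the index i + j*N below is
    // equivalent only because M == N in this test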
for(long j=0; j<N; j++) Arow[j] = A[i+j*N];
// compute the inner product
long N_block = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
    kernel_dot2<<<N_block,BLOCK_SIZE>>>(N, device_dotprod, Arow, device_x);
    dot_level = device_dotprod; // reset the walking pointer for this row so the buffer is reused
    while (N_block > 1) {
      long N_reduce = N_block; // number of partial sums still to be added up
      N_block = (N_block+BLOCK_SIZE-1)/(BLOCK_SIZE); // number of blocks in this new, reduced vector
      reduction_kernel2<<<N_block,BLOCK_SIZE>>>(dot_level + N_reduce, dot_level, N_reduce); // reduce; store the new partial sums right after the current level
      dot_level += N_reduce; // advance past the terms just reduced
    }
    cudaMemcpy(y+i, dot_level, sizeof(double), cudaMemcpyDeviceToHost); // the dot product is y[i]
cudaDeviceSynchronize();
}
double time_gpu = t.toc();
// compute Error
err = 0;
for(long i=0; i<M; i++) err += fabs(y[i] - y_ref[i]);
printf("Matvec computation:\n");
printf("GPU Error = %1.5e\n", err);
// compute Bandwidth
double band = 4*M*N; // M inner products
band *= sizeof(double) / 1e9;
printf("------------------------------------------\n");
printf(" CPU GPU\n");
printf("Bandwidth (GB/s) %1.3e %1.3e\n", band/time_cpu, band/time_gpu);
  aligned_free(A); // A, x, y come from aligned_malloc, not cudaMalloc (assumes utils.h provides aligned_free)
  cudaFree(Arow); // Arow was allocated with cudaMallocManaged
  aligned_free(x);
  aligned_free(y);
cudaFree(device_dotprod);
return 0;
}
|
ac599551aa04c7acafaf860054d8c184899674dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
namespace at {
namespace native {
Tensor int_repr_quant_cuda(const Tensor& self) {
Tensor dst;
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quant_cuda", [&]() {
dst = at::empty(
self.sizes(),
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
auto iter = TensorIterator();
iter.add_output(dst);
iter.add_input(self);
iter.dont_compute_common_dtype();
iter.build();
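    // gpu_kernel applies the lambda element-wise: each quantized element (qint8/quint8/qint32) exposes
    // its stored integer through .val_, so dst receives the raw integer representation of self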
gpu_kernel(iter, []GPU_LAMBDA(scalar_t value) -> underlying_t { return value.val_; });
});
return dst;
}
} // namespace native
} // namespace at
| ac599551aa04c7acafaf860054d8c184899674dc.cu | #include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
Tensor int_repr_quant_cuda(const Tensor& self) {
Tensor dst;
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quant_cuda", [&]() {
dst = at::empty(
self.sizes(),
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
auto iter = TensorIterator();
iter.add_output(dst);
iter.add_input(self);
iter.dont_compute_common_dtype();
iter.build();
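    // gpu_kernel applies the lambda element-wise: each quantized element (qint8/quint8/qint32) exposes
    // its stored integer through .val_, so dst receives the raw integer representation of self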
gpu_kernel(iter, []GPU_LAMBDA(scalar_t value) -> underlying_t { return value.val_; });
});
return dst;
}
} // namespace native
} // namespace at
|
2c451c991f6f296bb5f7888660e8945ac2acba05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
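    // index decomposes as (h * width + w) * num_anchors + a; the first num_anchors rows of
    // workspace_proposals hold the base anchors for cell (0, 0), which are shifted here by the feature stride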
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
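    // standard Faster R-CNN box regression: (dx, dy) move the box centre in units of the box
    // width/height, while (dw, dh) rescale the width/height in log space (hence the exp below)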
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
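// NMS via 64-bit suppression masks: boxes are grouped into tiles of 64 (one uint64_t per tile), each
// thread block compares one row tile against one column tile held in shared memory, and a thread sets
// bit i of its mask when the IoU with the i-th box of the column tile exceeds nms_overlap_thresh.
// The host-side _nms() below walks these masks to keep the highest-scoring non-suppressed boxes.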
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
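  // boxes arrive sorted by score (descending); walk them in order and keep a box only if none of the
  // previously kept boxes suppresses it. remv accumulates, per 64-box tile, the bits of every box
  // suppressed by the boxes kept so far.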
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
uint64_t *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = 0;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp : public Operator{
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
<< "Sorry, multiple images per device is not implemented.";
Stream<xpu> *s = ctx.get_stream<xpu>();
Shape<4> fg_scores_shape = Shape4(in_data[proposal::kClsProb].shape_[0],
in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
real_t* foreground_score_ptr = in_data[proposal::kClsProb].dptr<real_t>()
+ fg_scores_shape.Size();
Tensor<xpu, 4> scores = Tensor<xpu, 4>(foreground_score_ptr, fg_scores_shape);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);
int num_anchors = in_data[proposal::kClsProb].shape_[1] / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
&anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr, sizeof(float) * count * 5));
Tensor<xpu, 2> workspace_proposals(workspace_proposals_ptr, Shape2(count, 5));
FRCNN_CUDA_CHECK(hipMemcpy(workspace_proposals.dptr_,
&anchors[0], sizeof(float) * anchors.size(),
hipMemcpyHostToDevice));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride,
scores.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(hipMemcpy(&cpu_im_info[0], im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
hipMemcpyDeviceToHost));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, param_.rpn_min_size * cpu_im_info[2], workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Copy score to a continuous memory
float* score_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, workspace_proposals.dptr_, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr,
sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_pre_nms_top_n, workspace_proposals.dptr_, order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(hipFree(score_ptr));
FRCNN_CUDA_CHECK(hipFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
&_keep[0],
&out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
hipMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size,
out.dptr_, out_score.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// free temporary memory
FRCNN_CUDA_CHECK(hipFree(keep));
FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template<>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| 2c451c991f6f296bb5f7888660e8945ac2acba05.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
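    // index decomposes as (h * width + w) * num_anchors + a; the first num_anchors rows of
    // workspace_proposals hold the base anchors for cell (0, 0), which are shifted here by the feature stride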
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
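    // standard Faster R-CNN box regression: (dx, dy) move the box centre in units of the box
    // width/height, while (dw, dh) rescale the width/height in log space (hence the exp below)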
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
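// NMS via 64-bit suppression masks: boxes are grouped into tiles of 64 (one uint64_t per tile), each
// thread block compares one row tile against one column tile held in shared memory, and a thread sets
// bit i of its mask when the IoU with the i-th box of the column tile exceeds nms_overlap_thresh.
// The host-side _nms() below walks these masks to keep the highest-scoring non-suppressed boxes.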
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
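  // boxes arrive sorted by score (descending); walk them in order and keep a box only if none of the
  // previously kept boxes suppresses it. remv accumulates, per 64-box tile, the bits of every box
  // suppressed by the boxes kept so far.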
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
uint64_t *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(cudaFree(mask_dev));
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = 0;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp : public Operator{
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
<< "Sorry, multiple images per device is not implemented.";
Stream<xpu> *s = ctx.get_stream<xpu>();
Shape<4> fg_scores_shape = Shape4(in_data[proposal::kClsProb].shape_[0],
in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
real_t* foreground_score_ptr = in_data[proposal::kClsProb].dptr<real_t>()
+ fg_scores_shape.Size();
Tensor<xpu, 4> scores = Tensor<xpu, 4>(foreground_score_ptr, fg_scores_shape);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);
int num_anchors = in_data[proposal::kClsProb].shape_[1] / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
&anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_proposals_ptr, sizeof(float) * count * 5));
Tensor<xpu, 2> workspace_proposals(workspace_proposals_ptr, Shape2(count, 5));
FRCNN_CUDA_CHECK(cudaMemcpy(workspace_proposals.dptr_,
&anchors[0], sizeof(float) * anchors.size(),
cudaMemcpyHostToDevice));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride,
scores.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(cudaMemcpy(&cpu_im_info[0], im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
cudaMemcpyDeviceToHost));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, param_.rpn_min_size * cpu_im_info[2], workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Copy score to a continuous memory
float* score_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel<<<dimGrid, dimBlock>>>(
count, workspace_proposals.dptr_, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_ordered_proposals_ptr,
sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel<<<dimGrid, dimBlock>>>(
rpn_pre_nms_top_n, workspace_proposals.dptr_, order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
FRCNN_CUDA_CHECK(cudaFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(cudaFree(score_ptr));
FRCNN_CUDA_CHECK(cudaFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
&_keep[0],
&out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(cudaMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(cudaMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
cudaMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput<<<dimGrid, dimBlock>>>(
rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size,
out.dptr_, out_score.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// free temporary memory
FRCNN_CUDA_CHECK(cudaFree(keep));
FRCNN_CUDA_CHECK(cudaFree(workspace_ordered_proposals_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template<>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
313edf80424eefcabd5606ad0c5df9feaadefc6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"activationLayer.hpp"
#include"config/configBase.hpp"
#include"common/cudnn.hpp"
#include"common/common.hpp"
#include"test/test.hpp"
/*
* Create CUDNN handles
*/
template<typename Ntype>
void ActivationLayer<Ntype>::createHandles()
{
CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_tensorDesc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_tensorDesc));
CUDNN_CHECK(cudnnCreateActivationDescriptor(&activDesc));
}
/*
* Destroy CUDNN Handles
*/
template<typename Ntype>
void ActivationLayer<Ntype>::destroyHandles()
{
CUDNN_CHECK(cudnnDestroyTensorDescriptor(bottom_tensorDesc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(top_tensorDesc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(activDesc));
}
template<typename Ntype>
void ActivationLayer<Ntype>::ReShape()
{
this->m_top = new NDMatrix<Ntype>(this->m_number, this->m_channels, this->m_height, this->m_width);
}
/*
* Activation layer constructor
*/
template<typename Ntype>
ActivationLayer<Ntype>::ActivationLayer(string name)
{
this->m_name = name;
this->m_inputName = " ";
this->m_bottom = NULL;
this->m_top = NULL;
this->m_loss = 0;
this->m_prevLayer.clear();
this->m_nextLayer.clear();
activDesc = NULL;
bottom_tensorDesc = NULL;
top_tensorDesc = NULL;
ActivationLayerConfig * curConfig = (ActivationLayerConfig*) ConfigTable::getInstance()->getLayerByName(this->m_name);
string preLayerName = curConfig->getInput();
Layer<Ntype>* prev_Layer = (Layer<Ntype>*) LayerContainer<Ntype>::getInstance()->getLayerByName(preLayerName);
this->m_bottom = prev_Layer->getTop();
CHECK(this->m_bottom);
this->m_inputChannels = this->m_bottom->ND_channels();
this->m_number = this->m_bottom->ND_num();
this->m_channels = this->m_bottom->ND_channels();
this->m_height = this->m_bottom->ND_height();
this->m_width = this->m_bottom->ND_width();
ActivationMode = curConfig->getNonLinearType();
ReShape();
this->createHandles();
}
/*
* Deep copy constructor
*/
//ActivationLayer::ActivationLayer(const ActivationLayer* layer)
//{
// srcData = NULL;
// dstData = NULL;
// diffData = NULL;
// prevLayer.clear();
// nextLayer.clear();
// activDesc = NULL;
// bottom_tensorDesc = NULL;
// top_tensorDesc = NULL;
// srcDiffTensorDesc = NULL;
// dstDiffTensorDesc = NULL;
//
// static int idx = 0;
// _name = layer->_name + string("_") + int_to_string(idx);
// idx ++;
// _inputName = layer->_inputName;
// inputAmount = layer->inputAmount;
// inputImageDim = layer->inputImageDim;
// number = layer->number;
// channels = layer->channels;
// height = layer->height;
// width = layer->width;
// outputSize = layer->outputSize;
// ActivationMode = layer->ActivationMode;
//
// MemoryMonitor::getInstance()->gpuMallocMemory((void**)&dstData, number * channels * height * width * sizeof(float));
// MemoryMonitor::getInstance()->gpuMallocMemory((void**)&diffData, number * channels * height * width * sizeof(float));
//
// this->createHandles();
// cout<<"Activation-copy"<<endl;
//}
/*
* Destructor
*/
template<typename Ntype>
ActivationLayer<Ntype>::~ActivationLayer()
{
delete this->m_top;
destroyHandles();
}
/*
* LRELU activation function forward compute
*/
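// element-wise leaky ReLU with a hard-coded negative slope of 0.01: f(x) = x for x > 0, 0.01 * x otherwise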
__global__ void LreluForward(const float* srcData, float* dstData, int data_size)
{
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for(int i = 0; i < data_size; i += num_threads)
{
int index = i + thread_index;
if(index < data_size)
{
dstData[index] = srcData[index] > 0 ? srcData[index] : srcData[index] * 0.01;
}
}
}
/*
* Activation forward propagation
*/
template<typename Ntype>
void ActivationLayer<Ntype>::Forward(Phase Phase)
{
this->m_bottom = this->m_prevLayer[0]->getTop();
if(ActivationMode == ACTIVATION_LRELU)
{
int data_size = this->m_number * this->m_channels * this->m_height * this->m_width;
int num_threads = 256;
int num_block = (data_size + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( LreluForward), dim3(num_block), dim3(num_threads), 0, 0, (float*)this->m_bottom->gpu_data(), (float*)this->m_top->mutable_gpu_data(), data_size);
hipDeviceSynchronize();
}
else
{
cudnnActivationMode = (cudnnActivationMode_t)ActivationMode;
CUDNN_CHECK(cudnnSetActivationDescriptor(activDesc,
cudnnActivationMode,
CUDNN_PROPAGATE_NAN,
0.0));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(bottom_tensorDesc,
cuDNN<float>::getInstance()->GetTensorFormat(),
cuDNN<float>::getInstance()->GetDataType(),
this->m_number,
this->m_channels,
this->m_height,
this->m_width));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(top_tensorDesc,
cuDNN<float>::getInstance()->GetTensorFormat(),
cuDNN<float>::getInstance()->GetDataType(),
this->m_number,
this->m_channels,
this->m_height,
this->m_width));
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CHECK(cudnnActivationForward(cuDNN<float>::getInstance()->GetcudnnHandle(),
activDesc,
&alpha,
bottom_tensorDesc,
this->m_bottom->gpu_data(),
&beta,
top_tensorDesc,
this->m_top->mutable_gpu_data()));
}
}
/*
* LRELU BackWard Compute
*/
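// gradient of the leaky ReLU above: pass srcDiff through unchanged where the forward input was positive,
// scale it by 0.01 otherwise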
__global__ void LreluBackward(float* srcDiff, float* dstDiff, float* srcData, int data_size)
{
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for(int i = 0; i < data_size; i += num_threads)
{
int index = i + thread_index;
if(index < data_size)
{
dstDiff[index] = srcDiff[index] * ((srcData[index] > 0) + (srcData[index] <= 0) * 0.01);
}
}
}
/*
* Activation Backward Propagation
*/
template<typename Ntype>
void ActivationLayer<Ntype>::Backward()
{
if(ActivationMode == ACTIVATION_LRELU)
{
//int nIndex = m_nCurBranchIndex;
int data_size = this->m_number * this->m_channels * this->m_height * this->m_width;
int num_threads = 256;
int num_block = (data_size + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( LreluBackward), dim3(num_block), dim3(num_threads), 0, 0, (float*)this->m_top->gpu_diff(), (float*)this->m_bottom->mutable_gpu_diff(), (float*)this->m_bottom->gpu_data(), data_size);
hipDeviceSynchronize();
}
else
{
cudnnActivationMode = (cudnnActivationMode_t)ActivationMode;
float alpha = 1.0f;
float beta = 0.0f;
//int nIndex = m_nCurBranchIndex;
CUDNN_CHECK(cudnnActivationBackward(cuDNN<float>::getInstance()->GetcudnnHandle(),
activDesc,
&alpha,
top_tensorDesc,
this->m_top->gpu_data(),
top_tensorDesc,
this->m_top->gpu_diff(),
bottom_tensorDesc,
this->m_bottom->gpu_data(),
&beta,
bottom_tensorDesc,
this->m_bottom->mutable_gpu_diff()));
}
}
INSTANTIATE_CLASS(ActivationLayer);
| 313edf80424eefcabd5606ad0c5df9feaadefc6d.cu | #include"activationLayer.hpp"
#include"config/configBase.hpp"
#include"common/cudnn.hpp"
#include"common/common.hpp"
#include"test/test.hpp"
/*
* Create CUDNN handles
*/
template<typename Ntype>
void ActivationLayer<Ntype>::createHandles()
{
CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_tensorDesc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_tensorDesc));
CUDNN_CHECK(cudnnCreateActivationDescriptor(&activDesc));
}
/*
* Destroy CUDNN Handles
*/
template<typename Ntype>
void ActivationLayer<Ntype>::destroyHandles()
{
CUDNN_CHECK(cudnnDestroyTensorDescriptor(bottom_tensorDesc));
CUDNN_CHECK(cudnnDestroyTensorDescriptor(top_tensorDesc));
CUDNN_CHECK(cudnnDestroyActivationDescriptor(activDesc));
}
template<typename Ntype>
void ActivationLayer<Ntype>::ReShape()
{
this->m_top = new NDMatrix<Ntype>(this->m_number, this->m_channels, this->m_height, this->m_width);
}
/*
* Activation layer constructor
*/
template<typename Ntype>
ActivationLayer<Ntype>::ActivationLayer(string name)
{
this->m_name = name;
this->m_inputName = " ";
this->m_bottom = NULL;
this->m_top = NULL;
this->m_loss = 0;
this->m_prevLayer.clear();
this->m_nextLayer.clear();
activDesc = NULL;
bottom_tensorDesc = NULL;
top_tensorDesc = NULL;
ActivationLayerConfig * curConfig = (ActivationLayerConfig*) ConfigTable::getInstance()->getLayerByName(this->m_name);
string preLayerName = curConfig->getInput();
Layer<Ntype>* prev_Layer = (Layer<Ntype>*) LayerContainer<Ntype>::getInstance()->getLayerByName(preLayerName);
this->m_bottom = prev_Layer->getTop();
CHECK(this->m_bottom);
this->m_inputChannels = this->m_bottom->ND_channels();
this->m_number = this->m_bottom->ND_num();
this->m_channels = this->m_bottom->ND_channels();
this->m_height = this->m_bottom->ND_height();
this->m_width = this->m_bottom->ND_width();
ActivationMode = curConfig->getNonLinearType();
ReShape();
this->createHandles();
}
/*
* Deep copy constructor
*/
//ActivationLayer::ActivationLayer(const ActivationLayer* layer)
//{
// srcData = NULL;
// dstData = NULL;
// diffData = NULL;
// prevLayer.clear();
// nextLayer.clear();
// activDesc = NULL;
// bottom_tensorDesc = NULL;
// top_tensorDesc = NULL;
// srcDiffTensorDesc = NULL;
// dstDiffTensorDesc = NULL;
//
// static int idx = 0;
// _name = layer->_name + string("_") + int_to_string(idx);
// idx ++;
// _inputName = layer->_inputName;
// inputAmount = layer->inputAmount;
// inputImageDim = layer->inputImageDim;
// number = layer->number;
// channels = layer->channels;
// height = layer->height;
// width = layer->width;
// outputSize = layer->outputSize;
// ActivationMode = layer->ActivationMode;
//
// MemoryMonitor::getInstance()->gpuMallocMemory((void**)&dstData, number * channels * height * width * sizeof(float));
// MemoryMonitor::getInstance()->gpuMallocMemory((void**)&diffData, number * channels * height * width * sizeof(float));
//
// this->createHandles();
// cout<<"Activation-copy"<<endl;
//}
/*
* Destructor
*/
template<typename Ntype>
ActivationLayer<Ntype>::~ActivationLayer()
{
delete this->m_top;
destroyHandles();
}
/*
* LRELU activation function forward compute
*/
__global__ void LreluForward(const float* srcData, float* dstData, int data_size)
{
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for(int i = 0; i < data_size; i += num_threads)
{
int index = i + thread_index;
if(index < data_size)
{
dstData[index] = srcData[index] > 0 ? srcData[index] : srcData[index] * 0.01;
}
}
}
/*
* Activation forward propagation
*/
template<typename Ntype>
void ActivationLayer<Ntype>::Forward(Phase Phase)
{
this->m_bottom = this->m_prevLayer[0]->getTop();
if(ActivationMode == ACTIVATION_LRELU)
{
int data_size = this->m_number * this->m_channels * this->m_height * this->m_width;
int num_threads = 256;
int num_block = (data_size + num_threads - 1) / num_threads;
LreluForward<<<num_block, num_threads>>>((float*)this->m_bottom->gpu_data(), (float*)this->m_top->mutable_gpu_data(), data_size);
cudaThreadSynchronize();
}
else
{
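        // Non-LReLU activations go through cuDNN: configure the activation and tensor descriptors, then run cudnnActivationForward on the bottom data.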
cudnnActivationMode = (cudnnActivationMode_t)ActivationMode;
CUDNN_CHECK(cudnnSetActivationDescriptor(activDesc,
cudnnActivationMode,
CUDNN_PROPAGATE_NAN,
0.0));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(bottom_tensorDesc,
cuDNN<float>::getInstance()->GetTensorFormat(),
cuDNN<float>::getInstance()->GetDataType(),
this->m_number,
this->m_channels,
this->m_height,
this->m_width));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(top_tensorDesc,
cuDNN<float>::getInstance()->GetTensorFormat(),
cuDNN<float>::getInstance()->GetDataType(),
this->m_number,
this->m_channels,
this->m_height,
this->m_width));
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CHECK(cudnnActivationForward(cuDNN<float>::getInstance()->GetcudnnHandle(),
activDesc,
&alpha,
bottom_tensorDesc,
this->m_bottom->gpu_data(),
&beta,
top_tensorDesc,
this->m_top->mutable_gpu_data()));
}
}
/*
 * LRELU Backward Compute
*/
__global__ void LreluBackward(float* srcDiff, float* dstDiff, float* srcData, int data_size)
{
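    // LeakyReLU gradient: the incoming diff passes through unchanged where the forward input was positive and is scaled by 0.01 where it was <= 0.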
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for(int i = 0; i < data_size; i += num_threads)
{
int index = i + thread_index;
if(index < data_size)
{
dstDiff[index] = srcDiff[index] * ((srcData[index] > 0) + (srcData[index] <= 0) * 0.01);
}
}
}
/*
* Activation Backward Propagation
*/
template<typename Ntype>
void ActivationLayer<Ntype>::Backward()
{
if(ActivationMode == ACTIVATION_LRELU)
{
//int nIndex = m_nCurBranchIndex;
int data_size = this->m_number * this->m_channels * this->m_height * this->m_width;
int num_threads = 256;
int num_block = (data_size + num_threads - 1) / num_threads;
LreluBackward<<<num_block, num_threads>>>((float*)this->m_top->gpu_diff(), (float*)this->m_bottom->mutable_gpu_diff(), (float*)this->m_bottom->gpu_data(), data_size);
cudaThreadSynchronize();
}
else
{
cudnnActivationMode = (cudnnActivationMode_t)ActivationMode;
float alpha = 1.0f;
float beta = 0.0f;
//int nIndex = m_nCurBranchIndex;
CUDNN_CHECK(cudnnActivationBackward(cuDNN<float>::getInstance()->GetcudnnHandle(),
activDesc,
&alpha,
top_tensorDesc,
this->m_top->gpu_data(),
top_tensorDesc,
this->m_top->gpu_diff(),
bottom_tensorDesc,
this->m_bottom->gpu_data(),
&beta,
bottom_tensorDesc,
this->m_bottom->mutable_gpu_diff()));
}
}
INSTANTIATE_CLASS(ActivationLayer);
|
0c59a61c69b7113273fd34defac038aba664896b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "integrals.h"
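// In-place transpose kernels: one image per block; each thread holds its element in a register, synchronizes, then writes it to the transposed position (assumes a square thread block).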
__global__ void transpose_pitched(float *images, size_t pitch) {
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
float temp = img[(threadIdx.y*blockDim.x) + threadIdx.x];
__syncthreads();
img[(threadIdx.x*blockDim.x) + threadIdx.y] = temp;
}
__global__ void transpose(struct Mat *images) {
float temp = images[blockIdx.x].values[(threadIdx.y*blockDim.x) + threadIdx.x];
__syncthreads();
images[blockIdx.x].values[(threadIdx.x*blockDim.x)+threadIdx.y] = temp;
}
__global__ void parallel_scan_pitched(float *images, int rows, size_t pitch) {
float temp_val = 0.0;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/o double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
if (offset >= 0) {
temp_val = img[(threadIdx.y*rows)+threadIdx.x] + img[(threadIdx.y*rows)+offset];
}
__syncthreads();
if (offset >= 0) {
img[(threadIdx.y*rows)+threadIdx.x] = temp_val;
}
}
}
__global__ void parallel_scan_shared_mem_sb_pitched(float *images, int rows, size_t pitch) {
// create shared memory array
__shared__ float temp[576];
float temp_val = 0.0;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
// each thread pulls one pixel into shared
temp[(threadIdx.y*rows)+threadIdx.x] = img[(threadIdx.y*rows)+threadIdx.x];
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/o double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
if (offset >= 0) {
temp_val = temp[(threadIdx.y*rows)+threadIdx.x] + temp[(threadIdx.y*rows)+offset];
}
__syncthreads();
if (offset >= 0) {
temp[(threadIdx.y*rows)+threadIdx.x] = temp_val;
}
}
img[(threadIdx.y*rows)+threadIdx.x] = temp[(threadIdx.y*rows)+threadIdx.x];
}
__global__ void parallel_scan_shared_mem_db_pitched(float *images, int rows, size_t pitch) {
// create shared memory arrays
if (blockIdx.x==0 && threadIdx.x==0) {
printf("test\n");
}
__shared__ float temp0[576];
__shared__ float temp1[576];
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
if (blockIdx.x==0 && threadIdx.x==0) {
printf("%f\n",img[0]);
}
// create pointers to shared memory arrays for double buffering
float *source = temp0;
float *dest = temp1;
float *swap;
    float temp_val;
float part_sum;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// each thread pulls one pixel into shared
temp_val = img[(threadIdx.y*rows)+threadIdx.x];
temp0[(threadIdx.y*rows)+threadIdx.x] = temp_val;
temp1[(threadIdx.y*rows)+threadIdx.x] = temp_val;
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/ double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
part_sum = source[(threadIdx.y*rows)+threadIdx.x];
if (offset >= 0) {
part_sum += source[(threadIdx.y*rows)+offset];
}
dest[(threadIdx.y*rows)+threadIdx.x] = part_sum;
swap = dest;
dest = source;
source = swap;
}
img[(threadIdx.y*rows)+threadIdx.x] = source[(threadIdx.y*rows)+threadIdx.x];
}
__global__ void parallel_scan_shared_mem_db(struct Mat *images, int rows) {
// create shared memory arrays
__shared__ float temp0[576];
__shared__ float temp1[576];
// create pointers to shared memory arrays for double buffering
float *source = temp0;
float *dest = temp1;
float *swap;
    float temp_val;
float part_sum;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// each thread pulls one pixel into shared
temp_val = images[blockIdx.x].values[(threadIdx.y*rows)+threadIdx.x];
temp0[(threadIdx.y*rows)+threadIdx.x] = temp_val;
temp1[(threadIdx.y*rows)+threadIdx.x] = temp_val;
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/ double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
part_sum = source[(threadIdx.y*rows)+threadIdx.x];
if (offset >= 0) {
part_sum += source[(threadIdx.y*rows)+offset];
}
dest[(threadIdx.y*rows)+threadIdx.x] = part_sum;
swap = dest;
dest = source;
source = swap;
}
images[blockIdx.x].values[(threadIdx.y*rows)+threadIdx.x] = source[(threadIdx.y*rows)+threadIdx.x];
}
void compute_integrals_d(struct Mat *images, int total_samples) {
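    // Builds a summed-area table for every sample: prefix-scan each image along one axis, transpose, scan again, then transpose back.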
int rows = images[0].rows;
int cols = images[0].cols;
hipError_t err;
dim3 grid_size(total_samples,1);
dim3 block_size(rows,cols);
int img_bytes = sizeof(struct Mat) * total_samples;
struct Mat *images_d;
struct Mat *images_h = (struct Mat*) malloc(img_bytes);
int val_bytes = sizeof(float) * rows*cols;
for (int i=0; i<total_samples; ++i) {
float *values;
hipMalloc(&values,val_bytes);
hipMemcpy(values,images[i].values,val_bytes,hipMemcpyHostToDevice);
images_h[i].rows = images[i].rows;
images_h[i].cols = images[i].cols;
images_h[i].values = values;
}
hipMalloc(&images_d,img_bytes);
hipMemcpy(images_d,images_h,img_bytes,hipMemcpyHostToDevice);
/*** compute row sum ***/
hipLaunchKernelGGL(( parallel_scan_shared_mem_db), dim3(grid_size),dim3(block_size) , 0, 0, images_d,rows);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db' #1: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
hipLaunchKernelGGL(( transpose), dim3(grid_size),dim3(block_size) , 0, 0, images_d);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose' #1: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** compute column sum ***/
hipLaunchKernelGGL(( parallel_scan_shared_mem_db), dim3(grid_size),dim3(block_size) , 0, 0, images_d,rows);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db' #2: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
hipLaunchKernelGGL(( transpose), dim3(grid_size),dim3(block_size) , 0, 0, images_d);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose' #2: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
for (int i=0; i<total_samples; ++i) {
hipMemcpy(images[i].values,images_h[i].values,val_bytes,hipMemcpyDeviceToHost);
hipFree(images_h[i].values);
}
    hipFree(images_d);
    free(images_h);
}
void compute_integrals_d_pitched(struct Mat *images, int total_samples) {
int rows = images[0].rows;
int cols = images[0].cols;
size_t dpitch;
size_t hpitch = rows*cols*sizeof(float);
int width = (rows*cols) * sizeof(float);
int height = total_samples;
hipError_t err;
dim3 grid_size(total_samples,1);
dim3 block_size(rows,cols);
/*** compute row sum ***/
float *images_d;
    // hipMemcpy2D expects a contiguous host buffer, so pack all samples into one flat array (one image per copied row).
    float *images_h = (float*) malloc(sizeof(float) * rows*cols * total_samples);
    for (int i=0; i<total_samples; ++i) {
        for (int j=0; j<rows*cols; ++j) {
            images_h[(i*rows*cols) + j] = images[i].values[j];
        }
    }
hipMallocPitch(&images_d, &dpitch, width, height);
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy2D(images_d, dpitch, images_h, hpitch, width, height, hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipLaunchKernelGGL(( parallel_scan_shared_mem_db_pitched), dim3(grid_size),dim3(block_size) , 0, 0, images_d,rows,dpitch);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db_pitched' #1: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
hipLaunchKernelGGL(( transpose_pitched), dim3(grid_size),dim3(block_size) , 0, 0, images_d,dpitch);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose_pitched' #1: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** compute column sum ***/
hipLaunchKernelGGL(( parallel_scan_shared_mem_db_pitched), dim3(grid_size),dim3(block_size) , 0, 0, images_d,rows,dpitch);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db_pitched' #2: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
hipLaunchKernelGGL(( transpose_pitched), dim3(grid_size),dim3(block_size) , 0, 0, images_d,dpitch);
// grab error if kernel does not launch
err = hipGetLastError();
if (err != hipSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose_pitched' #2: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
    hipMemcpy2D(images_h, hpitch, images_d, dpitch, width, height, hipMemcpyDeviceToHost);
    for (int i=0; i<total_samples; ++i) {
        for (int j=0; j<rows*cols; ++j) {
            images[i].values[j] = images_h[(i*rows*cols) + j];
        }
    }
    free(images_h);
hipFree(images_d);
}
| 0c59a61c69b7113273fd34defac038aba664896b.cu | #include "integrals.h"
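// In-place transpose kernels: one image per block; each thread holds its element in a register, synchronizes, then writes it to the transposed position (assumes a square thread block).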
__global__ void transpose_pitched(float *images, size_t pitch) {
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
float temp = img[(threadIdx.y*blockDim.x) + threadIdx.x];
__syncthreads();
img[(threadIdx.x*blockDim.x) + threadIdx.y] = temp;
}
__global__ void transpose(struct Mat *images) {
float temp = images[blockIdx.x].values[(threadIdx.y*blockDim.x) + threadIdx.x];
__syncthreads();
images[blockIdx.x].values[(threadIdx.x*blockDim.x)+threadIdx.y] = temp;
}
__global__ void parallel_scan_pitched(float *images, int rows, size_t pitch) {
float temp_val = 0.0;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/o double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
if (offset >= 0) {
temp_val = img[(threadIdx.y*rows)+threadIdx.x] + img[(threadIdx.y*rows)+offset];
}
__syncthreads();
if (offset >= 0) {
img[(threadIdx.y*rows)+threadIdx.x] = temp_val;
}
}
}
__global__ void parallel_scan_shared_mem_sb_pitched(float *images, int rows, size_t pitch) {
// create shared memory array
__shared__ float temp[576];
float temp_val = 0.0;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
// each thread pulls one pixel into shared
temp[(threadIdx.y*rows)+threadIdx.x] = img[(threadIdx.y*rows)+threadIdx.x];
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/o double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
if (offset >= 0) {
temp_val = temp[(threadIdx.y*rows)+threadIdx.x] + temp[(threadIdx.y*rows)+offset];
}
__syncthreads();
if (offset >= 0) {
temp[(threadIdx.y*rows)+threadIdx.x] = temp_val;
}
}
img[(threadIdx.y*rows)+threadIdx.x] = temp[(threadIdx.y*rows)+threadIdx.x];
}
__global__ void parallel_scan_shared_mem_db_pitched(float *images, int rows, size_t pitch) {
// create shared memory arrays
if (blockIdx.x==0 && threadIdx.x==0) {
printf("test\n");
}
__shared__ float temp0[576];
__shared__ float temp1[576];
// get image for current block
float *img = (float*) ((char*) images + blockIdx.x*pitch);
if (blockIdx.x==0 && threadIdx.x==0) {
printf("%f\n",img[0]);
}
// create pointers to shared memory arrays for double buffering
float *source = temp0;
float *dest = temp1;
float *swap;
    float temp_val;
float part_sum;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// each thread pulls one pixel into shared
temp_val = img[(threadIdx.y*rows)+threadIdx.x];
temp0[(threadIdx.y*rows)+threadIdx.x] = temp_val;
temp1[(threadIdx.y*rows)+threadIdx.x] = temp_val;
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/ double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
part_sum = source[(threadIdx.y*rows)+threadIdx.x];
if (offset >= 0) {
part_sum += source[(threadIdx.y*rows)+offset];
}
dest[(threadIdx.y*rows)+threadIdx.x] = part_sum;
swap = dest;
dest = source;
source = swap;
}
img[(threadIdx.y*rows)+threadIdx.x] = source[(threadIdx.y*rows)+threadIdx.x];
}
__global__ void parallel_scan_shared_mem_db(struct Mat *images, int rows) {
// create shared memory arrays
__shared__ float temp0[576];
__shared__ float temp1[576];
// create pointers to shared memory arrays for double buffering
float *source = temp0;
float *dest = temp1;
float *swap;
    float temp_val;
float part_sum;
int offset = 0;
int max_stride = ceil(blockDim.x/2.0);
// each thread pulls one pixel into shared
temp_val = images[blockIdx.x].values[(threadIdx.y*rows)+threadIdx.x];
temp0[(threadIdx.y*rows)+threadIdx.x] = temp_val;
temp1[(threadIdx.y*rows)+threadIdx.x] = temp_val;
// build image integral per block (576 threads) via Kogge-Stone Parallel Scan Algo (w/ double buffering)
for (int stride=1; stride<=max_stride; stride*=2) {
__syncthreads();
offset = threadIdx.x - stride;
part_sum = source[(threadIdx.y*rows)+threadIdx.x];
if (offset >= 0) {
part_sum += source[(threadIdx.y*rows)+offset];
}
dest[(threadIdx.y*rows)+threadIdx.x] = part_sum;
swap = dest;
dest = source;
source = swap;
}
images[blockIdx.x].values[(threadIdx.y*rows)+threadIdx.x] = source[(threadIdx.y*rows)+threadIdx.x];
}
void compute_integrals_d(struct Mat *images, int total_samples) {
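    // Builds a summed-area table for every sample: prefix-scan each image along one axis, transpose, scan again, then transpose back.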
int rows = images[0].rows;
int cols = images[0].cols;
cudaError err;
dim3 grid_size(total_samples,1);
dim3 block_size(rows,cols);
int img_bytes = sizeof(struct Mat) * total_samples;
struct Mat *images_d;
struct Mat *images_h = (struct Mat*) malloc(img_bytes);
int val_bytes = sizeof(float) * rows*cols;
for (int i=0; i<total_samples; ++i) {
float *values;
cudaMalloc(&values,val_bytes);
cudaMemcpy(values,images[i].values,val_bytes,cudaMemcpyHostToDevice);
images_h[i].rows = images[i].rows;
images_h[i].cols = images[i].cols;
images_h[i].values = values;
}
cudaMalloc(&images_d,img_bytes);
cudaMemcpy(images_d,images_h,img_bytes,cudaMemcpyHostToDevice);
/*** compute row sum ***/
parallel_scan_shared_mem_db<<< grid_size,block_size >>>(images_d,rows);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db' #1: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
transpose<<< grid_size,block_size >>>(images_d);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose' #1: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** compute column sum ***/
parallel_scan_shared_mem_db<<< grid_size,block_size >>>(images_d,rows);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db' #2: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
transpose<<< grid_size,block_size >>>(images_d);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose' #2: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
for (int i=0; i<total_samples; ++i) {
cudaMemcpy(images[i].values,images_h[i].values,val_bytes,cudaMemcpyDeviceToHost);
cudaFree(images_h[i].values);
}
    cudaFree(images_d);
    free(images_h);
}
void compute_integrals_d_pitched(struct Mat *images, int total_samples) {
int rows = images[0].rows;
int cols = images[0].cols;
size_t dpitch;
size_t hpitch = rows*cols*sizeof(float);
int width = (rows*cols) * sizeof(float);
int height = total_samples;
cudaError err;
dim3 grid_size(total_samples,1);
dim3 block_size(rows,cols);
/*** compute row sum ***/
float *images_d;
    // cudaMemcpy2D expects a contiguous host buffer, so pack all samples into one flat array (one image per copied row).
    float *images_h = (float*) malloc(sizeof(float) * rows*cols * total_samples);
    for (int i=0; i<total_samples; ++i) {
        for (int j=0; j<rows*cols; ++j) {
            images_h[(i*rows*cols) + j] = images[i].values[j];
        }
    }
cudaMallocPitch(&images_d, &dpitch, width, height);
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy2D(images_d, dpitch, images_h, hpitch, width, height, cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
parallel_scan_shared_mem_db_pitched<<< grid_size,block_size >>>(images_d,rows,dpitch);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db_pitched' #1: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
transpose_pitched<<< grid_size,block_size >>>(images_d,dpitch);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose_pitched' #1: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** compute column sum ***/
parallel_scan_shared_mem_db_pitched<<< grid_size,block_size >>>(images_d,rows,dpitch);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'parallel_scan_shared_mem_db_pitched' #2: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*** take transpose ***/
transpose_pitched<<< grid_size,block_size >>>(images_d,dpitch);
// grab error if kernel does not launch
err = cudaGetLastError();
if (err != cudaSuccess) {
        fprintf(stderr,"An error occurred during launch of kernel 'transpose_pitched' #2: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
    cudaMemcpy2D(images_h, hpitch, images_d, dpitch, width, height, cudaMemcpyDeviceToHost);
    for (int i=0; i<total_samples; ++i) {
        for (int j=0; j<rows*cols; ++j) {
            images[i].values[j] = images_h[(i*rows*cols) + j];
        }
    }
    free(images_h);
cudaFree(images_d);
}
|
3e6b81a763b5c8fcf6e830bb0829ce0c68de840d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "HungarianAlg.h"
#define NUM_THREADS 256
using namespace std;
int parallel = 1;
int DEBUG = 0;
__device__ double d_columnAnswer;
__device__ double d_rowAnswer;
int step3Counter = 0;
int step5Counter = 0;
AssignmentProblemSolver::AssignmentProblemSolver()
{
}
AssignmentProblemSolver::~AssignmentProblemSolver()
{
}
//
// timer
//
double read_timer( )
{
static bool initialized = false;
static struct timeval start;
struct timeval end;
if( !initialized )
{
gettimeofday( &start, NULL );
initialized = true;
}
gettimeofday( &end, NULL );
return (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
}
//
// command line option processing
//
int find_option( int argc, char **argv, const char *option )
{
for( int i = 1; i < argc; i++ )
if( strcmp( argv[i], option ) == 0 )
return i;
return -1;
}
int read_int( int argc, char **argv, const char *option, int default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return atoi( argv[iplace+1] );
return default_value;
}
double AssignmentProblemSolver::Solve(vector<vector<double> >& DistMatrix,vector<int>& Assignment,TMethod Method)
{
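    // Flattens the cost matrix into a column-major array, runs the solver selected by Method (all variants currently dispatch to assignmentoptimal), and returns the total cost; Assignment[i] holds the column matched to row i, or -1 if unassigned.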
if(DEBUG) {
printf("solve\n");
}
    int N=DistMatrix.size();     // number of rows (tracks)
    int M=DistMatrix[0].size();  // number of columns (measurements)
int *assignment =new int[N];
double *distIn =new double[N*M];
double cost;
    // Copy the distance matrix into a flat column-major array (element [i][j] stored at index i + N*j)
for(int i=0; i<N; i++)
{
for(int j=0; j<M; j++)
{
distIn[i+N*j] = DistMatrix[i][j];
}
}
switch(Method)
{
case optimal: assignmentoptimal(assignment, &cost, distIn, N, M); break;
case many_forbidden_assignments: assignmentoptimal(assignment, &cost, distIn, N, M); break;
case without_forbidden_assignments: assignmentoptimal(assignment, &cost, distIn, N, M); break;
}
// form result
Assignment.clear();
for(int x=0; x<N; x++)
{
Assignment.push_back(assignment[x]);
}
delete[] assignment;
delete[] distIn;
return cost;
}
// --------------------------------------------------------------------------
// Computes the optimal assignment (minimum overall costs) using Munkres algorithm.
// --------------------------------------------------------------------------
__global__ void findMinCol_gpu(double* d_distMatrix, double* d_dualVariablesColumn, int n) {
    int tid = threadIdx.x * blockDim.x;
    if (tid >= n) return;
    int endIndex = tid + blockDim.x;
    // Track the running minimum in a per-thread register; sharing one __device__ global across threads would be a data race.
    double colMin = d_distMatrix[tid];
    for(int i = tid; i < endIndex; i++) {
        if (d_distMatrix[i] < colMin) { colMin = d_distMatrix[i]; }
    }
    //printf("threadIdx.x: %d, tid: %d, endIndex: %d, colMin: %f\n", threadIdx.x, tid, endIndex, colMin);
    d_dualVariablesColumn[threadIdx.x] = colMin;
}
__global__ void findMinRow_gpu(double* d_distMatrix, double* d_dualVariablesRow, int n) {
    int tid = threadIdx.x;
    if (tid >= n) return;
    int endIndex = n;
    // Each thread scans one row of the column-major cost matrix (the stride equals the row count for a square matrix).
    double rowMin = d_distMatrix[tid];
    for(int i = tid; i < endIndex; i += blockDim.x) {
        if (d_distMatrix[i] < rowMin) { rowMin = d_distMatrix[i]; }
    }
    //printf("tid: %d, endIndex: %d, rowMin: %f\n", tid, endIndex, rowMin);
    d_dualVariablesRow[threadIdx.x] = rowMin;
}
__global__ void subtractMinElementRow_gpu(double* d_distMatrix, double* d_dualVariablesRow, int n) {
int tid = threadIdx.x;
if (tid >= n) return;
// Subtract the smallest element in this row from each element in this row.
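    // The row count is recovered as sqrt(n), so this kernel assumes a square, column-major cost matrix.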
int nOfRows = sqrt((float)n);
int rowIdx = threadIdx.x % nOfRows;
double before = d_distMatrix[tid];
d_distMatrix[tid] = d_distMatrix[tid] - d_dualVariablesRow[rowIdx];
//printf("subtractMinElemRow, tid: %d, minElem: %f, bef: %f, aft: %f, rowIdx: %d \n", tid, d_dualVariablesRow[rowIdx], before, d_distMatrix[tid], rowIdx);
}
void AssignmentProblemSolver::assignmentoptimal(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
{
if (DEBUG) {
printf("assignment optimal.\n");
}
double *distMatrix;
double *dualVariablesRow;
double *dualVariablesColumn;
double *distMatrixTemp;
double *distMatrixEnd;
double *columnEnd;
double value;
double minValue;
bool *coveredColumns;
bool *coveredRows;
bool *starMatrix;
bool *newStarMatrix;
bool *primeMatrix;
int nOfElements;
int minDim;
int row;
int col;
// Init
*cost = 0;
for(row=0; row<nOfRows; row++)
{
assignment[row] = -1.0;
}
// Generate distance matrix
// and check matrix elements positiveness :)
// Total elements number
nOfElements = nOfRows * nOfColumns;
// Memory allocation
distMatrix = (double *)malloc(nOfElements * sizeof(double));
double * d_distMatrix;
hipMalloc((void **) &d_distMatrix, nOfElements * sizeof(double));
dualVariablesRow = (double *)malloc(nOfRows * sizeof(double));
double * d_dualVariablesRow;
hipMalloc((void **) &d_dualVariablesRow, nOfRows * sizeof(double));
dualVariablesColumn = (double *)malloc(nOfColumns * sizeof(double));
double * d_dualVariablesColumn;
hipMalloc((void**) &d_dualVariablesColumn, nOfColumns * sizeof(double));
// Pointer to last element
distMatrixEnd = distMatrix + nOfElements;
//
for(row=0; row<nOfElements; row++)
{
value = distMatrixIn[row];
if(value < 0)
{
cout << "All matrix elements have to be non-negative." << endl;
}
distMatrix[row] = value;
//printf("distMatrix[%d]: %f\n", row, value);
}
// Memory allocation
coveredColumns = (bool *)calloc(nOfColumns, sizeof(bool));
coveredRows = (bool *)calloc(nOfRows, sizeof(bool));
starMatrix = (bool *)calloc(nOfElements, sizeof(bool));
primeMatrix = (bool *)calloc(nOfElements, sizeof(bool));
newStarMatrix = (bool *)calloc(nOfElements, sizeof(bool)); /* used in step4 */
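    // GPU path: find each row's minimum on the device and subtract it from every element of that row (the row-reduction step of the Munkres algorithm); the else branch below does the same serially on the CPU.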
if (parallel) {
hipMemcpy(d_distMatrix, distMatrix, nOfElements * sizeof(double), hipMemcpyHostToDevice);
int blks = 1;
//findMinCol_gpu <<< blks, nOfRows >>> (d_distMatrix, d_dualVariablesColumn, nOfElements);
hipLaunchKernelGGL(( findMinRow_gpu) , dim3(blks), dim3(nOfColumns) , 0, 0, d_distMatrix, d_dualVariablesRow, nOfElements);
//hipDeviceSynchronize(); // GPU doesn't block CPU thread
hipLaunchKernelGGL(( subtractMinElementRow_gpu) , dim3(blks), dim3(nOfElements) , 0, 0, d_distMatrix, d_dualVariablesRow, nOfElements);
hipDeviceSynchronize(); // GPU doesn't block CPU thread
//hipMemcpy(dualVariablesRow, d_dualVariablesRow, nOfRows * sizeof(double), hipMemcpyDeviceToHost);
//hipMemcpy(dualVariablesColumn, d_dualVariablesColumn, nOfColumns * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(distMatrix, d_distMatrix, nOfElements * sizeof(double), hipMemcpyDeviceToHost);
if (DEBUG) {
// for(int i = 0; i < nOfElements; i++) {
// printf("distMatrix[%d]: %f\n", i, distMatrix[i]);
// }
// for(int i = 0; i < nOfRows; i++) {
// printf("smallest value in row %d is: %f\n", i, dualVariablesRow[i]);
// }
/*
for(int i = 0; i < nOfColumns; i++) {
printf("smallest value in column %d is: %f\n", i, dualVariablesColumn[i]);
}
*/
}
} else {
/* preliminary steps */
for(row=0; row<nOfRows; row++)
{
/* find the smallest element in the row */
distMatrixTemp = distMatrix + row;
minValue = *distMatrixTemp;
distMatrixTemp += nOfRows;
while(distMatrixTemp < distMatrixEnd)
{
value = *distMatrixTemp;
if(value < minValue)
{
minValue = value;
}
distMatrixTemp += nOfRows;
}
/* subtract the smallest element from each element of the row */
distMatrixTemp = distMatrix + row;
while(distMatrixTemp < distMatrixEnd)
{
*distMatrixTemp -= minValue;
distMatrixTemp += nOfRows;
}
}
} // Do this in parallel and serial version
minDim = nOfRows;
/* Steps 1 and 2a */
for(row=0; row<nOfRows; row++)
{
for(col=0; col<nOfColumns; col++)
{
if(distMatrix[row + nOfRows*col] == 0)
{
if (DEBUG) { printf("found elements that are zero. distMatrix[%d] = %f\n", row + nOfRows*col, distMatrix[row + nOfRows*col]); }
if(!coveredColumns[col])
{
starMatrix[row + nOfRows*col] = true;
coveredColumns[col] = true;
break;
}
}
}
}
/* move to step 2b */
step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
/* compute cost and remove invalid assignments */
computeassignmentcost(assignment, cost, distMatrixIn, nOfRows);
    /* free allocated memory */
    free(distMatrix);
    free(dualVariablesRow);
    free(dualVariablesColumn);
    free(coveredColumns);
    free(coveredRows);
    free(starMatrix);
    free(primeMatrix);
    free(newStarMatrix);
    hipFree(d_distMatrix);
    hipFree(d_dualVariablesRow);
    hipFree(d_dualVariablesColumn);
return;
}
// --------------------------------------------------------------------------
// Builds the assignment vector: each row is assigned the column of its starred zero.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns)
{
if (DEBUG) {printf("build assignment vector.\n");}
int row, col;
for(row=0; row<nOfRows; row++)
{
for(col=0; col<nOfColumns; col++)
{
if(starMatrix[row + nOfRows*col])
{
assignment[row] = col;
break;
}
}
}
}
// --------------------------------------------------------------------------
// Sums the original costs of the selected (row, column) pairs; unassigned rows (col == -1) are skipped.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows)
{
if (DEBUG) { printf("compute assignment cost.\n");}
int row, col;
for(row=0; row<nOfRows; row++)
{
col = assignment[row];
if(col >= 0)
{
*cost += distMatrix[row + nOfRows*col];
}
}
}
// --------------------------------------------------------------------------
// Munkres step 2a: cover every column that contains a starred zero, then re-check coverage in step 2b.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
if (DEBUG) { printf("step 2a\n"); }
bool *starMatrixTemp, *columnEnd;
int col;
/* cover every column containing a starred zero */
for(col=0; col<nOfColumns; col++)
{
starMatrixTemp = starMatrix + nOfRows*col;
columnEnd = starMatrixTemp + nOfRows;
while(starMatrixTemp < columnEnd)
{
if(*starMatrixTemp++)
{
coveredColumns[col] = true;
break;
}
}
}
/* move to step 3 */
step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Munkres step 2b: if enough columns are covered the starred zeros form a complete assignment; otherwise continue with step 3.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
if (DEBUG) { printf("step 2b\n");}
int col, nOfCoveredColumns;
/* count covered columns */
nOfCoveredColumns = 0;
for(col=0; col<nOfColumns; col++)
{
if(coveredColumns[col])
{
nOfCoveredColumns++;
}
}
if(nOfCoveredColumns == minDim)
{
/* algorithm finished */
buildassignmentvector(assignment, starMatrix, nOfRows, nOfColumns);
}
else
{
/* move to step 3 */
step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
}
// --------------------------------------------------------------------------
// Munkres step 3: prime uncovered zeros; jump to step 4 when a primed zero's row has no starred zero, otherwise adjust the covers and eventually fall through to step 5.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
step3Counter++;
if (DEBUG) {printf("step 3, counter: %d\n", step3Counter);}
bool zerosFound;
int row, col, starCol;
zerosFound = true;
while(zerosFound)
{
zerosFound = false;
for(col=0; col<nOfColumns; col++)
{
if(!coveredColumns[col])
{
for(row=0; row<nOfRows; row++)
{
if (DEBUG) { printf("looked through all the columns, now looking for uncovered rows that are zero. distMatrix[%d] = %f\n", row + nOfRows*col, distMatrix[row + nOfRows*col]);}
if((!coveredRows[row]) && (distMatrix[row + nOfRows*col] == 0))
{
/* prime zero */
primeMatrix[row + nOfRows*col] = true;
/* find starred zero in current row */
for(starCol=0; starCol<nOfColumns; starCol++)
if(starMatrix[row + nOfRows*starCol])
{
break;
}
if(starCol == nOfColumns) /* no starred zero found */
{
/* move to step 4 */
step4(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim, row, col);
return;
}
else
{
coveredRows[row] = true;
coveredColumns[starCol] = false;
zerosFound = true;
break;
}
}
}
}
}
}
/* move to step 5 */
step5(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Munkres step 4: flip stars and primes along the alternating path that starts at the given primed zero, then clear all primes and row covers.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col)
{
if (DEBUG) { printf("step 4\n");}
int n, starRow, starCol, primeRow, primeCol;
int nOfElements = nOfRows*nOfColumns;
/* generate temporary copy of starMatrix */
for(n=0; n<nOfElements; n++)
{
newStarMatrix[n] = starMatrix[n];
}
/* star current zero */
newStarMatrix[row + nOfRows*col] = true;
/* find starred zero in current column */
starCol = col;
for(starRow=0; starRow<nOfRows; starRow++)
{
if(starMatrix[starRow + nOfRows*starCol])
{
break;
}
}
while(starRow<nOfRows)
{
/* unstar the starred zero */
newStarMatrix[starRow + nOfRows*starCol] = false;
/* find primed zero in current row */
primeRow = starRow;
for(primeCol=0; primeCol<nOfColumns; primeCol++)
{
if(primeMatrix[primeRow + nOfRows*primeCol])
{
break;
}
}
/* star the primed zero */
newStarMatrix[primeRow + nOfRows*primeCol] = true;
/* find starred zero in current column */
starCol = primeCol;
for(starRow=0; starRow<nOfRows; starRow++)
{
if(starMatrix[starRow + nOfRows*starCol])
{
break;
}
}
}
/* use temporary copy as new starMatrix */
/* delete all primes, uncover all rows */
for(n=0; n<nOfElements; n++)
{
primeMatrix[n] = false;
starMatrix[n] = newStarMatrix[n];
}
for(n=0; n<nOfRows; n++)
{
coveredRows[n] = false;
}
/* move to step 2a */
step2a(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Munkres step 5: find the smallest uncovered value, add it to every covered row and subtract it from every uncovered column, then return to step 3.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
step5Counter++;
if (DEBUG) { printf("step 5, counter: %d\n", step5Counter);}
//if (DEBUG) { if (step5Counter >= 10) return; }
double h, value;
int row, col;
/* find smallest uncovered element h */
h = DBL_MAX;
for(row=0; row<nOfRows; row++)
{
if(!coveredRows[row])
{
for(col=0; col<nOfColumns; col++)
{
if(!coveredColumns[col])
{
value = distMatrix[row + nOfRows*col];
if(value < h)
{
h = value;
if (DEBUG) { printf("uncovered columns, new h: %f\n", h); }
}
}
}
}
}
/* add h to each covered row */
for(row=0; row<nOfRows; row++)
{
if(coveredRows[row])
{
for(col=0; col<nOfColumns; col++)
{
if (DEBUG) { printf("Adding h to each covered row: %d\n", row + nOfRows*col);}
distMatrix[row + nOfRows*col] += h;
}
}
}
/* subtract h from each uncovered column */
for(col=0; col<nOfColumns; col++)
{
if(!coveredColumns[col])
{
for(row=0; row<nOfRows; row++)
{
if (DEBUG) { printf("subtract h from uncovered columns: %d\n", row + nOfRows*col); }
distMatrix[row + nOfRows*col] -= h;
}
}
}
/* move to step 3 */
step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Computes a suboptimal solution. Good for cases without forbidden assignments.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::assignmentsuboptimal2(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
{
printf("assignmentsuboptimal2...............................................\n");
}
// --------------------------------------------------------------------------
// Computes a suboptimal solution. Good for cases with many forbidden assignments.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::assignmentsuboptimal1(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
{
printf("assignmentsuboptimal1..............................................\n");
}
// --------------------------------------------------------------------------
// Usage example
// --------------------------------------------------------------------------
//int main(void)
int main( int argc, char **argv )
{
int n = read_int( argc, argv, "-n", 10 );
int m = read_int( argc, argv, "-m", 10);
int print = read_int( argc, argv, "-p", 0);
parallel = read_int( argc, argv, "-L", 1);
DEBUG = read_int( argc, argv, "-d", 0);
// This takes a few seconds to initialize the runtime
hipDeviceSynchronize();
// Matrix size
int N=n; // tracks rows
int M=m; // detects columns
// Random numbers generator initialization
//srand (time(NULL));
srand(1);
// Distance matrix N-th track to M-th detect.
vector< vector<double> > Cost(N,vector<double>(M));
// Fill matrix with random values
printf("HungarianAlg.cpp\n");
printf("Creating a random Cost Matrix:\n");
for(int i=0; i<N; i++)
{
for(int j=0; j<M; j++)
{
Cost[i][j] = (double)(rand()%1000)/1000.0;
if (print) { std::cout << Cost[i][j] << "\t";}
}
if (print) { std::cout << std::endl;}
}
AssignmentProblemSolver APS;
vector<int> Assignment;
printf("Solving the random matrix...\n");
double solve_time = read_timer( );
double totalCost = APS.Solve(Cost, Assignment);
//cout << APS.Solve(Cost,Assignment) << endl;
solve_time = read_timer( ) - solve_time;
printf("Total solve_time: %g\n", solve_time);
// Output the result
if (print) {
for(int x=0; x<N; x++)
{
std::cout << x << ":" << Assignment[x] << "\t";
}
}
}
// --------------------------------------------------------------------------
| 3e6b81a763b5c8fcf6e830bb0829ce0c68de840d.cu | #include "HungarianAlg.h"
#define NUM_THREADS 256
using namespace std;
int parallel = 1;
int DEBUG = 0;
__device__ double d_columnAnswer;
__device__ double d_rowAnswer;
int step3Counter = 0;
int step5Counter = 0;
AssignmentProblemSolver::AssignmentProblemSolver()
{
}
AssignmentProblemSolver::~AssignmentProblemSolver()
{
}
//
// timer
//
double read_timer( )
{
static bool initialized = false;
static struct timeval start;
struct timeval end;
if( !initialized )
{
gettimeofday( &start, NULL );
initialized = true;
}
gettimeofday( &end, NULL );
return (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
}
//
// command line option processing
//
int find_option( int argc, char **argv, const char *option )
{
for( int i = 1; i < argc; i++ )
if( strcmp( argv[i], option ) == 0 )
return i;
return -1;
}
int read_int( int argc, char **argv, const char *option, int default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return atoi( argv[iplace+1] );
return default_value;
}
double AssignmentProblemSolver::Solve(vector<vector<double> >& DistMatrix,vector<int>& Assignment,TMethod Method)
{
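    // Flattens the cost matrix into a column-major array, runs the solver selected by Method (all variants currently dispatch to assignmentoptimal), and returns the total cost; Assignment[i] holds the column matched to row i, or -1 if unassigned.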
if(DEBUG) {
printf("solve\n");
}
    int N=DistMatrix.size();     // number of rows (tracks)
    int M=DistMatrix[0].size();  // number of columns (measurements)
int *assignment =new int[N];
double *distIn =new double[N*M];
double cost;
    // Copy the distance matrix into a flat column-major array (element [i][j] stored at index i + N*j)
for(int i=0; i<N; i++)
{
for(int j=0; j<M; j++)
{
distIn[i+N*j] = DistMatrix[i][j];
}
}
switch(Method)
{
case optimal: assignmentoptimal(assignment, &cost, distIn, N, M); break;
case many_forbidden_assignments: assignmentoptimal(assignment, &cost, distIn, N, M); break;
case without_forbidden_assignments: assignmentoptimal(assignment, &cost, distIn, N, M); break;
}
// form result
Assignment.clear();
for(int x=0; x<N; x++)
{
Assignment.push_back(assignment[x]);
}
delete[] assignment;
delete[] distIn;
return cost;
}
// --------------------------------------------------------------------------
// Computes the optimal assignment (minimum overall costs) using Munkres algorithm.
// --------------------------------------------------------------------------
__global__ void findMinCol_gpu(double* d_distMatrix, double* d_dualVariablesColumn, int n) {
    int tid = threadIdx.x * blockDim.x;
    if (tid >= n) return;
    int endIndex = tid + blockDim.x;
    // Track the running minimum in a per-thread register; sharing one __device__ global across threads would be a data race.
    double colMin = d_distMatrix[tid];
    for(int i = tid; i < endIndex; i++) {
        if (d_distMatrix[i] < colMin) { colMin = d_distMatrix[i]; }
    }
    //printf("threadIdx.x: %d, tid: %d, endIndex: %d, colMin: %f\n", threadIdx.x, tid, endIndex, colMin);
    d_dualVariablesColumn[threadIdx.x] = colMin;
}
__global__ void findMinRow_gpu(double* d_distMatrix, double* d_dualVariablesRow, int n) {
    int tid = threadIdx.x;
    if (tid >= n) return;
    int endIndex = n;
    // Each thread scans one row of the column-major cost matrix (the stride equals the row count for a square matrix).
    double rowMin = d_distMatrix[tid];
    for(int i = tid; i < endIndex; i += blockDim.x) {
        if (d_distMatrix[i] < rowMin) { rowMin = d_distMatrix[i]; }
    }
    //printf("tid: %d, endIndex: %d, rowMin: %f\n", tid, endIndex, rowMin);
    d_dualVariablesRow[threadIdx.x] = rowMin;
}
__global__ void subtractMinElementRow_gpu(double* d_distMatrix, double* d_dualVariablesRow, int n) {
int tid = threadIdx.x;
if (tid >= n) return;
// Subtract the smallest element in this row from each element in this row.
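    // The row count is recovered as sqrt(n), so this kernel assumes a square, column-major cost matrix.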
int nOfRows = sqrt((float)n);
int rowIdx = threadIdx.x % nOfRows;
double before = d_distMatrix[tid];
d_distMatrix[tid] = d_distMatrix[tid] - d_dualVariablesRow[rowIdx];
//printf("subtractMinElemRow, tid: %d, minElem: %f, bef: %f, aft: %f, rowIdx: %d \n", tid, d_dualVariablesRow[rowIdx], before, d_distMatrix[tid], rowIdx);
}
void AssignmentProblemSolver::assignmentoptimal(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
{
if (DEBUG) {
printf("assignment optimal.\n");
}
double *distMatrix;
double *dualVariablesRow;
double *dualVariablesColumn;
double *distMatrixTemp;
double *distMatrixEnd;
double *columnEnd;
double value;
double minValue;
bool *coveredColumns;
bool *coveredRows;
bool *starMatrix;
bool *newStarMatrix;
bool *primeMatrix;
int nOfElements;
int minDim;
int row;
int col;
// Init
*cost = 0;
for(row=0; row<nOfRows; row++)
{
assignment[row] = -1.0;
}
// Generate distance matrix
// and check matrix elements positiveness :)
// Total elements number
nOfElements = nOfRows * nOfColumns;
// Memory allocation
distMatrix = (double *)malloc(nOfElements * sizeof(double));
double * d_distMatrix;
cudaMalloc((void **) &d_distMatrix, nOfElements * sizeof(double));
dualVariablesRow = (double *)malloc(nOfRows * sizeof(double));
double * d_dualVariablesRow;
cudaMalloc((void **) &d_dualVariablesRow, nOfRows * sizeof(double));
dualVariablesColumn = (double *)malloc(nOfColumns * sizeof(double));
double * d_dualVariablesColumn;
cudaMalloc((void**) &d_dualVariablesColumn, nOfColumns * sizeof(double));
// Pointer to last element
distMatrixEnd = distMatrix + nOfElements;
//
for(row=0; row<nOfElements; row++)
{
value = distMatrixIn[row];
if(value < 0)
{
cout << "All matrix elements have to be non-negative." << endl;
}
distMatrix[row] = value;
//printf("distMatrix[%d]: %f\n", row, value);
}
// Memory allocation
coveredColumns = (bool *)calloc(nOfColumns, sizeof(bool));
coveredRows = (bool *)calloc(nOfRows, sizeof(bool));
starMatrix = (bool *)calloc(nOfElements, sizeof(bool));
primeMatrix = (bool *)calloc(nOfElements, sizeof(bool));
newStarMatrix = (bool *)calloc(nOfElements, sizeof(bool)); /* used in step4 */
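    // GPU path: find each row's minimum on the device and subtract it from every element of that row (the row-reduction step of the Munkres algorithm); the else branch below does the same serially on the CPU.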
if (parallel) {
cudaMemcpy(d_distMatrix, distMatrix, nOfElements * sizeof(double), cudaMemcpyHostToDevice);
int blks = 1;
//findMinCol_gpu <<< blks, nOfRows >>> (d_distMatrix, d_dualVariablesColumn, nOfElements);
findMinRow_gpu <<< blks, nOfColumns >>> (d_distMatrix, d_dualVariablesRow, nOfElements);
//cudaDeviceSynchronize(); // GPU doesn't block CPU thread
subtractMinElementRow_gpu <<< blks, nOfElements >>> (d_distMatrix, d_dualVariablesRow, nOfElements);
cudaDeviceSynchronize(); // GPU doesn't block CPU thread
//cudaMemcpy(dualVariablesRow, d_dualVariablesRow, nOfRows * sizeof(double), cudaMemcpyDeviceToHost);
//cudaMemcpy(dualVariablesColumn, d_dualVariablesColumn, nOfColumns * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(distMatrix, d_distMatrix, nOfElements * sizeof(double), cudaMemcpyDeviceToHost);
if (DEBUG) {
// for(int i = 0; i < nOfElements; i++) {
// printf("distMatrix[%d]: %f\n", i, distMatrix[i]);
// }
// for(int i = 0; i < nOfRows; i++) {
// printf("smallest value in row %d is: %f\n", i, dualVariablesRow[i]);
// }
/*
for(int i = 0; i < nOfColumns; i++) {
printf("smallest value in column %d is: %f\n", i, dualVariablesColumn[i]);
}
*/
}
} else {
/* preliminary steps */
for(row=0; row<nOfRows; row++)
{
/* find the smallest element in the row */
distMatrixTemp = distMatrix + row;
minValue = *distMatrixTemp;
distMatrixTemp += nOfRows;
while(distMatrixTemp < distMatrixEnd)
{
value = *distMatrixTemp;
if(value < minValue)
{
minValue = value;
}
distMatrixTemp += nOfRows;
}
/* subtract the smallest element from each element of the row */
distMatrixTemp = distMatrix + row;
while(distMatrixTemp < distMatrixEnd)
{
*distMatrixTemp -= minValue;
distMatrixTemp += nOfRows;
}
}
} // Do this in parallel and serial version
minDim = nOfRows;
/* Steps 1 and 2a */
for(row=0; row<nOfRows; row++)
{
for(col=0; col<nOfColumns; col++)
{
if(distMatrix[row + nOfRows*col] == 0)
{
if (DEBUG) { printf("found elements that are zero. distMatrix[%d] = %f\n", row + nOfRows*col, distMatrix[row + nOfRows*col]); }
if(!coveredColumns[col])
{
starMatrix[row + nOfRows*col] = true;
coveredColumns[col] = true;
break;
}
}
}
}
/* move to step 2b */
step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
/* compute cost and remove invalid assignments */
computeassignmentcost(assignment, cost, distMatrixIn, nOfRows);
    /* free allocated memory */
    free(distMatrix);
    free(dualVariablesRow);
    free(dualVariablesColumn);
    free(coveredColumns);
    free(coveredRows);
    free(starMatrix);
    free(primeMatrix);
    free(newStarMatrix);
    cudaFree(d_distMatrix);
    cudaFree(d_dualVariablesRow);
    cudaFree(d_dualVariablesColumn);
return;
}
// --------------------------------------------------------------------------
// Builds the assignment vector: each row is assigned the column of its starred zero.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns)
{
if (DEBUG) {printf("build assignment vector.\n");}
int row, col;
for(row=0; row<nOfRows; row++)
{
for(col=0; col<nOfColumns; col++)
{
if(starMatrix[row + nOfRows*col])
{
assignment[row] = col;
break;
}
}
}
}
// --------------------------------------------------------------------------
// Sums the original costs of the selected (row, column) pairs; unassigned rows (col == -1) are skipped.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows)
{
if (DEBUG) { printf("compute assignment cost.\n");}
int row, col;
for(row=0; row<nOfRows; row++)
{
col = assignment[row];
if(col >= 0)
{
*cost += distMatrix[row + nOfRows*col];
}
}
}
// --------------------------------------------------------------------------
// Munkres step 2a: cover every column that contains a starred zero, then re-check coverage in step 2b.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
if (DEBUG) { printf("step 2a\n"); }
bool *starMatrixTemp, *columnEnd;
int col;
/* cover every column containing a starred zero */
for(col=0; col<nOfColumns; col++)
{
starMatrixTemp = starMatrix + nOfRows*col;
columnEnd = starMatrixTemp + nOfRows;
while(starMatrixTemp < columnEnd)
{
if(*starMatrixTemp++)
{
coveredColumns[col] = true;
break;
}
}
}
/* move to step 3 */
step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Munkres step 2b: if enough columns are covered the starred zeros form a complete assignment; otherwise continue with step 3.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
if (DEBUG) { printf("step 2b\n");}
int col, nOfCoveredColumns;
/* count covered columns */
nOfCoveredColumns = 0;
for(col=0; col<nOfColumns; col++)
{
if(coveredColumns[col])
{
nOfCoveredColumns++;
}
}
if(nOfCoveredColumns == minDim)
{
/* algorithm finished */
buildassignmentvector(assignment, starMatrix, nOfRows, nOfColumns);
}
else
{
/* move to step 3 */
step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
}
// --------------------------------------------------------------------------
// Munkres step 3: prime uncovered zeros; jump to step 4 when a primed zero's row has no starred zero, otherwise adjust the covers and eventually fall through to step 5.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
step3Counter++;
if (DEBUG) {printf("step 3, counter: %d\n", step3Counter);}
bool zerosFound;
int row, col, starCol;
zerosFound = true;
while(zerosFound)
{
zerosFound = false;
for(col=0; col<nOfColumns; col++)
{
if(!coveredColumns[col])
{
for(row=0; row<nOfRows; row++)
{
if (DEBUG) { printf("looked through all the columns, now looking for uncovered rows that are zero. distMatrix[%d] = %f\n", row + nOfRows*col, distMatrix[row + nOfRows*col]);}
if((!coveredRows[row]) && (distMatrix[row + nOfRows*col] == 0))
{
/* prime zero */
primeMatrix[row + nOfRows*col] = true;
/* find starred zero in current row */
for(starCol=0; starCol<nOfColumns; starCol++)
if(starMatrix[row + nOfRows*starCol])
{
break;
}
if(starCol == nOfColumns) /* no starred zero found */
{
/* move to step 4 */
step4(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim, row, col);
return;
}
else
{
coveredRows[row] = true;
coveredColumns[starCol] = false;
zerosFound = true;
break;
}
}
}
}
}
}
/* move to step 5 */
step5(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Munkres step 4: flip stars and primes along the alternating path that starts at the given primed zero, then clear all primes and row covers.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col)
{
if (DEBUG) { printf("step 4\n");}
int n, starRow, starCol, primeRow, primeCol;
int nOfElements = nOfRows*nOfColumns;
/* generate temporary copy of starMatrix */
for(n=0; n<nOfElements; n++)
{
newStarMatrix[n] = starMatrix[n];
}
/* star current zero */
newStarMatrix[row + nOfRows*col] = true;
/* find starred zero in current column */
starCol = col;
for(starRow=0; starRow<nOfRows; starRow++)
{
if(starMatrix[starRow + nOfRows*starCol])
{
break;
}
}
while(starRow<nOfRows)
{
/* unstar the starred zero */
newStarMatrix[starRow + nOfRows*starCol] = false;
/* find primed zero in current row */
primeRow = starRow;
for(primeCol=0; primeCol<nOfColumns; primeCol++)
{
if(primeMatrix[primeRow + nOfRows*primeCol])
{
break;
}
}
/* star the primed zero */
newStarMatrix[primeRow + nOfRows*primeCol] = true;
/* find starred zero in current column */
starCol = primeCol;
for(starRow=0; starRow<nOfRows; starRow++)
{
if(starMatrix[starRow + nOfRows*starCol])
{
break;
}
}
}
/* use temporary copy as new starMatrix */
/* delete all primes, uncover all rows */
for(n=0; n<nOfElements; n++)
{
primeMatrix[n] = false;
starMatrix[n] = newStarMatrix[n];
}
for(n=0; n<nOfRows; n++)
{
coveredRows[n] = false;
}
/* move to step 2a */
step2a(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Munkres step 5: find the smallest uncovered value, add it to every covered row and subtract it from every uncovered column, then return to step 3.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
{
step5Counter++;
if (DEBUG) { printf("step 5, counter: %d\n", step5Counter);}
//if (DEBUG) { if (step5Counter >= 10) return; }
double h, value;
int row, col;
/* find smallest uncovered element h */
h = DBL_MAX;
for(row=0; row<nOfRows; row++)
{
if(!coveredRows[row])
{
for(col=0; col<nOfColumns; col++)
{
if(!coveredColumns[col])
{
value = distMatrix[row + nOfRows*col];
if(value < h)
{
h = value;
if (DEBUG) { printf("uncovered columns, new h: %f\n", h); }
}
}
}
}
}
/* add h to each covered row */
for(row=0; row<nOfRows; row++)
{
if(coveredRows[row])
{
for(col=0; col<nOfColumns; col++)
{
if (DEBUG) { printf("Adding h to each covered row: %d\n", row + nOfRows*col);}
distMatrix[row + nOfRows*col] += h;
}
}
}
/* subtract h from each uncovered column */
for(col=0; col<nOfColumns; col++)
{
if(!coveredColumns[col])
{
for(row=0; row<nOfRows; row++)
{
if (DEBUG) { printf("subtract h from uncovered columns: %d\n", row + nOfRows*col); }
distMatrix[row + nOfRows*col] -= h;
}
}
}
/* move to step 3 */
step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
}
// --------------------------------------------------------------------------
// Computes a suboptimal solution. Good for cases without forbidden assignments.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::assignmentsuboptimal2(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
{
printf("assignmentsuboptimal2...............................................\n");
}
// --------------------------------------------------------------------------
// Computes a suboptimal solution. Good for cases with many forbidden assignments.
// --------------------------------------------------------------------------
void AssignmentProblemSolver::assignmentsuboptimal1(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
{
printf("assignmentsuboptimal1..............................................\n");
}
// --------------------------------------------------------------------------
// Usage example
// --------------------------------------------------------------------------
//int main(void)
int main( int argc, char **argv )
{
int n = read_int( argc, argv, "-n", 10 );
int m = read_int( argc, argv, "-m", 10);
int print = read_int( argc, argv, "-p", 0);
parallel = read_int( argc, argv, "-L", 1);
DEBUG = read_int( argc, argv, "-d", 0);
// This takes a few seconds to initialize the runtime
cudaThreadSynchronize();
// Matrix size
int N=n; // number of tracks (rows)
int M=m; // number of detections (columns)
// Random number generator initialization
//srand (time(NULL));
srand(1);
// Distance matrix N-th track to M-th detect.
vector< vector<double> > Cost(N,vector<double>(M));
// Fill matrix with random values
printf("HungarianAlg.cpp\n");
printf("Creating a random Cost Matrix:\n");
for(int i=0; i<N; i++)
{
for(int j=0; j<M; j++)
{
Cost[i][j] = (double)(rand()%1000)/1000.0;
if (print) { std::cout << Cost[i][j] << "\t";}
}
if (print) { std::cout << std::endl;}
}
AssignmentProblemSolver APS;
vector<int> Assignment;
printf("Solving the random matrix...\n");
double solve_time = read_timer( );
double totalCost = APS.Solve(Cost, Assignment);
//cout << APS.Solve(Cost,Assignment) << endl;
solve_time = read_timer( ) - solve_time;
printf("Total solve_time: %g\n", solve_time);
// Output the result
if (print) {
for(int x=0; x<N; x++)
{
std::cout << x << ":" << Assignment[x] << "\t";
}
}
}
// --------------------------------------------------------------------------
|
0a346848fb07e074c1c36cb29be284e4d3dd79eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "Utilities.cuh"
using namespace std;
#define NUM_THREADS 32
#define NUM_BLOCKS 16
#define NUM_STREAMS 3
__global__ void kernel(const int *in, int *out, int N)
{
int start = blockIdx.x * blockDim.x + threadIdx.x;
int end = N;
for (int i = start; i < end; i += blockDim.x * gridDim.x)
{
out[i] = in[i] * in[i];
}
}
int main()
{
const int N = 6000000;
// --- Host side input data allocation and initialization. Registering host memory as page-locked (required for asynch hipMemcpyAsync).
int *h_in = new int[N]; for(int i = 0; i < N; i++) h_in[i] = 5;
gpuErrchk(hipHostRegister(h_in, N * sizeof(int), hipHostRegisterPortable));
// --- Host side output data allocation and initialization. Registering host memory as page-locked (required for asynch hipMemcpyAsync).
int *h_out = new int[N]; for(int i = 0; i < N; i++) h_out[i] = 0;
gpuErrchk(hipHostRegister(h_out, N * sizeof(int), hipHostRegisterPortable));
// --- Host side check results vector allocation and initialization
int *h_checkResults = new int[N]; for(int i = 0; i < N; i++) h_checkResults[i] = h_in[i] * h_in[i];
// --- Device side input data allocation.
int *d_in = 0; gpuErrchk(hipMalloc((void **)&d_in, N * sizeof(int)));
// --- Device side output data allocation.
int *d_out = 0; gpuErrchk( hipMalloc((void **)&d_out, N * sizeof(int)));
int streamSize = N / NUM_STREAMS;
size_t streamMemSize = N * sizeof(int) / NUM_STREAMS;
// --- Set kernel launch configuration
dim3 nThreads = dim3(NUM_THREADS,1,1);
dim3 nBlocks = dim3(NUM_BLOCKS, 1,1);
dim3 subKernelBlock = dim3((int)ceil((float)nBlocks.x / 2));
// --- Create CUDA streams
hipStream_t streams[NUM_STREAMS];
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(hipStreamCreate(&streams[i]));
/**************************/
/* BREADTH-FIRST APPROACH */
/**************************/
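// --- Issue all host-to-device copies first, then all kernels, then all device-to-host copies, so operations from different streams can overlap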
for(int i = 0; i < NUM_STREAMS; i++) {
int offset = i * streamSize;
hipMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, hipMemcpyHostToDevice, streams[i]);
}
for(int i = 0; i < NUM_STREAMS; i++)
{
int offset = i * streamSize;
hipLaunchKernelGGL(( kernel), dim3(subKernelBlock), dim3(nThreads), 0, streams[i], &d_in[offset], &d_out[offset], streamSize/2);
hipLaunchKernelGGL(( kernel), dim3(subKernelBlock), dim3(nThreads), 0, streams[i], &d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2);
}
for(int i = 0; i < NUM_STREAMS; i++) {
int offset = i * streamSize;
hipMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, hipMemcpyDeviceToHost, streams[i]);
}
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(hipStreamSynchronize(streams[i]));
gpuErrchk(hipDeviceSynchronize());
// --- Release resources
gpuErrchk(hipHostUnregister(h_in));
gpuErrchk(hipHostUnregister(h_out));
gpuErrchk(hipFree(d_in));
gpuErrchk(hipFree(d_out));
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(hipStreamDestroy(streams[i]));
hipDeviceReset();
// --- GPU output check
int sum = 0;
for(int i = 0; i < N; i++)
sum += h_checkResults[i] - h_out[i];
cout << "Error between CPU and GPU: " << sum << endl;
delete[] h_in;
delete[] h_out;
delete[] h_checkResults;
return 0;
}
| 0a346848fb07e074c1c36cb29be284e4d3dd79eb.cu | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "Utilities.cuh"
using namespace std;
#define NUM_THREADS 32
#define NUM_BLOCKS 16
#define NUM_STREAMS 3
__global__ void kernel(const int *in, int *out, int N)
{
int start = blockIdx.x * blockDim.x + threadIdx.x;
int end = N;
for (int i = start; i < end; i += blockDim.x * gridDim.x)
{
out[i] = in[i] * in[i];
}
}
int main()
{
const int N = 6000000;
// --- Host side input data allocation and initialization. Registering host memory as page-locked (required for asynch cudaMemcpyAsync).
int *h_in = new int[N]; for(int i = 0; i < N; i++) h_in[i] = 5;
gpuErrchk(cudaHostRegister(h_in, N * sizeof(int), cudaHostRegisterPortable));
// --- Host side output data allocation and initialization. Registering host memory as page-locked (required for asynch cudaMemcpyAsync).
int *h_out = new int[N]; for(int i = 0; i < N; i++) h_out[i] = 0;
gpuErrchk(cudaHostRegister(h_out, N * sizeof(int), cudaHostRegisterPortable));
// --- Host side check results vector allocation and initialization
int *h_checkResults = new int[N]; for(int i = 0; i < N; i++) h_checkResults[i] = h_in[i] * h_in[i];
// --- Device side input data allocation.
int *d_in = 0; gpuErrchk(cudaMalloc((void **)&d_in, N * sizeof(int)));
// --- Device side output data allocation.
int *d_out = 0; gpuErrchk( cudaMalloc((void **)&d_out, N * sizeof(int)));
int streamSize = N / NUM_STREAMS;
size_t streamMemSize = N * sizeof(int) / NUM_STREAMS;
// --- Set kernel launch configuration
dim3 nThreads = dim3(NUM_THREADS,1,1);
dim3 nBlocks = dim3(NUM_BLOCKS, 1,1);
dim3 subKernelBlock = dim3((int)ceil((float)nBlocks.x / 2));
// --- Create CUDA streams
cudaStream_t streams[NUM_STREAMS];
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(cudaStreamCreate(&streams[i]));
/**************************/
/* BREADTH-FIRST APPROACH */
/**************************/
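// --- Issue all host-to-device copies first, then all kernels, then all device-to-host copies, so operations from different streams can overlap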
for(int i = 0; i < NUM_STREAMS; i++) {
int offset = i * streamSize;
cudaMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, cudaMemcpyHostToDevice, streams[i]);
}
for(int i = 0; i < NUM_STREAMS; i++)
{
int offset = i * streamSize;
kernel<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset], &d_out[offset], streamSize/2);
kernel<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2);
}
for(int i = 0; i < NUM_STREAMS; i++) {
int offset = i * streamSize;
cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, cudaMemcpyDeviceToHost, streams[i]);
}
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(cudaStreamSynchronize(streams[i]));
gpuErrchk(cudaDeviceSynchronize());
// --- Release resources
gpuErrchk(cudaHostUnregister(h_in));
gpuErrchk(cudaHostUnregister(h_out));
gpuErrchk(cudaFree(d_in));
gpuErrchk(cudaFree(d_out));
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(cudaStreamDestroy(streams[i]));
cudaDeviceReset();
// --- GPU output check
int sum = 0;
for(int i = 0; i < N; i++)
sum += h_checkResults[i] - h_out[i];
cout << "Error between CPU and GPU: " << sum << endl;
delete[] h_in;
delete[] h_out;
delete[] h_checkResults;
return 0;
}
|
e5de9f20094c639275162e8cd65cc80621100df4.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include "..\game\be_ai_lspi.h"
#include <stdlib.h>
#include <fstream>
#include <string>
#include "LspiAgent.h"
#include "GradientAgent.h"
#include <thrust/generate.h>
#include <sys/stat.h>
#include <iomanip>
#include <windows.h>
#ifdef GRADIENT
#ifdef CPU
GradientAgent<host_vector<float>> *agents[MAX_CLIENTS];
#else
GradientAgent<device_vector<float>> *agents[MAX_CLIENTS];
#endif
#else
#ifdef CPU
LspiAgent<host_vector<float>> *agents[MAX_CLIENTS];
#else
LspiAgent<device_vector<float>> *agents[MAX_CLIENTS];
#endif
#endif
#ifdef ONLINE
struct threadarg
{
int client;
sample *samples;
int size;
};
HANDLE updateMutex, threadHandle;
bool updateComplete = true;
__int64 frequency;
#endif
using namespace std;
/*
* Loads the bot's policy from a file and spawns an LspiAgent using the policy.
*/
void LspiBot_Init(int client)
{
#ifdef ONLINE
if(updateMutex == NULL)
{
updateMutex = CreateMutex(NULL, FALSE, NULL);
}
LARGE_INTEGER li;
if(QueryPerformanceFrequency(&li))
{
frequency = li.QuadPart;
}
#endif
host_vector<float> policy(NUM_ACTIONS*BASIS_SIZE);
string value;
#ifdef GRADIENT
char *fname = "grad.pol";
#else
char *fname = "lspi.pol";
#endif
struct stat buf;
ifstream infile;
ofstream outfile;
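// If no saved policy file exists, initialize the policy with random weights and persist it; otherwise load the stored weights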
if(stat(fname, &buf) == -1)
{
outfile.open(fname);
for(int i = 0; i < policy.size(); i++)
{
policy[i] = ((float)rand()/RAND_MAX);
if(i + 1 == policy.size())
{
outfile << fixed << setprecision(8) << policy[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << policy[i] << ",";
}
}
outfile.close();
}
else
{
infile.open(fname);
for(int i = 0; i < NUM_ACTIONS*BASIS_SIZE; i++)
{
getline(infile, value, ',');
policy[i] = (float)atof(value.c_str());
}
infile.close();
}
#ifndef CPU
hipblasStatus_t stat = hipblasCreate(&blas::handle);
#endif
#ifdef EXPLORE
bool explore = true;
#else
bool explore = false;
#endif
#ifdef GRADIENT
#ifdef CPU
agents[client] = new GradientAgent<host_vector<float>>(policy, 0.01, 0.80);
#else
agents[client] = new GradientAgent<device_vector<float>>(policy, 0.01, 0.80);
#endif
#else
#ifdef CPU
agents[client] = new LspiAgent<host_vector<float>>(policy, 0.95, explore, EXP_RATE);
#else
agents[client] = new LspiAgent<device_vector<float>>(policy, 0.95, explore, EXP_RATE);
#endif
#endif
}
void LspiBot_Shutdown(int client)
{
#ifdef GRADIENT
ofstream outfile;
char *fname = "grad.pol";
float temp;
outfile.open(fname);
for(int i = 0; i < agents[client]->w.size(); i++)
{
temp = agents[client]->w[i];
if(i + 1 == agents[client]->w.size())
{
outfile << fixed << setprecision(8) << agents[client]->w[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << agents[client]->w[i] << ",";
}
}
outfile.close();
#endif
#ifdef ONLINE
SuspendThread(threadHandle);
CloseHandle(threadHandle);
CloseHandle(updateMutex);
#endif
delete agents[client];
}
int LspiBot_GetAction(int client, lspi_action_basis_t *basis) {
return agents[client]->getAction(basis);
}
void LspiBot_GradUpdate(int client, lspi_action_basis_t *prev, lspi_action_basis_t *cur, int action)
{
#ifdef GRADIENT
sample s;
s.state = cur;
s.action = action;
s.final_state = prev;
agents[client]->update(s);
#endif
}
#ifdef ONLINE
DWORD WINAPI UpdateThread( LPVOID lpParam )
{
LARGE_INTEGER before, after;
QueryPerformanceCounter(&before);
threadarg *args = (threadarg*)lpParam;
host_vector<sample> samples(args->size);
for(int i = 0; i < args->size; i++)
{
sample s;
s.state = args->samples[i].state;
s.final_state = args->samples[i].final_state;
s.action = args->samples[i].action;
samples[i] = s;
}
device_vector<sample> dev_samples = samples;
host_vector<float> policy = agents[args->client]->updatePolicy(dev_samples);
// Write to file
ofstream outfile("lspi.pol");
for(int i = 0; i < policy.size(); i++)
{
if(i + 1 == policy.size())
{
outfile << fixed << setprecision(8) << policy[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << policy[i] << ",";
}
}
outfile.close();
QueryPerformanceCounter(&after);
double policy_update_time = (double)(after.QuadPart - before.QuadPart)/frequency;
ofstream perffile;
char *fname = "perf_online.dat";
perffile.open(fname, ofstream::app);
perffile << "Policy Update (Size, Time): " << args->size << ", " << fixed << setprecision(8) << 1000.0*policy_update_time << endl;
perffile.close();
free(args);
WaitForSingleObject(updateMutex, INFINITE);
updateComplete = true;
ReleaseMutex(updateMutex);
return 0;
}
int LspiBot_Update(int client, sample *samples, int size)
{
WaitForSingleObject(updateMutex, INFINITE);
if(updateComplete)
{
if(threadHandle != NULL)
{
CloseHandle(threadHandle);
}
threadarg *args = (threadarg *)malloc(sizeof(threadarg));
args->client = client;
args->samples = samples;
args->size = size;
threadHandle = CreateThread(NULL, 0, UpdateThread, args, 0, NULL);
if(threadHandle == NULL)
{
int test_val; // We can't print anything out from here... so let's just make something we can put a stop point on.
}
updateComplete = false;
ReleaseMutex(updateMutex);
return 1;
}
ReleaseMutex(updateMutex);
return 0;
}
#else
void LspiBot_Update(int client, const char *fname)
{
#ifndef GRADIENT
// Load the samples into a vector and update LSPI agent's policy
host_vector<sample> samples;
string value;
ifstream file(fname);
thrust::host_vector<sample>::iterator it = samples.end();
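// Each comma-separated line holds the action index followed by every field of the state and then of the final state (the last field is newline-terminated)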
while(file.good())
{
sample s;
lspi_action_basis_t *state = (lspi_action_basis_t*)malloc(sizeof(lspi_action_basis_t));
lspi_action_basis_t *fstate = (lspi_action_basis_t*)malloc(sizeof(lspi_action_basis_t));
s.state = state;
s.final_state = fstate;
//// Action ////
if(!getline(file, value, ','))
{
break;
}
s.action = atoi(value.c_str());
////////////////
/***** START READING STATE *****/
//// For calculated reward ////
getline(file, value, ',');
state->kill_diff = atoi(value.c_str());
getline(file, value, ',');
state->death_diff = atoi(value.c_str());
getline(file, value, ',');
state->health_diff = atoi(value.c_str());
getline(file, value, ',');
state->armor_diff = atoi(value.c_str());
getline(file, value, ',');
state->hit_count_diff = atoi(value.c_str());
///////////////////////////////
//// Stats ////
getline(file, value, ',');
state->stat_health = atoi(value.c_str());
getline(file, value, ',');
state->stat_armor = atoi(value.c_str());
getline(file, value, ',');
state->stat_max_health = atoi(value.c_str());
///////////////
//// Powerups ////
getline(file, value, ',');
state->pw_quad = atoi(value.c_str());
getline(file, value, ',');
state->pw_battlesuit = atoi(value.c_str());
getline(file, value, ',');
state->pw_haste = atoi(value.c_str());
getline(file, value, ',');
state->pw_invis = atoi(value.c_str());
getline(file, value, ',');
state->pw_regen = atoi(value.c_str());
getline(file, value, ',');
state->pw_flight = atoi(value.c_str());
getline(file, value, ',');
state->pw_scout = atoi(value.c_str());
getline(file, value, ',');
state->pw_guard = atoi(value.c_str());
getline(file, value, ',');
state->pw_doubler = atoi(value.c_str());
getline(file, value, ',');
state->pw_ammoregen = atoi(value.c_str());
getline(file, value, ',');
state->pw_invulnerability = atoi(value.c_str());
//////////////////
//// Ammo ////
getline(file, value, ',');
state->wp_gauntlet = atoi(value.c_str());
getline(file, value, ',');
state->wp_machinegun = atoi(value.c_str());
getline(file, value, ',');
state->wp_shotgun = atoi(value.c_str());
getline(file, value, ',');
state->wp_grenade_launcher = atoi(value.c_str());
getline(file, value, ',');
state->wp_rocket_launcher = atoi(value.c_str());
getline(file, value, ',');
state->wp_lightning = atoi(value.c_str());
getline(file, value, ',');
state->wp_railgun = atoi(value.c_str());
getline(file, value, ',');
state->wp_plasmagun = atoi(value.c_str());
getline(file, value, ',');
state->wp_bfg = atoi(value.c_str());
getline(file, value, ',');
state->wp_grappling_hook = atoi(value.c_str());
//////////////
//// Enemy Info ////
getline(file, value, ',');
state->enemy = atoi(value.c_str());
getline(file, value, ',');
state->enemy_line_dist = (float)atof(value.c_str());
getline(file, value, ',');
state->enemyposition_time = (float)atof(value.c_str());
getline(file, value, ',');
state->enemy_is_invisible = atoi(value.c_str());
getline(file, value, ',');
state->enemy_is_shooting = atoi(value.c_str());
getline(file, value, ',');
state->enemy_weapon = atoi(value.c_str());
////////////////////
//// Goal Info////
getline(file, value, ',');
state->goal_flags = atoi(value.c_str());
getline(file, value, ',');
state->item_type = atoi(value.c_str());
//////////////////
//// Exit Information ////
getline(file, value, ',');
state->last_enemy_area_exits = atoi(value.c_str());
getline(file, value, ',');
state->goal_area_exits = atoi(value.c_str());
getline(file, value, ',');
state->current_area_exits = atoi(value.c_str());
//////////////////////////
//// Area Numbers ////
getline(file, value, ',');
state->current_area_num = atoi(value.c_str());
getline(file, value, ',');
state->goal_area_num = atoi(value.c_str());
getline(file, value, ',');
state->enemy_area_num = atoi(value.c_str());
//////////////////////////
//// Misc ////
getline(file, value, ',');
state->tfl = atoi(value.c_str());
getline(file, value, ',');
state->last_hit_count = atoi(value.c_str());
//////////////
/***** END READING STATE *****/
/***** START READING FINAL STATE *****/
//// For calculated reward ////
getline(file, value, ',');
fstate->kill_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->death_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->health_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->armor_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->hit_count_diff = atoi(value.c_str());
///////////////////////////////
//// Stats ////
getline(file, value, ',');
fstate->stat_health = atoi(value.c_str());
getline(file, value, ',');
fstate->stat_armor = atoi(value.c_str());
getline(file, value, ',');
fstate->stat_max_health = atoi(value.c_str());
///////////////
//// Powerups ////
getline(file, value, ',');
fstate->pw_quad = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_battlesuit = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_haste = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_invis = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_regen = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_flight = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_scout = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_guard = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_doubler = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_ammoregen = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_invulnerability = atoi(value.c_str());
//////////////////
//// Ammo ////
getline(file, value, ',');
fstate->wp_gauntlet = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_machinegun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_shotgun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_grenade_launcher = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_rocket_launcher = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_lightning = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_railgun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_plasmagun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_bfg = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_grappling_hook = atoi(value.c_str());
//////////////
//// Enemy Info ////
getline(file, value, ',');
fstate->enemy = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_line_dist = (float)atof(value.c_str());
getline(file, value, ',');
fstate->enemyposition_time = (float)atof(value.c_str());
getline(file, value, ',');
fstate->enemy_is_invisible = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_is_shooting = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_weapon = atoi(value.c_str());
////////////////////
//// Goal Info////
getline(file, value, ',');
fstate->goal_flags = atoi(value.c_str());
getline(file, value, ',');
fstate->item_type = atoi(value.c_str());
//////////////////
//// Exit Information ////
getline(file, value, ',');
fstate->last_enemy_area_exits = atoi(value.c_str());
getline(file, value, ',');
fstate->goal_area_exits = atoi(value.c_str());
getline(file, value, ',');
fstate->current_area_exits = atoi(value.c_str());
//////////////////////////
//// Area Numbers ////
getline(file, value, ',');
fstate->current_area_num = atoi(value.c_str());
getline(file, value, ',');
fstate->goal_area_num = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_area_num = atoi(value.c_str());
//////////////////////////
//// Misc ////
getline(file, value, ',');
fstate->tfl = atoi(value.c_str());
getline(file, value, '\n');
fstate->last_hit_count = atoi(value.c_str());
//////////////
/***** END READING FINAL STATE *****/
samples.insert(it, s);
it = samples.end();
}
file.close();
#ifdef CPU
host_vector<float> policy = agents[client]->updatePolicy(samples);
#else
device_vector<sample> dev_samples = samples;
host_vector<float> policy = agents[client]->updatePolicy(dev_samples);
#endif
// Write to file
ofstream outfile("lspi.pol");
for(int i = 0; i < policy.size(); i++)
{
if(i + 1 == policy.size())
{
outfile << fixed << setprecision(8) << policy[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << policy[i] << ",";
}
}
outfile.close();
// Free space used by samples
for(int i = 0; i < samples.size(); i++)
{
free(samples[i].final_state);
free(samples[i].state);
}
#endif
}
#endif | e5de9f20094c639275162e8cd65cc80621100df4.cu | #include "stdafx.h"
#include "..\game\be_ai_lspi.h"
#include <stdlib.h>
#include <fstream>
#include <string>
#include "LspiAgent.h"
#include "GradientAgent.h"
#include <thrust/generate.h>
#include <sys/stat.h>
#include <iomanip>
#include <windows.h>
#ifdef GRADIENT
#ifdef CPU
GradientAgent<host_vector<float>> *agents[MAX_CLIENTS];
#else
GradientAgent<device_vector<float>> *agents[MAX_CLIENTS];
#endif
#else
#ifdef CPU
LspiAgent<host_vector<float>> *agents[MAX_CLIENTS];
#else
LspiAgent<device_vector<float>> *agents[MAX_CLIENTS];
#endif
#endif
#ifdef ONLINE
struct threadarg
{
int client;
sample *samples;
int size;
};
HANDLE updateMutex, threadHandle;
bool updateComplete = true;
__int64 frequency;
#endif
using namespace std;
/*
* Loads the bot's policy from a file and spawns an LspiAgent using the policy.
*/
void LspiBot_Init(int client)
{
#ifdef ONLINE
if(updateMutex == NULL)
{
updateMutex = CreateMutex(NULL, FALSE, NULL);
}
LARGE_INTEGER li;
if(QueryPerformanceFrequency(&li))
{
frequency = li.QuadPart;
}
#endif
host_vector<float> policy(NUM_ACTIONS*BASIS_SIZE);
string value;
#ifdef GRADIENT
char *fname = "grad.pol";
#else
char *fname = "lspi.pol";
#endif
struct stat buf;
ifstream infile;
ofstream outfile;
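// If no saved policy file exists, initialize the policy with random weights and persist it; otherwise load the stored weights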
if(stat(fname, &buf) == -1)
{
outfile.open(fname);
for(int i = 0; i < policy.size(); i++)
{
policy[i] = ((float)rand()/RAND_MAX);
if(i + 1 == policy.size())
{
outfile << fixed << setprecision(8) << policy[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << policy[i] << ",";
}
}
outfile.close();
}
else
{
infile.open(fname);
for(int i = 0; i < NUM_ACTIONS*BASIS_SIZE; i++)
{
getline(infile, value, ',');
policy[i] = (float)atof(value.c_str());
}
infile.close();
}
#ifndef CPU
cublasStatus_t stat = cublasCreate(&blas::handle);
#endif
#ifdef EXPLORE
bool explore = true;
#else
bool explore = false;
#endif
#ifdef GRADIENT
#ifdef CPU
agents[client] = new GradientAgent<host_vector<float>>(policy, 0.01, 0.80);
#else
agents[client] = new GradientAgent<device_vector<float>>(policy, 0.01, 0.80);
#endif
#else
#ifdef CPU
agents[client] = new LspiAgent<host_vector<float>>(policy, 0.95, explore, EXP_RATE);
#else
agents[client] = new LspiAgent<device_vector<float>>(policy, 0.95, explore, EXP_RATE);
#endif
#endif
}
void LspiBot_Shutdown(int client)
{
#ifdef GRADIENT
ofstream outfile;
char *fname = "grad.pol";
float temp;
outfile.open(fname);
for(int i = 0; i < agents[client]->w.size(); i++)
{
temp = agents[client]->w[i];
if(i + 1 == agents[client]->w.size())
{
outfile << fixed << setprecision(8) << agents[client]->w[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << agents[client]->w[i] << ",";
}
}
outfile.close();
#endif
#ifdef ONLINE
SuspendThread(threadHandle);
CloseHandle(threadHandle);
CloseHandle(updateMutex);
#endif
delete agents[client];
}
int LspiBot_GetAction(int client, lspi_action_basis_t *basis) {
return agents[client]->getAction(basis);
}
void LspiBot_GradUpdate(int client, lspi_action_basis_t *prev, lspi_action_basis_t *cur, int action)
{
#ifdef GRADIENT
sample s;
s.state = cur;
s.action = action;
s.final_state = prev;
agents[client]->update(s);
#endif
}
#ifdef ONLINE
DWORD WINAPI UpdateThread( LPVOID lpParam )
{
LARGE_INTEGER before, after;
QueryPerformanceCounter(&before);
threadarg *args = (threadarg*)lpParam;
host_vector<sample> samples(args->size);
for(int i = 0; i < args->size; i++)
{
sample s;
s.state = args->samples[i].state;
s.final_state = args->samples[i].final_state;
s.action = args->samples[i].action;
samples[i] = s;
}
device_vector<sample> dev_samples = samples;
host_vector<float> policy = agents[args->client]->updatePolicy(dev_samples);
// Write to file
ofstream outfile("lspi.pol");
for(int i = 0; i < policy.size(); i++)
{
if(i + 1 == policy.size())
{
outfile << fixed << setprecision(8) << policy[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << policy[i] << ",";
}
}
outfile.close();
QueryPerformanceCounter(&after);
double policy_update_time = (double)(after.QuadPart - before.QuadPart)/frequency;
ofstream perffile;
char *fname = "perf_online.dat";
perffile.open(fname, ofstream::app);
perffile << "Policy Update (Size, Time): " << args->size << ", " << fixed << setprecision(8) << 1000.0*policy_update_time << endl;
perffile.close();
free(args);
WaitForSingleObject(updateMutex, INFINITE);
updateComplete = true;
ReleaseMutex(updateMutex);
return 0;
}
int LspiBot_Update(int client, sample *samples, int size)
{
WaitForSingleObject(updateMutex, INFINITE);
if(updateComplete)
{
if(threadHandle != NULL)
{
CloseHandle(threadHandle);
}
threadarg *args = (threadarg *)malloc(sizeof(threadarg));
args->client = client;
args->samples = samples;
args->size = size;
threadHandle = CreateThread(NULL, 0, UpdateThread, args, 0, NULL);
if(threadHandle == NULL)
{
int test_val; // We can't print anything out from here... so let's just make something we can put a stop point on.
}
updateComplete = false;
ReleaseMutex(updateMutex);
return 1;
}
ReleaseMutex(updateMutex);
return 0;
}
#else
void LspiBot_Update(int client, const char *fname)
{
#ifndef GRADIENT
// Load the samples into a vector and update LSPI agent's policy
host_vector<sample> samples;
string value;
ifstream file(fname);
thrust::host_vector<sample>::iterator it = samples.end();
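// Each comma-separated line holds the action index followed by every field of the state and then of the final state (the last field is newline-terminated)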
while(file.good())
{
sample s;
lspi_action_basis_t *state = (lspi_action_basis_t*)malloc(sizeof(lspi_action_basis_t));
lspi_action_basis_t *fstate = (lspi_action_basis_t*)malloc(sizeof(lspi_action_basis_t));
s.state = state;
s.final_state = fstate;
//// Action ////
if(!getline(file, value, ','))
{
break;
}
s.action = atoi(value.c_str());
////////////////
/***** START READING STATE *****/
//// For calculated reward ////
getline(file, value, ',');
state->kill_diff = atoi(value.c_str());
getline(file, value, ',');
state->death_diff = atoi(value.c_str());
getline(file, value, ',');
state->health_diff = atoi(value.c_str());
getline(file, value, ',');
state->armor_diff = atoi(value.c_str());
getline(file, value, ',');
state->hit_count_diff = atoi(value.c_str());
///////////////////////////////
//// Stats ////
getline(file, value, ',');
state->stat_health = atoi(value.c_str());
getline(file, value, ',');
state->stat_armor = atoi(value.c_str());
getline(file, value, ',');
state->stat_max_health = atoi(value.c_str());
///////////////
//// Powerups ////
getline(file, value, ',');
state->pw_quad = atoi(value.c_str());
getline(file, value, ',');
state->pw_battlesuit = atoi(value.c_str());
getline(file, value, ',');
state->pw_haste = atoi(value.c_str());
getline(file, value, ',');
state->pw_invis = atoi(value.c_str());
getline(file, value, ',');
state->pw_regen = atoi(value.c_str());
getline(file, value, ',');
state->pw_flight = atoi(value.c_str());
getline(file, value, ',');
state->pw_scout = atoi(value.c_str());
getline(file, value, ',');
state->pw_guard = atoi(value.c_str());
getline(file, value, ',');
state->pw_doubler = atoi(value.c_str());
getline(file, value, ',');
state->pw_ammoregen = atoi(value.c_str());
getline(file, value, ',');
state->pw_invulnerability = atoi(value.c_str());
//////////////////
//// Ammo ////
getline(file, value, ',');
state->wp_gauntlet = atoi(value.c_str());
getline(file, value, ',');
state->wp_machinegun = atoi(value.c_str());
getline(file, value, ',');
state->wp_shotgun = atoi(value.c_str());
getline(file, value, ',');
state->wp_grenade_launcher = atoi(value.c_str());
getline(file, value, ',');
state->wp_rocket_launcher = atoi(value.c_str());
getline(file, value, ',');
state->wp_lightning = atoi(value.c_str());
getline(file, value, ',');
state->wp_railgun = atoi(value.c_str());
getline(file, value, ',');
state->wp_plasmagun = atoi(value.c_str());
getline(file, value, ',');
state->wp_bfg = atoi(value.c_str());
getline(file, value, ',');
state->wp_grappling_hook = atoi(value.c_str());
//////////////
//// Enemy Info ////
getline(file, value, ',');
state->enemy = atoi(value.c_str());
getline(file, value, ',');
state->enemy_line_dist = (float)atof(value.c_str());
getline(file, value, ',');
state->enemyposition_time = (float)atof(value.c_str());
getline(file, value, ',');
state->enemy_is_invisible = atoi(value.c_str());
getline(file, value, ',');
state->enemy_is_shooting = atoi(value.c_str());
getline(file, value, ',');
state->enemy_weapon = atoi(value.c_str());
////////////////////
//// Goal Info////
getline(file, value, ',');
state->goal_flags = atoi(value.c_str());
getline(file, value, ',');
state->item_type = atoi(value.c_str());
//////////////////
//// Exit Information ////
getline(file, value, ',');
state->last_enemy_area_exits = atoi(value.c_str());
getline(file, value, ',');
state->goal_area_exits = atoi(value.c_str());
getline(file, value, ',');
state->current_area_exits = atoi(value.c_str());
//////////////////////////
//// Area Numbers ////
getline(file, value, ',');
state->current_area_num = atoi(value.c_str());
getline(file, value, ',');
state->goal_area_num = atoi(value.c_str());
getline(file, value, ',');
state->enemy_area_num = atoi(value.c_str());
//////////////////////////
//// Misc ////
getline(file, value, ',');
state->tfl = atoi(value.c_str());
getline(file, value, ',');
state->last_hit_count = atoi(value.c_str());
//////////////
/***** END READING STATE *****/
/***** START READING FINAL STATE *****/
//// For calculated reward ////
getline(file, value, ',');
fstate->kill_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->death_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->health_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->armor_diff = atoi(value.c_str());
getline(file, value, ',');
fstate->hit_count_diff = atoi(value.c_str());
///////////////////////////////
//// Stats ////
getline(file, value, ',');
fstate->stat_health = atoi(value.c_str());
getline(file, value, ',');
fstate->stat_armor = atoi(value.c_str());
getline(file, value, ',');
fstate->stat_max_health = atoi(value.c_str());
///////////////
//// Powerups ////
getline(file, value, ',');
fstate->pw_quad = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_battlesuit = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_haste = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_invis = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_regen = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_flight = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_scout = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_guard = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_doubler = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_ammoregen = atoi(value.c_str());
getline(file, value, ',');
fstate->pw_invulnerability = atoi(value.c_str());
//////////////////
//// Ammo ////
getline(file, value, ',');
fstate->wp_gauntlet = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_machinegun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_shotgun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_grenade_launcher = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_rocket_launcher = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_lightning = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_railgun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_plasmagun = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_bfg = atoi(value.c_str());
getline(file, value, ',');
fstate->wp_grappling_hook = atoi(value.c_str());
//////////////
//// Enemy Info ////
getline(file, value, ',');
fstate->enemy = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_line_dist = (float)atof(value.c_str());
getline(file, value, ',');
fstate->enemyposition_time = (float)atof(value.c_str());
getline(file, value, ',');
fstate->enemy_is_invisible = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_is_shooting = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_weapon = atoi(value.c_str());
////////////////////
//// Goal Info////
getline(file, value, ',');
fstate->goal_flags = atoi(value.c_str());
getline(file, value, ',');
fstate->item_type = atoi(value.c_str());
//////////////////
//// Exit Information ////
getline(file, value, ',');
fstate->last_enemy_area_exits = atoi(value.c_str());
getline(file, value, ',');
fstate->goal_area_exits = atoi(value.c_str());
getline(file, value, ',');
fstate->current_area_exits = atoi(value.c_str());
//////////////////////////
//// Area Numbers ////
getline(file, value, ',');
fstate->current_area_num = atoi(value.c_str());
getline(file, value, ',');
fstate->goal_area_num = atoi(value.c_str());
getline(file, value, ',');
fstate->enemy_area_num = atoi(value.c_str());
//////////////////////////
//// Misc ////
getline(file, value, ',');
fstate->tfl = atoi(value.c_str());
getline(file, value, '\n');
fstate->last_hit_count = atoi(value.c_str());
//////////////
/***** END READING FINAL STATE *****/
samples.insert(it, s);
it = samples.end();
}
file.close();
#ifdef CPU
host_vector<float> policy = agents[client]->updatePolicy(samples);
#else
device_vector<sample> dev_samples = samples;
host_vector<float> policy = agents[client]->updatePolicy(dev_samples);
#endif
// Write to file
ofstream outfile("lspi.pol");
for(int i = 0; i < policy.size(); i++)
{
if(i + 1 == policy.size())
{
outfile << fixed << setprecision(8) << policy[i] << endl;
}
else
{
outfile << fixed << setprecision(8) << policy[i] << ",";
}
}
outfile.close();
// Free space used by samples
for(int i = 0; i < samples.size(); i++)
{
free(samples[i].final_state);
free(samples[i].state);
}
#endif
}
#endif |
1a8a33b0c65c2cadd4a5528516fc942b72da574e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/v_measure.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
struct vMeasureParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
double beta;
bool sameArrays;
double tolerance;
};
// test fixture class
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
protected:
// the constructor
void SetUp() override
{
// getting the parameters
params = ::testing::TestWithParam<vMeasureParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
// generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
}
// allocating and initializing memory to the GPU
stream = resource::get_cuda_stream(handle);
rmm::device_uvector<T> truthClusterArray(nElements, stream);
rmm::device_uvector<T> predClusterArray(nElements, stream);
raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
// calculating the golden output
double truthHomogeity, truthCompleteness;
truthHomogeity = raft::stats::homogeneity_score(truthClusterArray.data(),
predClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
truthCompleteness = raft::stats::homogeneity_score(predClusterArray.data(),
truthClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
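// v-measure is the beta-weighted harmonic mean of homogeneity and completeness (defined as 0 when both are 0)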
if (truthCompleteness + truthHomogeity == 0.0)
truthVMeasure = 0.0;
else
truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
(params.beta * truthHomogeity + truthCompleteness));
// calling the v_measure CUDA implementation
computedVMeasure = raft::stats::v_measure(
handle,
raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
lowerLabelRange,
upperLabelRange,
params.beta);
}
// declaring the data values
raft::resources handle;
vMeasureParam params;
T lowerLabelRange, upperLabelRange;
int nElements = 0;
double truthVMeasure = 0;
double computedVMeasure = 0;
hipStream_t stream = 0;
};
// setting test parameter values
const std::vector<vMeasureParam> inputs = {{199, 1, 10, 1.0, false, 0.000001},
{200, 15, 100, 1.0, false, 0.000001},
{100, 1, 20, 1.0, false, 0.000001},
{10, 1, 10, 1.0, false, 0.000001},
{198, 1, 100, 1.0, false, 0.000001},
{300, 3, 99, 1.0, false, 0.000001},
{199, 1, 10, 1.0, true, 0.000001},
{200, 15, 100, 1.0, true, 0.000001},
{100, 1, 20, 1.0, true, 0.000001},
{10, 1, 10, 1.0, true, 0.000001},
{198, 1, 100, 1.0, true, 0.000001},
{300, 3, 99, 1.0, true, 0.000001}};
// writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result)
{
ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 1a8a33b0c65c2cadd4a5528516fc942b72da574e.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/v_measure.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
struct vMeasureParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
double beta;
bool sameArrays;
double tolerance;
};
// test fixture class
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
protected:
// the constructor
void SetUp() override
{
// getting the parameters
params = ::testing::TestWithParam<vMeasureParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
// generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
}
// allocating and initializing memory to the GPU
stream = resource::get_cuda_stream(handle);
rmm::device_uvector<T> truthClusterArray(nElements, stream);
rmm::device_uvector<T> predClusterArray(nElements, stream);
raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
// calculating the golden output
double truthHomogeity, truthCompleteness;
truthHomogeity = raft::stats::homogeneity_score(truthClusterArray.data(),
predClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
truthCompleteness = raft::stats::homogeneity_score(predClusterArray.data(),
truthClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
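// v-measure is the beta-weighted harmonic mean of homogeneity and completeness (defined as 0 when both are 0)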
if (truthCompleteness + truthHomogeity == 0.0)
truthVMeasure = 0.0;
else
truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
(params.beta * truthHomogeity + truthCompleteness));
// calling the v_measure CUDA implementation
computedVMeasure = raft::stats::v_measure(
handle,
raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
lowerLabelRange,
upperLabelRange,
params.beta);
}
// declaring the data values
raft::resources handle;
vMeasureParam params;
T lowerLabelRange, upperLabelRange;
int nElements = 0;
double truthVMeasure = 0;
double computedVMeasure = 0;
cudaStream_t stream = 0;
};
// setting test parameter values
const std::vector<vMeasureParam> inputs = {{199, 1, 10, 1.0, false, 0.000001},
{200, 15, 100, 1.0, false, 0.000001},
{100, 1, 20, 1.0, false, 0.000001},
{10, 1, 10, 1.0, false, 0.000001},
{198, 1, 100, 1.0, false, 0.000001},
{300, 3, 99, 1.0, false, 0.000001},
{199, 1, 10, 1.0, true, 0.000001},
{200, 15, 100, 1.0, true, 0.000001},
{100, 1, 20, 1.0, true, 0.000001},
{10, 1, 10, 1.0, true, 0.000001},
{198, 1, 100, 1.0, true, 0.000001},
{300, 3, 99, 1.0, true, 0.000001}};
// writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result)
{
ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
|
68138cdb934120d2a36ed69a8467997e3bf88c2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <cstdlib>
#include <time.h>
__global__ void just_launch(){ };
int main(int argc, char** argv){
if (argc != 4){
std::cout << "number_of_blocks number_of_threads cycles" << std::endl;
};
hipError_t status;
struct timespec start, stop;
clock_gettime(CLOCK_MONOTONIC, &start);
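// hipFree(0) forces lazy runtime/context initialization, so it is timed together with the empty kernel launches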
status = hipFree(0); if (status != hipSuccess){ std::cout << hipGetErrorString(status) << std::endl; };
for (int i = 0; i < atoi(argv[3]); ++i){
hipLaunchKernelGGL(( just_launch), dim3(atoi(argv[1])), dim3(atoi(argv[2])), 0, 0, );
};
status = hipDeviceSynchronize(); if (status != hipSuccess){ std::cout << hipGetErrorString(status) << std::endl; };
clock_gettime(CLOCK_MONOTONIC, &stop);
double secs = (double)(stop.tv_sec - start.tv_sec) + (stop.tv_nsec/1000000.0 - start.tv_nsec/1000000.0)/1000.0;
std::cout << "name,Duration" << std::endl;
std::cout << "just_launch " << secs << std::endl;
status = hipDeviceReset(); if (status != hipSuccess){ std::cout << hipGetErrorString(status) << std::endl; };
};
| 68138cdb934120d2a36ed69a8467997e3bf88c2c.cu | #include <cuda_runtime_api.h>
#include <iostream>
#include <cstdlib>
#include <time.h>
__global__ void just_launch(){ };
int main(int argc, char** argv){
if (argc != 4){
std::cout << "number_of_blocks number_of_threads cycles" << std::endl;
};
cudaError_t status;
struct timespec start, stop;
clock_gettime(CLOCK_MONOTONIC, &start);
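// cudaFree(0) forces lazy runtime/context initialization, so it is timed together with the empty kernel launches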
status = cudaFree(0); if (status != cudaSuccess){ std::cout << cudaGetErrorString(status) << std::endl; };
for (int i = 0; i < atoi(argv[3]); ++i){
just_launch<<<atoi(argv[1]), atoi(argv[2]), 0>>>();
};
status = cudaDeviceSynchronize(); if (status != cudaSuccess){ std::cout << cudaGetErrorString(status) << std::endl; };
clock_gettime(CLOCK_MONOTONIC, &stop);
double secs = (double)(stop.tv_sec - start.tv_sec) + (stop.tv_nsec/1000000.0 - start.tv_nsec/1000000.0)/1000.0;
std::cout << "name,Duration" << std::endl;
std::cout << "just_launch " << secs << std::endl;
status = cudaDeviceReset(); if (status != cudaSuccess){ std::cout << cudaGetErrorString(status) << std::endl; };
};
|
4c0c2c78137e4a3ded5591390343d4d0144c2745.hip | // !!! This is a file automatically generated by hipify!!!
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "helper.h"
#include <cstdlib>
#include <iostream>
using std::stringstream;
using std::cerr;
using std::cout;
using std::endl;
using std::string;
// opencv helpers
void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc == 1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y = 0; y<h; y++)
{
for (int x = 0; x<w; x++)
{
for (int c = 0; c<nc; c++)
{
aOut[(nc - 1 - c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c];
}
}
}
}
void convert_layered_to_mat(cv::Mat &mOut, const float *aIn)
{
convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels());
}
void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc == 1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y = 0; y<h; y++)
{
for (int x = 0; x<w; x++)
{
for (int c = 0; c<nc; c++)
{
aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc - 1 - c) + nc*(x + (size_t)w*y)];
}
}
}
}
void convert_mat_to_layered(float *aOut, const cv::Mat &mIn)
{
convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels());
}
void showImage(string title, const cv::Mat &mat, int x, int y)
{
const char *wTitle = title.c_str();
cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE);
cvMoveWindow(wTitle, x, y);
cv::imshow(wTitle, mat);
}
void showHistogram256(const char *windowTitle, int *histogram, int windowX, int windowY)
{
const int nbins = 256;
cv::Mat canvas = cv::Mat::ones(125, 512, CV_8UC3);
float hmax = 0;
for (int i = 0; i < nbins; ++i)
hmax = max((int)hmax, histogram[i]);
for (int j = 0, rows = canvas.rows; j < nbins - 1; j++)
{
for (int i = 0; i < 2; ++i)
cv::line(
canvas,
cv::Point(j * 2 + i, rows),
cv::Point(j * 2 + i, rows - (histogram[j] * 125.0f) / hmax),
cv::Scalar(255, 128, 0),
1, 8, 0
);
}
showImage(windowTitle, canvas, windowX, windowY);
}
// adding Gaussian noise
float noise(float sigma)
{
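// Box-Muller transform: two uniform samples yield one zero-mean Gaussian sample scaled by sigma (x1 is clamped away from 0 to avoid log(0))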
float x1 = (float)rand() / RAND_MAX;
float x2 = (float)rand() / RAND_MAX;
return sigma * sqrtf(-2 * log(::max(x1, 0.000001f)))*cosf(2 * M_PI*x2);
}
void addNoise(cv::Mat &m, float sigma)
{
float *data = (float*)m.data;
int w = m.cols;
int h = m.rows;
int nc = m.channels();
size_t n = (size_t)w*h*nc;
for (size_t i = 0; i<n; i++)
{
data[i] += noise(sigma);
}
}
// cuda error checking
string prev_file = "";
int prev_line = 0;
void cuda_check(string file, int line)
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl;
if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl;
system("pause");
exit(1);
}
prev_file = file;
prev_line = line;
}
| 4c0c2c78137e4a3ded5591390343d4d0144c2745.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "helper.h"
#include <cstdlib>
#include <iostream>
using std::stringstream;
using std::cerr;
using std::cout;
using std::endl;
using std::string;
// opencv helpers
void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc == 1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y = 0; y<h; y++)
{
for (int x = 0; x<w; x++)
{
for (int c = 0; c<nc; c++)
{
aOut[(nc - 1 - c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c];
}
}
}
}
void convert_layered_to_mat(cv::Mat &mOut, const float *aIn)
{
convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels());
}
void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc == 1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y = 0; y<h; y++)
{
for (int x = 0; x<w; x++)
{
for (int c = 0; c<nc; c++)
{
aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc - 1 - c) + nc*(x + (size_t)w*y)];
}
}
}
}
void convert_mat_to_layered(float *aOut, const cv::Mat &mIn)
{
convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels());
}
void showImage(string title, const cv::Mat &mat, int x, int y)
{
const char *wTitle = title.c_str();
cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE);
cvMoveWindow(wTitle, x, y);
cv::imshow(wTitle, mat);
}
void showHistogram256(const char *windowTitle, int *histogram, int windowX, int windowY)
{
const int nbins = 256;
cv::Mat canvas = cv::Mat::ones(125, 512, CV_8UC3);
float hmax = 0;
for (int i = 0; i < nbins; ++i)
hmax = max((int)hmax, histogram[i]);
for (int j = 0, rows = canvas.rows; j < nbins - 1; j++)
{
for (int i = 0; i < 2; ++i)
cv::line(
canvas,
cv::Point(j * 2 + i, rows),
cv::Point(j * 2 + i, rows - (histogram[j] * 125.0f) / hmax),
cv::Scalar(255, 128, 0),
1, 8, 0
);
}
showImage(windowTitle, canvas, windowX, windowY);
}
// adding Gaussian noise
float noise(float sigma)
{
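// Box-Muller transform: two uniform samples yield one zero-mean Gaussian sample scaled by sigma (x1 is clamped away from 0 to avoid log(0))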
float x1 = (float)rand() / RAND_MAX;
float x2 = (float)rand() / RAND_MAX;
return sigma * sqrtf(-2 * log(std::max(x1, 0.000001f)))*cosf(2 * M_PI*x2);
}
void addNoise(cv::Mat &m, float sigma)
{
float *data = (float*)m.data;
int w = m.cols;
int h = m.rows;
int nc = m.channels();
size_t n = (size_t)w*h*nc;
for (size_t i = 0; i<n; i++)
{
data[i] += noise(sigma);
}
}
// cuda error checking
string prev_file = "";
int prev_line = 0;
void cuda_check(string file, int line)
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl;
system("pause");
exit(1);
}
prev_file = file;
prev_line = line;
}
|
4cf1d380e29cef953c61560e80bf2fd2bee831d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/ConvUtils.h>
#include <algorithm>
#include <tuple>
#include <limits>
using namespace at;
using namespace native;
template <typename scalar_t, typename accscalar_t,
int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
int kKnownDilationT, int kKnownDilationH, int kKnownDilationW>
__global__ void conv_depthwise3d_cuda_kernel(
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> output,
const PackedTensorAccessor32<scalar_t, 5> kernel,
const scalar_t* bias,
int strideT, int strideH, int strideW,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_)
{
const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
const int oC = output.size(1);
const int oT = output.size(2);
const int oH = output.size(3);
const int oW = output.size(4);
const int iC = input.size(1);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = oC / iC;
const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
const int num_output = output.size(0) * output.stride(0);
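// Grid-stride loop: each thread computes one output element by accumulating over the kT x kH x kW window of its single input channel (out_channel / channel_multiplier) and adding the bias if present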
CUDA_KERNEL_LOOP(index, num_output) {
const int out_col = index % oW;
const int out_row = (index / oW) % oH;
const int out_frame = (index / oW / oH) % oT;
const int out_channel = (index / oW / oH / oT) % oC;
const int batch = index / oW / oH / oT / oC;
const int in_channel = out_channel / channel_multiplier;
const int in_col_start = out_col * strideW - paddingW;
const int in_row_start = out_row * strideH - paddingH;
const int in_frame_start = out_frame * strideT - paddingT;
accscalar_t sum = 0;
const scalar_t *kernel_ptr = kernel[out_channel].data();
const scalar_t *input_ptr =
&input[batch][in_channel][in_frame_start][in_row_start][in_col_start];
for (int k_frame = 0; k_frame < kT; ++k_frame) {
const int in_frame = in_frame_start + k_frame * dilationT;
for (int k_row = 0; k_row < kH; ++k_row) {
const int in_row = in_row_start + k_row * dilationH;
for (int k_col = 0; k_col < kW; ++k_col) {
const accscalar_t op1 = *(kernel_ptr++);
const int in_col = in_col_start + k_col * dilationW;
if (in_frame >= 0 && in_row >= 0 && in_col >= 0 &&
in_frame < iT && in_row < iH && in_col < iW) {
sum += op1 * *(input_ptr);
}
input_ptr += dilationW;
}
input_ptr += iW * dilationH - kW * dilationW;
}
input_ptr += iW * (iH * dilationT - kH * dilationH);
}
if (bias != NULL) {
sum += bias[out_channel];
}
output[batch][out_channel][out_frame][out_row][out_col] = sum;
}
}
template <typename scalar_t, typename accscalar_t,
int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
int kKnownDilationT, int kKnownDilationH, int kKnownDilationW,
int kKnownStrideT, int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_input_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
PackedTensorAccessor32<scalar_t, 5> grad_input,
const PackedTensorAccessor32<scalar_t, 5> kernel,
int strideT_, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_) {
const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
const int oC = grad_output.size(1);
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iC = grad_input.size(1);
const int iT = grad_input.size(2);
const int iH = grad_input.size(3);
const int iW = grad_input.size(4);
const int channel_multiplier = oC / iC;
const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
const int strideT = kKnownStrideT > 0 ? kKnownStrideT : strideT_;
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
const int num_input = grad_input.size(0) * grad_input.stride(0);
CUDA_KERNEL_LOOP(index, num_input) {
const int in_col = index % iW;
const int in_row = (index / iW) % iH;
const int in_frame = (index / iW / iH) % iT;
const int in_channel = (index / iW / iH / iT) % iC;
const int batch = index / iW / iH / iT / iC;
const int out_col_end = in_col + paddingW;
const int out_row_end = in_row + paddingH;
const int out_frame_end = in_frame + paddingT;
const scalar_t* kernel_ptr = kernel[in_channel * channel_multiplier].data();
accscalar_t sum = 0;
for (int k_chn = in_channel * channel_multiplier;
k_chn < (in_channel + 1) * channel_multiplier;
++k_chn) {
const scalar_t* gout_ptr = grad_output[batch][k_chn].data();
for (int k_frame = 0; k_frame < kT; ++k_frame) {
const int out_frame_raw = out_frame_end - k_frame * dilationT;
const int out_frame = out_frame_raw / strideT;
for (int k_row = 0; k_row < kH; ++k_row) {
const int out_row_raw = out_row_end - k_row * dilationH;
const int out_row = out_row_raw / strideH;
for (int k_col = 0; k_col < kW; ++k_col) {
const accscalar_t op1 = *(kernel_ptr++);
const int out_col_raw = out_col_end - k_col * dilationW;
const int out_col = out_col_raw / strideW;
const int out_offs = (out_frame * oH + out_row) * oW + out_col;
accscalar_t op2 = (accscalar_t)0;
if (out_col >= 0 && out_row >= 0 && out_frame >= 0 &&
out_col < oW && out_row < oH && out_frame < oT) {
op2 = *(gout_ptr + out_offs);
}
if (out_frame * strideT == out_frame_raw &&
out_row * strideH == out_row_raw &&
out_col * strideW == out_col_raw) {
sum += op1 * op2;
}
}
}
}
}
grad_input[batch][in_channel][in_frame][in_row][in_col] = sum;
}
}
template <typename scalar_t, typename accscalar_t,
int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_weight_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> grad_kernel,
int strideT, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT, int dilationH, int dilationW) {
const int kC = grad_kernel.size(0);
const int kT = grad_kernel.size(2);
const int kH = grad_kernel.size(3);
const int kW = grad_kernel.size(4);
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
const int k_col = blockIdx.x % kW;
const int k_row = (blockIdx.x / kW) % kH;
const int k_frame = (blockIdx.x / kW / kH) % kT;
const int k_channel = blockIdx.x / kW / kH / kT;
scalar_t *result = &grad_kernel[k_channel][0][k_frame][k_row][k_col];
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = grad_output.size(1) / input.size(1);
const int in_channel = k_channel / channel_multiplier;
extern __shared__ int sdata_raw[];
scalar_t* sdata = reinterpret_cast<scalar_t*>(sdata_raw);
if (k_channel >= kC) {
return;
}
const int laneid = threadIdx.x % C10_WARP_SIZE;
const int warpid = threadIdx.x / C10_WARP_SIZE;
const int nwarps = blockDim.x / C10_WARP_SIZE;
accscalar_t grad = 0;
int batch = warpid / oT;
int gout_frame = warpid - batch * oT;
for (int outer_pos = warpid; outer_pos < input.size(0) * oT;
outer_pos += nwarps, gout_frame += nwarps) {
while (gout_frame >= oT) { gout_frame -= oT; batch ++; }
const int in_frame = (gout_frame * strideT) + (k_frame * dilationT) - paddingT;
if (in_frame < 0 || in_frame >= iT) {
continue;
}
const scalar_t* gout_ptr = grad_output[batch][k_channel][gout_frame].data() + laneid;
const scalar_t* input_ptr = input[batch][in_channel][in_frame].data();
int gout_row = laneid / oW;
int gout_col = laneid - gout_row * oW;
for (; gout_row < oH; ) {
const accscalar_t op1 = *(gout_ptr);
gout_ptr += C10_WARP_SIZE;
const int in_col = (gout_col * strideW) + (k_col * dilationW) - paddingW;
const int in_row = (gout_row * strideH) + (k_row * dilationH) - paddingH;
const int in_pos = in_row * iW + in_col;
accscalar_t op2 = (accscalar_t)0;
if (in_col >= 0 && in_col < iW && in_row >= 0 && in_row < iH) {
op2 = *(input_ptr + in_pos);
}
gout_col += C10_WARP_SIZE;
while (gout_col >= oW) {
gout_col -= oW; gout_row ++;
}
grad += op1 * op2;
}
}
sdata[threadIdx.x] = grad;
__syncthreads();
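  // Shared-memory tree reduction over the block: the __popc assert below checks
  // that blockDim.x is a power of two, which the halving loop relies on.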
CUDA_KERNEL_ASSERT(__popc(blockDim.x) == 1);
#pragma unroll
for (int i = blockDim.x / 2; i >= 1; i >>= 1) {
if (threadIdx.x < i) {
sdata[threadIdx.x] += sdata[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = sdata[0];
}
}
template <int dim>
void conv_depthwise_shape_check(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
TORCH_CHECK(kernel_size.size() == dim,
"kernel size length should be ", dim, ", but got ", kernel_size.size());
TORCH_CHECK(stride.size() == dim,
"stride length should be ", dim, ", but got ", stride.size());
TORCH_CHECK(padding.size() == dim,
"padding length should be ", dim, ", but got ", padding.size());
TORCH_CHECK(dilation.size() == dim,
"dilation length should be ", dim, ", but got ", dilation.size());
TORCH_CHECK(weight.defined(),
"Weight must be defined.");
TORCH_CHECK(input.dim() == dim + 1 || input.dim() == dim + 2,
"Input dimension should be ",
dim + 1, "D or ", dim + 2, "D, got ",
input.dim(), "D");
TORCH_CHECK(weight.dim() == dim + 2,
"Weight dimension should be ", dim + 2, "D, got ", weight.dim(), "D");
TORCH_CHECK(weight.size(1) == 1,
"Depthwise weight should have in_channels=1, got ", weight.size(1));
TORCH_CHECK(weight.size(0) % input.size(-dim - 1) == 0,
"Depthwise out channels should be a multiple of in channels, got ",
weight.size(0), " and ", input.size(-dim - 1));
for (int i = 0; i < dim; ++i) {
TORCH_CHECK(weight.size(i + 2) == kernel_size[i],
"kernel size and weight size mismatch, got ",
kernel_size, " and ", weight.sizes());
TORCH_CHECK(stride[i] >= 1,
"stride should be at least 1, got ", stride);
TORCH_CHECK(padding[i] >= 0,
"padding should be non-negative, got ", padding);
TORCH_CHECK(dilation[i] >= 1,
"dilation should be at least 1, got ", dilation);
}
if (bias.defined()) {
TORCH_CHECK(bias.dim() == 1,
"Bias should be 1D tensor, got ", bias.dim(), "D");
TORCH_CHECK(bias.size(0) == weight.size(0),
"Bias length should be equal to out_channels, got ",
bias.size(0), " and ", weight.size(0));
}
if (grad_output.defined()) {
auto expected_output_size = conv_output_size(input.sizes(), weight.sizes(),
padding, stride, dilation);
TORCH_CHECK(grad_output.dim() == expected_output_size.size(),
"Expect grad_output to be ",
expected_output_size.size(), "D, got ",
grad_output.dim(), "D.");
for (int i = 0; i < grad_output.dim(); ++i) {
TORCH_CHECK(grad_output.size(i) == expected_output_size[i],
"Expect grad_output to be of same shape as output, got ",
grad_output.size(i), " and ", expected_output_size[i],
" at dimension ", i);
}
}
}
#define NODEF_OR_EQUAL(x, y) ((y) < 0 || (x) == (y))
#define NODEF_OR_EQUAL_3(x, y1, y2, y3) \
(NODEF_OR_EQUAL(x[0], y1) && \
NODEF_OR_EQUAL(x[1], y2) && \
NODEF_OR_EQUAL(x[2], y3))
#define DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(kt, kh, kw, dilt, dilh, dilw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw)>) \
, dim3(grid), dim3(block), (smem), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
} else
#define DWCONV3D_FORWARD_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_kernel \
<scalar_t,accscalar_t, -1, -1, -1, -1, -1, -1>) \
, dim3(grid), dim3(block), (smem), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
}
Tensor conv_depthwise3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
TORCH_CHECK(input.device() == weight.device(), "expects input and weight tensors to be on the same device.");
if (bias.defined()) {
TORCH_CHECK(input.device() == bias.device(), "expects input and bias tensors to be on the same device.");
}
conv_depthwise_shape_check<3>(input, weight, bias, Tensor() /* undefined */,
kernel_size, stride, padding, dilation);
Tensor input_ = input.contiguous();
if (input.dim() == 4 /* no batch */) {
input_ = input.unsqueeze(0);
}
auto output_size = conv_output_size(input_.sizes(), weight.sizes(),
padding, stride, dilation);
for (size_t i = 0; i < output_size.size(); ++i) {
TORCH_CHECK(output_size[i] > 0,
"Output size should be positive, got ", output_size[i], " at dim ", i);
}
Tensor output = at::empty(output_size, input.options());
Tensor output_ = output;
Tensor weight_ = weight.contiguous();
Tensor bias_ = bias.defined() ? bias.contiguous() : bias;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"conv_depthwise3d",
[&]{
int64_t num_outputs = output_.numel();
int64_t block = 256;
int64_t grid = ::min((num_outputs - 1) / block + 1, (int64_t)65536);
int64_t smem = 0;
const scalar_t* bias_ptr =
bias_.defined() ? bias_.data_ptr<scalar_t>() : NULL;
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(3, 3, 3, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(-1, -1, -1, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_OTHERS
}
);
return output;
}
#undef DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION
#undef DWCONV3D_FORWARD_DISPATCH_OTHERS
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( \
kt, kh, kw, dilt, dilh, dilw, dt, dh, dw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw)) && \
NODEF_OR_EQUAL_3(stride, (dt), (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)>) \
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
} else
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1>) \
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
}
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(dh, dw) \
if (NODEF_OR_EQUAL_3(stride, -1, (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, (dh), (dw)>) \
, dim3(grid), dim3(block), smem, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
} else
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, -1, -1>) \
, dim3(grid), dim3(block), smem, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
}
std::tuple<Tensor&, Tensor&, Tensor&> _depthwise_3d_backward_cuda_out(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
const std::array<bool, 3> output_mask)
{
TORCH_CHECK(grad_output.device() == input.device() &&
input.device() == weight.device(),
"expects input, weight and grad_output to be on the same device.");
conv_depthwise_shape_check<3>(
input, weight, Tensor() /* undefined */, grad_output,
kernel_size, stride, padding, dilation);
const Tensor grad_output_ = grad_output.contiguous();
const Tensor input_ = input.contiguous();
const Tensor weight_ = weight.contiguous();
Tensor grad_input_ =
(output_mask[0] ? grad_input
: Tensor());
if (output_mask[0]) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(),
"conv_depthwise3d",
[&] {
int64_t num_inputs = grad_input_.numel();
int64_t block = 256;
int64_t grid = ::min((num_inputs - 1) / block + 1, (int64_t)65536);
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(grad_input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(grad_output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, 1, 1, 1, 1, 1, 1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, 1, 1, 1, -1, -1, -1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, -1, -1, -1, 1, 1, 1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, -1, -1, -1, -1, -1, -1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
}
);
}
if (output_mask[1]) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(),
"conv_depthwise3d",
[&] {
int64_t grid = grad_weight.numel();
int64_t block = 256;
int64_t smem = sizeof(scalar_t) * block;
const int64_t int_max = std::numeric_limits<int32_t>::max();
TORCH_CHECK(grad_input_.numel() <= int_max,
"Input tensor is too large.");
TORCH_CHECK(grad_output_.numel() <= int_max,
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= int_max,
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= int_max,
"Padded input tensor is too large.");
}
TORCH_CHECK(grad_output_.size(0) * grad_output_.size(2) < int_max - block / C10_WARP_SIZE &&
grad_output_.size(3) <= int_max - C10_WARP_SIZE &&
grad_output_.size(4) <= int_max - C10_WARP_SIZE,
"Output size is too large.");
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(1, 1)
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(2, 2)
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS
}
);
}
if (output_mask[2]) {
grad_bias = grad_output.sum({0, 2, 3, 4});
}
return std::tie(grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor&, Tensor&, Tensor&> conv_depthwise3d_backward_cuda_out(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
return _depthwise_3d_backward_cuda_out(
grad_input,
grad_weight,
grad_bias,
grad_output,
input,
weight,
kernel_size,
stride,
padding,
dilation,
{true,true,true});
}
std::tuple<Tensor, Tensor, Tensor> conv_depthwise3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
const std::array<bool, 3> output_mask) {
auto options = grad_output.options();
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : Tensor());
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : Tensor());
Tensor grad_bias; /* undefined temporarily */
return _depthwise_3d_backward_cuda_out(
grad_input,
grad_weight,
grad_bias,
grad_output,
input,
weight,
kernel_size,
stride,
padding,
dilation,
output_mask
);
}
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
#undef NODEF_OR_EQUAL_3
#undef NODEF_OR_EQUAL | 4cf1d380e29cef953c61560e80bf2fd2bee831d9.cu | #include <ATen/ATen.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/ConvUtils.h>
#include <algorithm>
#include <tuple>
#include <limits>
using namespace at;
using namespace native;
template <typename scalar_t, typename accscalar_t,
int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
int kKnownDilationT, int kKnownDilationH, int kKnownDilationW>
__global__ void conv_depthwise3d_cuda_kernel(
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> output,
const PackedTensorAccessor32<scalar_t, 5> kernel,
const scalar_t* bias,
int strideT, int strideH, int strideW,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_)
{
const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
const int oC = output.size(1);
const int oT = output.size(2);
const int oH = output.size(3);
const int oW = output.size(4);
const int iC = input.size(1);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = oC / iC;
const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
const int num_output = output.size(0) * output.stride(0);
CUDA_KERNEL_LOOP(index, num_output) {
const int out_col = index % oW;
const int out_row = (index / oW) % oH;
const int out_frame = (index / oW / oH) % oT;
const int out_channel = (index / oW / oH / oT) % oC;
const int batch = index / oW / oH / oT / oC;
const int in_channel = out_channel / channel_multiplier;
const int in_col_start = out_col * strideW - paddingW;
const int in_row_start = out_row * strideH - paddingH;
const int in_frame_start = out_frame * strideT - paddingT;
accscalar_t sum = 0;
const scalar_t *kernel_ptr = kernel[out_channel].data();
const scalar_t *input_ptr =
&input[batch][in_channel][in_frame_start][in_row_start][in_col_start];
for (int k_frame = 0; k_frame < kT; ++k_frame) {
const int in_frame = in_frame_start + k_frame * dilationT;
for (int k_row = 0; k_row < kH; ++k_row) {
const int in_row = in_row_start + k_row * dilationH;
for (int k_col = 0; k_col < kW; ++k_col) {
const accscalar_t op1 = *(kernel_ptr++);
const int in_col = in_col_start + k_col * dilationW;
if (in_frame >= 0 && in_row >= 0 && in_col >= 0 &&
in_frame < iT && in_row < iH && in_col < iW) {
sum += op1 * *(input_ptr);
}
input_ptr += dilationW;
}
input_ptr += iW * dilationH - kW * dilationW;
}
input_ptr += iW * (iH * dilationT - kH * dilationH);
}
if (bias != NULL) {
sum += bias[out_channel];
}
output[batch][out_channel][out_frame][out_row][out_col] = sum;
}
}
template <typename scalar_t, typename accscalar_t,
int kKnownKernelT, int kKnownKernelH, int kKnownKernelW,
int kKnownDilationT, int kKnownDilationH, int kKnownDilationW,
int kKnownStrideT, int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_input_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
PackedTensorAccessor32<scalar_t, 5> grad_input,
const PackedTensorAccessor32<scalar_t, 5> kernel,
int strideT_, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_) {
const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2);
const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3);
const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4);
const int oC = grad_output.size(1);
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iC = grad_input.size(1);
const int iT = grad_input.size(2);
const int iH = grad_input.size(3);
const int iW = grad_input.size(4);
const int channel_multiplier = oC / iC;
const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_;
const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_;
const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_;
const int strideT = kKnownStrideT > 0 ? kKnownStrideT : strideT_;
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
const int num_input = grad_input.size(0) * grad_input.stride(0);
CUDA_KERNEL_LOOP(index, num_input) {
const int in_col = index % iW;
const int in_row = (index / iW) % iH;
const int in_frame = (index / iW / iH) % iT;
const int in_channel = (index / iW / iH / iT) % iC;
const int batch = index / iW / iH / iT / iC;
const int out_col_end = in_col + paddingW;
const int out_row_end = in_row + paddingH;
const int out_frame_end = in_frame + paddingT;
const scalar_t* kernel_ptr = kernel[in_channel * channel_multiplier].data();
accscalar_t sum = 0;
for (int k_chn = in_channel * channel_multiplier;
k_chn < (in_channel + 1) * channel_multiplier;
++k_chn) {
const scalar_t* gout_ptr = grad_output[batch][k_chn].data();
for (int k_frame = 0; k_frame < kT; ++k_frame) {
const int out_frame_raw = out_frame_end - k_frame * dilationT;
const int out_frame = out_frame_raw / strideT;
for (int k_row = 0; k_row < kH; ++k_row) {
const int out_row_raw = out_row_end - k_row * dilationH;
const int out_row = out_row_raw / strideH;
for (int k_col = 0; k_col < kW; ++k_col) {
const accscalar_t op1 = *(kernel_ptr++);
const int out_col_raw = out_col_end - k_col * dilationW;
const int out_col = out_col_raw / strideW;
const int out_offs = (out_frame * oH + out_row) * oW + out_col;
accscalar_t op2 = (accscalar_t)0;
if (out_col >= 0 && out_row >= 0 && out_frame >= 0 &&
out_col < oW && out_row < oH && out_frame < oT) {
op2 = *(gout_ptr + out_offs);
}
if (out_frame * strideT == out_frame_raw &&
out_row * strideH == out_row_raw &&
out_col * strideW == out_col_raw) {
sum += op1 * op2;
}
}
}
}
}
grad_input[batch][in_channel][in_frame][in_row][in_col] = sum;
}
}
template <typename scalar_t, typename accscalar_t,
int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_weight_kernel(
const PackedTensorAccessor32<scalar_t, 5> grad_output,
const PackedTensorAccessor32<scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> grad_kernel,
int strideT, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT, int dilationH, int dilationW) {
const int kC = grad_kernel.size(0);
const int kT = grad_kernel.size(2);
const int kH = grad_kernel.size(3);
const int kW = grad_kernel.size(4);
const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_;
const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_;
const int k_col = blockIdx.x % kW;
const int k_row = (blockIdx.x / kW) % kH;
const int k_frame = (blockIdx.x / kW / kH) % kT;
const int k_channel = blockIdx.x / kW / kH / kT;
scalar_t *result = &grad_kernel[k_channel][0][k_frame][k_row][k_col];
const int oT = grad_output.size(2);
const int oH = grad_output.size(3);
const int oW = grad_output.size(4);
const int iT = input.size(2);
const int iH = input.size(3);
const int iW = input.size(4);
const int channel_multiplier = grad_output.size(1) / input.size(1);
const int in_channel = k_channel / channel_multiplier;
extern __shared__ int sdata_raw[];
scalar_t* sdata = reinterpret_cast<scalar_t*>(sdata_raw);
if (k_channel >= kC) {
return;
}
const int laneid = threadIdx.x % C10_WARP_SIZE;
const int warpid = threadIdx.x / C10_WARP_SIZE;
const int nwarps = blockDim.x / C10_WARP_SIZE;
accscalar_t grad = 0;
int batch = warpid / oT;
int gout_frame = warpid - batch * oT;
for (int outer_pos = warpid; outer_pos < input.size(0) * oT;
outer_pos += nwarps, gout_frame += nwarps) {
while (gout_frame >= oT) { gout_frame -= oT; batch ++; }
const int in_frame = (gout_frame * strideT) + (k_frame * dilationT) - paddingT;
if (in_frame < 0 || in_frame >= iT) {
continue;
}
const scalar_t* gout_ptr = grad_output[batch][k_channel][gout_frame].data() + laneid;
const scalar_t* input_ptr = input[batch][in_channel][in_frame].data();
int gout_row = laneid / oW;
int gout_col = laneid - gout_row * oW;
for (; gout_row < oH; ) {
const accscalar_t op1 = *(gout_ptr);
gout_ptr += C10_WARP_SIZE;
const int in_col = (gout_col * strideW) + (k_col * dilationW) - paddingW;
const int in_row = (gout_row * strideH) + (k_row * dilationH) - paddingH;
const int in_pos = in_row * iW + in_col;
accscalar_t op2 = (accscalar_t)0;
if (in_col >= 0 && in_col < iW && in_row >= 0 && in_row < iH) {
op2 = *(input_ptr + in_pos);
}
gout_col += C10_WARP_SIZE;
while (gout_col >= oW) {
gout_col -= oW; gout_row ++;
}
grad += op1 * op2;
}
}
sdata[threadIdx.x] = grad;
__syncthreads();
CUDA_KERNEL_ASSERT(__popc(blockDim.x) == 1);
#pragma unroll
for (int i = blockDim.x / 2; i >= 1; i >>= 1) {
if (threadIdx.x < i) {
sdata[threadIdx.x] += sdata[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = sdata[0];
}
}
template <int dim>
void conv_depthwise_shape_check(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
TORCH_CHECK(kernel_size.size() == dim,
"kernel size length should be ", dim, ", but got ", kernel_size.size());
TORCH_CHECK(stride.size() == dim,
"stride length should be ", dim, ", but got ", stride.size());
TORCH_CHECK(padding.size() == dim,
"padding length should be ", dim, ", but got ", padding.size());
TORCH_CHECK(dilation.size() == dim,
"dilation length should be ", dim, ", but got ", dilation.size());
TORCH_CHECK(weight.defined(),
"Weight must be defined.");
TORCH_CHECK(input.dim() == dim + 1 || input.dim() == dim + 2,
"Input dimension should be ",
dim + 1, "D or ", dim + 2, "D, got ",
input.dim(), "D");
TORCH_CHECK(weight.dim() == dim + 2,
"Weight dimension should be ", dim + 2, "D, got ", weight.dim(), "D");
TORCH_CHECK(weight.size(1) == 1,
"Depthwise weight should have in_channels=1, got ", weight.size(1));
TORCH_CHECK(weight.size(0) % input.size(-dim - 1) == 0,
"Depthwise out channels should be a multiple of in channels, got ",
weight.size(0), " and ", input.size(-dim - 1));
for (int i = 0; i < dim; ++i) {
TORCH_CHECK(weight.size(i + 2) == kernel_size[i],
"kernel size and weight size mismatch, got ",
kernel_size, " and ", weight.sizes());
TORCH_CHECK(stride[i] >= 1,
"stride should be at least 1, got ", stride);
TORCH_CHECK(padding[i] >= 0,
"padding should be non-negative, got ", padding);
TORCH_CHECK(dilation[i] >= 1,
"dilation should be at least 1, got ", dilation);
}
if (bias.defined()) {
TORCH_CHECK(bias.dim() == 1,
"Bias should be 1D tensor, got ", bias.dim(), "D");
TORCH_CHECK(bias.size(0) == weight.size(0),
"Bias length should be equal to out_channels, got ",
bias.size(0), " and ", weight.size(0));
}
if (grad_output.defined()) {
auto expected_output_size = conv_output_size(input.sizes(), weight.sizes(),
padding, stride, dilation);
TORCH_CHECK(grad_output.dim() == expected_output_size.size(),
"Expect grad_output to be ",
expected_output_size.size(), "D, got ",
grad_output.dim(), "D.");
for (int i = 0; i < grad_output.dim(); ++i) {
TORCH_CHECK(grad_output.size(i) == expected_output_size[i],
"Expect grad_output to be of same shape as output, got ",
grad_output.size(i), " and ", expected_output_size[i],
" at dimension ", i);
}
}
}
#define NODEF_OR_EQUAL(x, y) ((y) < 0 || (x) == (y))
#define NODEF_OR_EQUAL_3(x, y1, y2, y3) \
(NODEF_OR_EQUAL(x[0], y1) && \
NODEF_OR_EQUAL(x[1], y2) && \
NODEF_OR_EQUAL(x[2], y3))
#define DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(kt, kh, kw, dilt, dilh, dilw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw)> \
<<<grid, block, (smem), at::cuda::getCurrentCUDAStream()>>>( \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
} else
#define DWCONV3D_FORWARD_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_kernel \
<scalar_t,accscalar_t, -1, -1, -1, -1, -1, -1> \
<<<grid, block, (smem), at::cuda::getCurrentCUDAStream()>>>( \
input_.packed_accessor32<scalar_t, 5>(), \
output_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
bias_ptr, \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
}
Tensor conv_depthwise3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
TORCH_CHECK(input.device() == weight.device(), "expects input and weight tensors to be on the same device.");
if (bias.defined()) {
TORCH_CHECK(input.device() == bias.device(), "expects input and bias tensors to be on the same device.");
}
conv_depthwise_shape_check<3>(input, weight, bias, Tensor() /* undefined */,
kernel_size, stride, padding, dilation);
Tensor input_ = input.contiguous();
if (input.dim() == 4 /* no batch */) {
input_ = input.unsqueeze(0);
}
auto output_size = conv_output_size(input_.sizes(), weight.sizes(),
padding, stride, dilation);
for (size_t i = 0; i < output_size.size(); ++i) {
TORCH_CHECK(output_size[i] > 0,
"Output size should be positive, got ", output_size[i], " at dim ", i);
}
Tensor output = at::empty(output_size, input.options());
Tensor output_ = output;
Tensor weight_ = weight.contiguous();
Tensor bias_ = bias.defined() ? bias.contiguous() : bias;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"conv_depthwise3d",
[&]{
int64_t num_outputs = output_.numel();
int64_t block = 256;
int64_t grid = std::min((num_outputs - 1) / block + 1, (int64_t)65536);
int64_t smem = 0;
const scalar_t* bias_ptr =
bias_.defined() ? bias_.data_ptr<scalar_t>() : NULL;
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(3, 3, 3, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(-1, -1, -1, 1, 1, 1)
DWCONV3D_FORWARD_DISPATCH_OTHERS
}
);
return output;
}
#undef DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION
#undef DWCONV3D_FORWARD_DISPATCH_OTHERS
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( \
kt, kh, kw, dilt, dilh, dilw, dt, dh, dw) \
if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \
NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw)) && \
NODEF_OR_EQUAL_3(stride, (dt), (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
} else
#define DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
weight_.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
}
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(dh, dw) \
if (NODEF_OR_EQUAL_3(stride, -1, (dh), (dw))) { \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, (dh), (dw)> \
<<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
} else
#define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS \
{ \
using accscalar_t = acc_type<scalar_t, true>; \
conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, -1, -1> \
<<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \
grad_output_.packed_accessor32<scalar_t, 5>(), \
input_.packed_accessor32<scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
}
std::tuple<Tensor&, Tensor&, Tensor&> _depthwise_3d_backward_cuda_out(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
const std::array<bool, 3> output_mask)
{
TORCH_CHECK(grad_output.device() == input.device() &&
input.device() == weight.device(),
"expects input, weight and grad_output to be on the same device.");
conv_depthwise_shape_check<3>(
input, weight, Tensor() /* undefined */, grad_output,
kernel_size, stride, padding, dilation);
const Tensor grad_output_ = grad_output.contiguous();
const Tensor input_ = input.contiguous();
const Tensor weight_ = weight.contiguous();
Tensor grad_input_ =
(output_mask[0] ? grad_input
: Tensor());
if (output_mask[0]) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(),
"conv_depthwise3d",
[&] {
int64_t num_inputs = grad_input_.numel();
int64_t block = 256;
int64_t grid = std::min((num_inputs - 1) / block + 1, (int64_t)65536);
// Range check to avoid overflow in CUDA kernels.
TORCH_CHECK(grad_input_.numel() <= std::numeric_limits<int32_t>::max(),
"Input tensor is too large.");
TORCH_CHECK(grad_output_.numel() <= std::numeric_limits<int32_t>::max(),
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(),
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(),
"Padded input tensor is too large.");
}
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, 1, 1, 1, 1, 1, 1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, 1, 1, 1, -1, -1, -1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, -1, -1, -1, 1, 1, 1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION(
3, 3, 3, -1, -1, -1, -1, -1, -1)
DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
}
);
}
if (output_mask[1]) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(),
"conv_depthwise3d",
[&] {
int64_t grid = grad_weight.numel();
int64_t block = 256;
int64_t smem = sizeof(scalar_t) * block;
const int64_t int_max = std::numeric_limits<int32_t>::max();
TORCH_CHECK(grad_input_.numel() <= int_max,
"Input tensor is too large.");
TORCH_CHECK(grad_output_.numel() <= int_max,
"Output tensor is too large.");
TORCH_CHECK(weight_.numel() <= int_max,
"Weight tensor is too large.");
for (int i = 0; i < 3; ++i) {
TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= int_max,
"Padded input tensor is too large.");
}
TORCH_CHECK(grad_output_.size(0) * grad_output_.size(2) < int_max - block / C10_WARP_SIZE &&
grad_output_.size(3) <= int_max - C10_WARP_SIZE &&
grad_output_.size(4) <= int_max - C10_WARP_SIZE,
"Output size is too large.");
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(1, 1)
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(2, 2)
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS
}
);
}
if (output_mask[2]) {
grad_bias = grad_output.sum({0, 2, 3, 4});
}
return std::tie(grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor&, Tensor&, Tensor&> conv_depthwise3d_backward_cuda_out(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation) {
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
return _depthwise_3d_backward_cuda_out(
grad_input,
grad_weight,
grad_bias,
grad_output,
input,
weight,
kernel_size,
stride,
padding,
dilation,
{true,true,true});
}
std::tuple<Tensor, Tensor, Tensor> conv_depthwise3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
const std::array<bool, 3> output_mask) {
auto options = grad_output.options();
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : Tensor());
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : Tensor());
Tensor grad_bias; /* undefined temporarily */
return _depthwise_3d_backward_cuda_out(
grad_input,
grad_weight,
grad_bias,
grad_output,
input,
weight,
kernel_size,
stride,
padding,
dilation,
output_mask
);
}
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION
#undef DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS
#undef NODEF_OR_EQUAL_3
#undef NODEF_OR_EQUAL |
8a6e889c8496086fb3eb7db8c90608312674b4bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
//#include <helper_functions.h>
#include <helper_cuda.h>
#include <ctime>
#include <time.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <hipfft.h>
#include <fstream>
using namespace std;
typedef float2 Complex;
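// Benchmark: times batched 2D complex-to-complex FFTs (forward and inverse)
// for a fixed transform size and a range of batch sizes.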
int main()
{
int test_batch[] = {8, 16, 32, 64, 128, 192, 256};
//int test_batch[] = {320, 384, 448};
int test_number = 1000000;
for (int iter = 13; iter <= 13; ++iter)
{
for (int ibatch = 0; ibatch <= 6; ++ibatch)
{
int batch = test_batch[ibatch];
int N = iter;
int SIZE = N*N;
Complex *fg;
fg = new Complex[SIZE*batch];
for (int j = 0; j < batch; ++j)
for (int i = 0; i < SIZE; i++){
fg[i + j * SIZE].x = 1;
fg[i + j * SIZE].y = 0;
}
int mem_size = sizeof(Complex)* SIZE * batch;
Complex *d_signal;
checkCudaErrors(hipMalloc((void **) &(d_signal), mem_size));
checkCudaErrors(hipMemcpyAsync(d_signal, fg, mem_size,
hipMemcpyHostToDevice));
hipDeviceSynchronize();
// CUFFT plan
hipfftHandle plan;
int s[2] = {N, N};
int inembed[2] = {N, batch};
hipfftPlanMany(&plan,2, s, inembed, batch, 1, inembed, batch, 1, HIPFFT_C2C, batch);
// Transform signal and filter
clock_t start, end;
start = clock();
for (int j = 0; j < test_number / batch; ++j) {
hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal,
HIPFFT_FORWARD);
hipDeviceSynchronize();
}
end = clock();
double fft_time = (double)(end - start) / CLOCKS_PER_SEC;
printf("forwardsize%d:batchsize%d:%.6lf\n", N, batch, fft_time);
start = clock();
for (int j = 0; j < test_number / batch; ++j) {
hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal,
HIPFFT_BACKWARD);
hipDeviceSynchronize();
}
end = clock();
fft_time = (double)(end - start) / CLOCKS_PER_SEC;
printf("backwardsize%d:batchsize%d:%.6lf\n", N, batch, fft_time);
Complex * result;
result = new Complex[SIZE*batch];
hipMemcpyAsync(result, d_signal, sizeof(Complex)*SIZE,
hipMemcpyDeviceToHost);
            delete [] result;   // allocated with new[], so delete[] is required
            delete [] fg;
hipfftDestroy(plan);
//hipfftDestroy(plan2);
hipFree(d_signal);
}
}
}
| 8a6e889c8496086fb3eb7db8c90608312674b4bd.cu | #include <cuda_runtime.h>
#include "device_launch_parameters.h"
//#include <helper_functions.h>
#include <helper_cuda.h>
#include <ctime>
#include <time.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <cufft.h>
#include <fstream>
using namespace std;
typedef float2 Complex;
int main()
{
int test_batch[] = {8, 16, 32, 64, 128, 192, 256};
//int test_batch[] = {320, 384, 448};
int test_number = 1000000;
for (int iter = 13; iter <= 13; ++iter)
{
for (int ibatch = 0; ibatch <= 6; ++ibatch)
{
int batch = test_batch[ibatch];
int N = iter;
int SIZE = N*N;
Complex *fg;
fg = new Complex[SIZE*batch];
for (int j = 0; j < batch; ++j)
for (int i = 0; i < SIZE; i++){
fg[i + j * SIZE].x = 1;
fg[i + j * SIZE].y = 0;
}
int mem_size = sizeof(Complex)* SIZE * batch;
Complex *d_signal;
checkCudaErrors(cudaMalloc((void **) &(d_signal), mem_size));
checkCudaErrors(cudaMemcpyAsync(d_signal, fg, mem_size,
cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
// CUFFT plan
cufftHandle plan;
int s[2] = {N, N};
int inembed[2] = {N, batch};
cufftPlanMany(&plan,2, s, inembed, batch, 1, inembed, batch, 1, CUFFT_C2C, batch);
// Transform signal and filter
clock_t start, end;
start = clock();
for (int j = 0; j < test_number / batch; ++j) {
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal,
CUFFT_FORWARD);
cudaDeviceSynchronize();
}
end = clock();
double fft_time = (double)(end - start) / CLOCKS_PER_SEC;
printf("forwardsize%d:batchsize%d:%.6lf\n", N, batch, fft_time);
start = clock();
for (int j = 0; j < test_number / batch; ++j) {
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal,
CUFFT_INVERSE);
cudaDeviceSynchronize();
}
end = clock();
fft_time = (double)(end - start) / CLOCKS_PER_SEC;
printf("backwardsize%d:batchsize%d:%.6lf\n", N, batch, fft_time);
Complex * result;
result = new Complex[SIZE*batch];
cudaMemcpyAsync(result, d_signal, sizeof(Complex)*SIZE,
cudaMemcpyDeviceToHost);
            delete [] result;   // allocated with new[], so delete[] is required
            delete [] fg;
cufftDestroy(plan);
//cufftDestroy(plan2);
cudaFree(d_signal);
}
}
}
|
cgemv_fermi.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/zgemv_fermi.cu normal z -> c, Tue Feb 9 16:05:35 2016
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
#define PRECISION_c
#include "gemv_template_device.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
//////////////////////////////////////////////////////////////////////////////////////////
// NoTrans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
__global__ void
cgemvn_template_kernel_fermi(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvn_template_device<magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
//////////////////////////////////////////////////////////////////////////////////////////
// Trans/ConjTans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans>
__global__ void
cgemvc_template_kernel_fermi(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvc_template_device< magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE, trans >
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
//////////////////////////////////////////////////////////////////////////////////////////
// NoTrans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvn_template_fermi(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 );
dim3 threads( DIM_X, DIM_Y );
hipLaunchKernelGGL(( cgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
//////////////////////////////////////////////////////////////////////////////////////////
// Trans/ConjTans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvc_template_fermi(
magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 );
dim3 threads ( DIM_X, DIM_Y );
if (trans == MagmaConjTrans) {
hipLaunchKernelGGL(( cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
hipLaunchKernelGGL(( cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
//////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy COMPLEX array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_cblas2
********************************************************************/
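/*
    Usage sketch (not part of the original MAGMA source; m, n, ldda, dA, dx, dy
    and queue are assumed to have been set up by the caller, e.g. with
    magma_cmalloc and magma_queue_create):

        magmaFloatComplex alpha = MAGMA_C_ONE;
        magmaFloatComplex beta  = MAGMA_C_ZERO;
        // y := alpha*A*x + beta*y, with A an m-by-n matrix on the GPU
        magmablas_cgemv_q( MagmaNoTrans, m, n, alpha, dA, ldda,
                           dx, 1, beta, dy, 1, queue );
*/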
extern "C" void
magmablas_cgemv_q(
magma_trans_t trans, magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( trans == MagmaNoTrans ) {
if (m <= 256) {
cgemvn_template_fermi<version(N, 137)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
else {
cgemvn_template_fermi<version(N, 140)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
else {
cgemvc_template_fermi<version(T, 189)>
( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
| cgemv_fermi.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/zgemv_fermi.cu normal z -> c, Tue Feb 9 16:05:35 2016
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
#define PRECISION_c
#include "gemv_template_device.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
//////////////////////////////////////////////////////////////////////////////////////////
// NoTrans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
__global__ void
cgemvn_template_kernel_fermi(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvn_template_device<magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
//////////////////////////////////////////////////////////////////////////////////////////
// Trans/ConjTans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans>
__global__ void
cgemvc_template_kernel_fermi(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvc_template_device< magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE, trans >
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
//////////////////////////////////////////////////////////////////////////////////////////
// NoTrans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvn_template_fermi(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 );
dim3 threads( DIM_X, DIM_Y );
cgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
//////////////////////////////////////////////////////////////////////////////////////////
// Trans/ConjTans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvc_template_fermi(
magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 );
dim3 threads ( DIM_X, DIM_Y );
if (trans == MagmaConjTrans) {
cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
//////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy COMPLEX array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_cblas2
********************************************************************/
extern "C" void
magmablas_cgemv_q(
magma_trans_t trans, magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( trans == MagmaNoTrans ) {
if (m <= 256) {
cgemvn_template_fermi<version(N, 137)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
else {
cgemvn_template_fermi<version(N, 140)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
else {
cgemvc_template_fermi<version(T, 189)>
( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
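/*
  Illustrative usage sketch (not part of the original source): how a caller might
  drive magmablas_cgemv_q for the NoTrans case, y := alpha*A*x + beta*y. The helper
  calls shown here (magma_queue_create, magma_cmalloc, magma_roundup, magma_free)
  are assumed from the MAGMA API and their exact signatures may differ between
  MAGMA versions; sizes are made up for the example.

      magma_int_t m = 1000, n = 500, ldda = magma_roundup(m, 32);
      magma_queue_t queue;
      magma_queue_create(0, &queue);
      magmaFloatComplex_ptr dA, dx, dy;
      magma_cmalloc(&dA, ldda*n);   // m-by-n matrix with padded leading dimension
      magma_cmalloc(&dx, n);        // input vector of length n (NoTrans)
      magma_cmalloc(&dy, m);        // output vector of length m (NoTrans)
      // ... fill dA, dx, dy on the device ...
      magmaFloatComplex alpha = MAGMA_C_ONE, beta = MAGMA_C_ZERO;
      magmablas_cgemv_q(MagmaNoTrans, m, n, alpha, dA, ldda, dx, 1, beta, dy, 1, queue);
      magma_queue_destroy(queue);
      magma_free(dA); magma_free(dx); magma_free(dy);
*/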
|
57bde74b64587b869778b531afabc027d93b4e34.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "Static/ClusteringCoefficient/cc.cuh"
#include "Static/TriangleCounting/triangle2.cuh"
using namespace xlib;
namespace hornets_nest {
ClusteringCoefficient::ClusteringCoefficient(HornetGraph& hornet) :
TriangleCounting2(hornet)
// StaticAlgorithm(hornet)
{
// tri = new TriangleCounting2(hornet);
}
ClusteringCoefficient::~ClusteringCoefficient(){
// tri->release();
TriangleCounting2::release();
release();
}
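// The per-vertex operator below turns the triangle counts produced by TriangleCounting2
// into a local clustering coefficient: for a vertex v with degree d(v) > 1 it stores
// cc(v) = tri(v) / (d(v) * (d(v) - 1)), i.e. counted triangles over the number of
// ordered pairs of neighbours; vertices of degree 0 or 1 get cc(v) = 0.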
struct OPERATOR_LocalClusteringCoefficients {
triangle_t *d_triPerVertex;
clusterCoeff_t *d_ccLocal;
OPERATOR (Vertex &vertex) {
degree_t deg = vertex.degree();
d_ccLocal[vertex.id()] = 0;
if(deg>1){
d_ccLocal[vertex.id()] = (clusterCoeff_t)d_triPerVertex[vertex.id()]/(clusterCoeff_t)(deg*(deg-1));
}
}
};
void ClusteringCoefficient::reset(){
TriangleCounting2::reset();
}
void ClusteringCoefficient::run(){
TriangleCounting2::run();
forAllVertices(hornet, OPERATOR_LocalClusteringCoefficients { triPerVertex,d_ccLocal });
// int* d_ccLocalInt;
// int sumInt=gpu::reduce(d_ccLocalInt, 10);
// clusterCoeff_t sum=gpu::reduce(d_ccLocal, hornet.nV());
}
void ClusteringCoefficient::release(){
gpu::free(d_ccLocal);
d_ccLocal = nullptr;
}
void ClusteringCoefficient::init(){
//printf("Inside init. Printing hornet.nV(): %d\n", hornet.nV());
gpu::allocate(d_ccLocal, hornet.nV());
TriangleCounting2::init();
reset();
}
void ClusteringCoefficient::copyLocalClusCoeffToHost(clusterCoeff_t* h_tcs){
gpu::copyToHost(d_ccLocal, hornet.nV(), h_tcs);
}
} // namespace hornets_nest
| 57bde74b64587b869778b531afabc027d93b4e34.cu |
#include <cuda.h>
#include <cuda_runtime.h>
#include "Static/ClusteringCoefficient/cc.cuh"
#include "Static/TriangleCounting/triangle2.cuh"
using namespace xlib;
namespace hornets_nest {
ClusteringCoefficient::ClusteringCoefficient(HornetGraph& hornet) :
TriangleCounting2(hornet)
// StaticAlgorithm(hornet)
{
// tri = new TriangleCounting2(hornet);
}
ClusteringCoefficient::~ClusteringCoefficient(){
// tri->release();
TriangleCounting2::release();
release();
}
struct OPERATOR_LocalClusteringCoefficients {
triangle_t *d_triPerVertex;
clusterCoeff_t *d_ccLocal;
OPERATOR (Vertex &vertex) {
degree_t deg = vertex.degree();
d_ccLocal[vertex.id()] = 0;
if(deg>1){
d_ccLocal[vertex.id()] = (clusterCoeff_t)d_triPerVertex[vertex.id()]/(clusterCoeff_t)(deg*(deg-1));
}
}
};
void ClusteringCoefficient::reset(){
TriangleCounting2::reset();
}
void ClusteringCoefficient::run(){
TriangleCounting2::run();
forAllVertices(hornet, OPERATOR_LocalClusteringCoefficients { triPerVertex,d_ccLocal });
// int* d_ccLocalInt;
// int sumInt=gpu::reduce(d_ccLocalInt, 10);
// clusterCoeff_t sum=gpu::reduce(d_ccLocal, hornet.nV());
}
void ClusteringCoefficient::release(){
gpu::free(d_ccLocal);
d_ccLocal = nullptr;
}
void ClusteringCoefficient::init(){
//printf("Inside init. Printing hornet.nV(): %d\n", hornet.nV());
gpu::allocate(d_ccLocal, hornet.nV());
TriangleCounting2::init();
reset();
}
void ClusteringCoefficient::copyLocalClusCoeffToHost(clusterCoeff_t* h_tcs){
gpu::copyToHost(d_ccLocal, hornet.nV(), h_tcs);
}
} // namespace hornets_nest
|
02080be5baec6fce7a1ea706818eed186bd81044.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
// CUDA kernel for vector addition
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
// Calculate global thread ID (tid)
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Vector boundary guard
if (tid < n) {
// Each thread adds a single element
c[tid] = a[tid] + b[tid];
}
}
// Initialize vector of size n to int between 0-99
void vector_init(int* a, int n) {
for (int i = 0; i < n; i++) {
a[i] = rand() % 100;
}
}
// Check vector add result
void check_answer(int* a, int* b, int* c, int n) {
for (int i = 0; i < n; i++) {
assert(c[i] == a[i] + b[i]);
}
}
int main() {
//Get the device id for cuda calls
int id = 0;
hipGetDevice(&id);
// Vector size of 2^16 (65536 elements)
int n = 1 << 16;
//unified memory pointers
int* a, * b, * c;
// Allocation size for all vectors
size_t bytes = sizeof(int) * n;
// Allocate device memory
hipMallocManaged(&a, bytes);
hipMallocManaged(&b, bytes);
hipMallocManaged(&c, bytes);
// Initialize vectors a and b with random values between 0 and 99
vector_init(a, n);
vector_init(b, n);
// Threadblock size
int BLOCKS = 256;
// Grid size
int GRID = (int)ceil((float)n / BLOCKS);
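// Worked example of the launch geometry: n = 1 << 16 = 65536 elements and
// BLOCKS = 256 threads per block give GRID = ceil(65536 / 256) = 256 blocks,
// i.e. 256 * 256 = 65536 threads, one per element; the tid < n guard in
// vectorAdd covers any n that is not a multiple of BLOCKS.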
//call cuda kernel
//for prefetching a and b vectors to device to make sure data gets copied before kernel call
hipMemPrefetchAsync(a, bytes, id);
hipMemPrefetchAsync(b, bytes, id);
// Launch kernel on default stream w/o shmem
hipLaunchKernelGGL(vectorAdd, dim3(GRID), dim3(BLOCKS), 0, 0, a, b, c, n);
//wait for all the previous operations before using values
hipDeviceSynchronize();
//for prefetching c vector to the host
hipMemPrefetchAsync(c, bytes, hipCpuDeviceId);
// Check result for errors
check_answer(a, b, c, n);
printf("COMPLETED SUCCESFULLY\n");
return 0;
}
| 02080be5baec6fce7a1ea706818eed186bd81044.cu |
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
// CUDA kernel for vector addition
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
// Calculate global thread ID (tid)
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Vector boundary guard
if (tid < n) {
// Each thread adds a single element
c[tid] = a[tid] + b[tid];
}
}
// Initialize vector of size n to int between 0-99
void vector_init(int* a, int n) {
for (int i = 0; i < n; i++) {
a[i] = rand() % 100;
}
}
// Check vector add result
void check_answer(int* a, int* b, int* c, int n) {
for (int i = 0; i < n; i++) {
assert(c[i] == a[i] + b[i]);
}
}
int main() {
//Get the device id for cuda calls
int id = 0;
cudaGetDevice(&id);
// Vector size of 2^16 (65536 elements)
int n = 1 << 16;
//unified memory pointers
int* a, * b, * c;
// Allocation size for all vectors
size_t bytes = sizeof(int) * n;
// Allocate device memory
cudaMallocManaged(&a, bytes);
cudaMallocManaged(&b, bytes);
cudaMallocManaged(&c, bytes);
// Initialize vectors a and b with random values between 0 and 99
vector_init(a, n);
vector_init(b, n);
// Threadblock size
int BLOCKS = 256;
// Grid size
int GRID = (int)ceil((float)n / BLOCKS);
//call cuda kernel
//for prefetching a and b vectors to device to make sure data gets copied before kernel call
cudaMemPrefetchAsync(a, bytes, id);
cudaMemPrefetchAsync(b, bytes, id);
// Launch kernel on default stream w/o shmem
vectorAdd <<<GRID, BLOCKS >>> (a, b, c, n);
//wait for all the previous operations before using values
cudaDeviceSynchronize();
//for prefetching c vector to the host
cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);
// Check result for errors
check_answer(a, b, c, n);
printf("COMPLETED SUCCESFULLY\n");
return 0;
}
|
e308c7b11cd9e5f14f1b22eb2ef1703a25b18a5a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l3/Xtrsm.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include "kblas.h"
#include "kblas_common.h"
#include "operators.h"
#include "omp.h"
//==============================================================================================
hipblasStatus_t cublasXtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
hipblasStatus_t status;
check_error_ret( status = hipblasStrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasXtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
hipblasStatus_t status;
check_error_ret( status = hipblasDtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasXtrsm (hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb){
hipblasStatus_t status;
check_error_ret( status = hipblasCtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasXtrsm (hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
hipblasStatus_t status;
check_error_ret( status = hipblasZtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
#define WARP 32
//#define WARP1 33
#define WARP2 34
#define tx threadIdx.x
#define ty threadIdx.y
//==============================================================================================
int kblas_trsm_ib_cublas = 128;
bool kblas_trsm_use_custom = 0;
int kblas_trsm_ib_data = 512;
#define SIMPLE_SIZE(n) ( ((n) < WARP) || ( ((n) % WARP == 0) && ( (n) <= kblas_trsm_ib_cublas ) ) )
#define SIMPLE_SIZE_DATA(n) ( (n) <= kblas_trsm_ib_data )
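// SIMPLE_SIZE marks a problem small enough to be handled directly by the base-case
// Xtrsm below (the custom mul32 kernel or the cublasXtrsm wrapper); anything larger
// is split recursively in kblasXtrsm into two half-size TRSMs glued together by a
// GEMM update of the off-diagonal block. SIMPLE_SIZE_DATA plays the same role for
// the CPU-pointer (out-of-core) variants, deciding when an operand is small enough
// to be copied to the GPU in one piece.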
//==============================================================================================
//shuffle intrinsic is not supported before KEPLER
#if (TARGET_SM >= 30)
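// In trsm_mul32_L each warp owns one column of B: lane tx keeps that column's element
// of the current 32-row block in a register (rB), the 32x32 tiles of A are staged in
// shared memory (sA), and freshly solved entries are broadcast across the warp with
// shfl(), so the forward/backward substitution never has to write B to shared memory.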
template<typename T, int WARPS_PER_BLOCK, bool LOWER, bool TRANS, bool CONJG, bool UNIT>
__global__ void //__launch_bounds__(WARP * WARPS_PER_BLOCK)
trsm_mul32_L(int M, int N, T alpha, const T* /*__restrict__*/ A, int incA, T* B, int incB, int mb)
{
const int A_COLS_PER_WARP = WARP / WARPS_PER_BLOCK;
const bool forward = (LOWER != TRANS);
const short WARP1 = (TRANS ? 33 : 32);
//setup shared memory
__shared__ T sA[WARP * WARP1];//strided to avoid bank conflicts
int txyw = tx + ty * WARP1, txyiA = tx + ty * incA, txyiB = tx + ty * incB, jtxw;
int l, c, r, startB = 0, i;
T rB, s, rBj, a[4], b[4], *sAA, *BB;
for(startB = 0; startB < N; startB += gridDim.x * WARPS_PER_BLOCK)
{
if( (blockIdx.x * WARPS_PER_BLOCK + startB) >= N)
return;
BB = B + (blockIdx.x * WARPS_PER_BLOCK + startB) * incB;
//checking boundary case, the column indices of B this warp is computing
//if not active, this warp will only participate in fetching A sub-matrices, will not compute
bool active = ( (blockIdx.x * WARPS_PER_BLOCK + startB + ty) < N );
for(c = (forward ? 0 : mb-1); (forward && c < mb) || (!forward && c >= 0); c += (forward ? 1 : -1))
{
s = make_zero<T>();
for(r = (forward ? 0 : mb-1); (forward && r < c) || (!forward && r > c); r += (forward ? 1 : -1))
{
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
if(TRANS)
//load A(r,c)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * (r + c * incA) + l * WARPS_PER_BLOCK * incA];
else
//load A(c,r)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = __ldg(&(A[txyiA + WARP * (c + r * incA) + l * WARPS_PER_BLOCK * incA]));
}
//load B(r)
if(active)
rB = __ldg(&(BB[txyiB + WARP * r]));
__syncthreads();
if(active){
//gemm A(r,c)/A(c,r) & B(r) onto B(c) held at s
if(TRANS)
sAA = sA + tx*WARP1;
else
sAA = sA + tx;
#pragma unroll
for(int j = 0; j < WARP; j+=4){
if(TRANS){
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = CONJG ? conjugate(sAA[j + i]) : sAA[j + i];
}else{
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = sAA[(j + i)*WARP1];
}
#pragma unroll
for(i = 0; i < 4; i++)
b[i] = shfl(rB, j + i);
#pragma unroll
for(i = 0; i < 4; i++)
s = FMA( a[i], b[i], s );
}
}
__syncthreads();
}
//load A(c,c) from global to shared mem
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = __ldg(&(A[txyiA + WARP * c * (incA + 1) + l * WARPS_PER_BLOCK * incA]));
}
//load B(c) into registers
if(active){
rB = __ldg(&(BB[txyiB + WARP * c]));
}
__syncthreads();
if(active)
{
//perform trsm on shared mem
if(!LOWER && TRANS)
jtxw = tx * WARP1;
else
if(!LOWER && !TRANS)
jtxw = tx + (WARP - 1) * WARP1;
else
if(LOWER && TRANS)
jtxw = tx * WARP1 + (WARP - 1);
else
if(LOWER && !TRANS)
jtxw = tx;
#pragma unroll
for(int j = (forward ? 0 : WARP-1); (forward && (j < WARP)) || (!forward && (j >= 0)); j += (forward ? 1 : -1)){
if(j == tx){
rB = FMA(alpha, rB, -s);//TODO
if(!UNIT){
a[0] = (TRANS && CONJG) ? conjugate(sA[tx * (WARP1+1)]) : sA[tx * (WARP1+1)];//diagonal element
rB = rB / a[0];//TODO
}
}
rBj = shfl(rB, j);
if( (forward && (j < tx)) || (!forward && (j > tx)) ){
a[0] = (TRANS && CONJG) ? conjugate(sA[jtxw]) : sA[jtxw];
s = FMA(a[0], rBj, s);
}
jtxw += (TRANS ? 1 : WARP1) * (forward ? 1 : -1);
}
//store back B(c) to global mem
BB[txyiB + WARP * c] = rB;
}
__syncthreads();
}
}
}
//==============================================================================================
#define TRSM_NUM_VARIANTS 4
#define TRSM_kernel_variants(__WPB) \
trsm_mul32_L<T, __WPB, true, false, false, false>, \
trsm_mul32_L<T, __WPB, true, true, false, false>, \
trsm_mul32_L<T, __WPB, false, false, false, false>, \
trsm_mul32_L<T, __WPB, false, true, false, false>
/*,TODO
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, true, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, true, false>*/
template<class T>
hipblasStatus_t Xtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB)
{
//handle odd cases with cublas
if( (*alpha == make_zero<T>())
|| (!kblas_trsm_use_custom)
|| (side == HIPBLAS_SIDE_LEFT && m < WARP)
|| (side == HIPBLAS_SIDE_RIGHT/* && n < WARP*/))//TODO
{
return cublasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
typedef void (*trsm_kernels_type)(int M, int N, T alpha, const T* A, int incA, T* B, int incB, int mb);
#define WARPS_PER_BLOCK 8
#define B_COLS_PER_WARP 1
trsm_kernels_type trsm_kernels[TRSM_NUM_VARIANTS] = {// T, WARPS_PER_BLOCK, LOWER, TRANS, CONJG, UNIT
TRSM_kernel_variants(WARPS_PER_BLOCK)
};
hipStream_t curStream;
hipblasStatus_t status;
check_error_ret( status = hipblasGetStream( handle, &curStream ), status);
if( ((side == HIPBLAS_SIDE_LEFT) && (m % WARP == 0)) /*|| ((side == HIPBLAS_SIDE_RIGHT) && (n % WARP == 0))*/ )//TODO
{
int func_idx = /*4*(side == HIPBLAS_SIDE_RIGHT) + */2*(uplo == HIPBLAS_FILL_MODE_UPPER) + (trans != HIPBLAS_OP_N);// + (diag == HIPBLAS_DIAG_UNIT);TODO
dim3 blockDim( WARP, WARPS_PER_BLOCK );
dim3 gridDim(
(side == HIPBLAS_SIDE_LEFT) * (n / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (n % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))
/*+TODO
(side == HIPBLAS_SIDE_RIGHT) * (m / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (m % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))*/
, 1);
int mb = (side == HIPBLAS_SIDE_LEFT) * m / WARP /*+ (side == HIPBLAS_SIDE_RIGHT) * n / WARP*/;//TODO
hipLaunchKernelGGL(trsm_kernels[func_idx], gridDim, blockDim, 0, curStream, m, n, *alpha, A, incA, B, incB, mb);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
}else{
//error: we should not reach this case
return HIPBLAS_STATUS_INTERNAL_ERROR;
}
return HIPBLAS_STATUS_SUCCESS;
}
#else
template<class T>
hipblasStatus_t Xtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB){
return cublasXtrsm( handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
#endif
//==============================================================================================
template<typename T>
hipblasStatus_t kblasXtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
hipblasStatus_t status;
if( (*alpha == make_zero<T>())//TODO
|| ( (side == HIPBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == HIPBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
return Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
else
if(side == HIPBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, HIPBLAS_OP_N,
m1, n, m2,
&mone, A+m1*incA, incA,
B+m1, incB,
alpha, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, HIPBLAS_OP_N,
m2, n, m1,
&mone, A+m1*incA, incA,
B, incB,
alpha, B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, HIPBLAS_OP_N,
m2, n, m1,
&mone, A+m1, incA,
B, incB,
alpha, B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, HIPBLAS_OP_N,
m1, n, m2,
&mone, A+m1, incA,
B+m1, incB,
alpha, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
HIPBLAS_OP_N, trans,
m, n2, n1,
&mone, B, incB,
A+n1*incA, incA,
alpha, B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
&one, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
HIPBLAS_OP_N, trans,
m, n1, n2,
&mInvAlpha, B+n1*incB, incB,
A+n1*incA, incA,
&one, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
HIPBLAS_OP_N, trans,
m, n1, n2,
&mone, B+n1*incB, incB,
A+n1, incA,
alpha, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
&one, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
HIPBLAS_OP_N, trans,
m, n2, n1,
&mInvAlpha, B, incB,
A+n1, incA,
&one, B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
template<typename T>
hipblasStatus_t kblasXtrsm(hipblasHandle_t handle, hipStream_t &strIn, hipStream_t &strOut,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int ldA, T* d_A, int lddA,
T *h_B, int ldB, T* d_B, int lddB,
bool BIsIn, bool getBOut, bool AIsIn)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
hipblasStatus_t status;
hipblasOperation_t noTrans = HIPBLAS_OP_N;//Trans = HIPBLAS_OP_T,
hipEvent_t eDataIn, eComp;
check_error_ret( hipEventCreateWithFlags(&eDataIn, hipEventDisableTiming), HIPBLAS_STATUS_EXECUTION_FAILED);
check_error_ret( hipEventCreateWithFlags(&eComp, hipEventDisableTiming), HIPBLAS_STATUS_EXECUTION_FAILED);
hipStream_t strComp;
check_error_ret( hipblasGetStream(handle, &strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
if( (*alpha == make_zero<T>())//TODO
|| ( (side == HIPBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == HIPBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
int Am = (side == HIPBLAS_SIDE_LEFT) ? m : n;
//if B is not already in, copy in B block
if(!BIsIn)
check_error_ret( status = hipblasSetMatrixAsync( m, n, sizeof(T), h_B, ldB, d_B, lddB, strIn ), status);
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( Am, Am, sizeof(T), h_A, ldA, d_A, lddA, strIn ), status);
//wait for data to arrive
if(!AIsIn || !BIsIn){
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if( (status = Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, d_A, lddA,
d_B, lddB ) ) != HIPBLAS_STATUS_SUCCESS ) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m, n, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
}
else
if(side == HIPBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) || (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error_ret( status = hipblasSetMatrixAsync( m, m, sizeof(T), h_A, ldA, d_A, lddA, strIn), status);
AIsIn = true;
}
if( (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error_ret( status = hipblasSetMatrixAsync( m, n, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, lddB, h_B+m1, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m1, n, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*ldA, ldA, d_A+m1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1*lddA, lddA,
d_B+m1, lddB,
alpha, d_B, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m1, n, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, ldB, d_B+m1, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*ldA, ldA, d_A+m1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1*lddA, lddA,
d_B, lddB,
alpha, d_B+m1, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m1, n, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, ldB, d_B+m1, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( m2, m1, sizeof(T), h_A+m1, ldA, d_A+m1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1, lddA,
d_B, lddB,
alpha, d_B+m1, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, lddB, h_B+m1, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m1, n, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1, ldA, d_A+m1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1, lddA,
d_B+m1, lddB,
alpha, d_B, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == noTrans){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m, n1, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*ldB, ldB, d_B+n1*lddB, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1*ldA, ldA, d_A+n1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mone, d_B, lddB,
d_A+n1*lddA, lddA,
alpha, d_B+n1*lddB, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
&one, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*lddB, lddB, h_B+n1*ldB, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m, n1, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1*ldA, ldA, d_A+n1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mInvAlpha, d_B+n1*lddB, lddB,
d_A+n1*lddA, lddA,
&one, d_B, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*lddB, lddB, h_B+n1*ldB, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m, n1, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( n2, n1, sizeof(T), h_A+n1, ldA, d_A+n1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mone, d_B+n1*lddB, lddB,
d_A+n1, lddA,
alpha, d_B, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
&one, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m, n1, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = hipblasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*ldB, ldB, d_B+n1*lddB, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = hipblasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1, ldA, d_A+n1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mInvAlpha, d_B, lddB,
d_A+n1, lddA,
&one, d_B+n1*lddB, lddB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
check_error_ret( hipEventDestroy( eDataIn ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipEventDestroy( eComp ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
//#define DO_INLINE_BOUT 0
template<class T>
hipblasStatus_t kblasXtrsm_cpu(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int ldA,
T *h_B, int ldB){
//allocate memory on device
T *d_A, *d_B;
int Am, An, Bm, Bn, lddA, lddB;
if ( side == HIPBLAS_SIDE_LEFT ) {
Am = An = m;
} else {
Am = An = n;
}
Bm = m;
Bn = n;
lddA = ((Am+31)/32)*32;
lddB = ((Bm+31)/32)*32;
/*check_error_ret( hipHostRegister((void*)h_A, Am * An * sizeof(T), hipHostRegisterDefault), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipHostRegister((void*)h_B, Bm * Bn * sizeof(T), hipHostRegisterDefault), HIPBLAS_STATUS_INTERNAL_ERROR);*/
hipblasStatus_t status;
//*
int AsyncEngineCount, devID;
check_error_ret( hipGetDevice(&devID), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipDeviceGetAttribute(&AsyncEngineCount, hipDeviceAttributeAsyncEngineCount, devID), HIPBLAS_STATUS_INTERNAL_ERROR);
bool DO_INLINE_BOUT = AsyncEngineCount > 1;
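// With two copy engines (DO_INLINE_BOUT) finished tiles of B stream back on their own
// queue (outStream) while later recursion steps keep computing; with a single engine
// the copy-out inside the recursion is skipped and B is fetched once on the input
// stream after the compute stream has drained.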
//*/
check_error_ret( hipMalloc( (void**)&d_A, (lddA*An)*sizeof(T) ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipMalloc( (void**)&d_B, (lddB*Bn)*sizeof(T) ), HIPBLAS_STATUS_INTERNAL_ERROR);
//setup streams
hipStream_t inStream, outStream;
check_error_ret( hipStreamCreateWithFlags( &inStream, hipStreamNonBlocking), HIPBLAS_STATUS_INTERNAL_ERROR );
if(DO_INLINE_BOUT)
check_error_ret( hipStreamCreateWithFlags( &outStream, hipStreamNonBlocking), HIPBLAS_STATUS_INTERNAL_ERROR );
//call cpu API trsm
check_error_ret(
(status = kblasXtrsm(handle, inStream, outStream,
side, uplo, trans,diag,
m, n,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
false, DO_INLINE_BOUT, false)
), status);
//sync streams
if(DO_INLINE_BOUT){
check_error_ret( hipStreamSynchronize( outStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
}else{
hipStream_t compStream;
check_error_ret( hipblasGetStream(handle, &compStream), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamSynchronize( compStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = hipblasGetMatrixAsync( m, n, sizeof(T), d_B, lddB, h_B, ldB, inStream), status);
}
//revoke streams
check_error_ret( hipStreamDestroy( inStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
if(DO_INLINE_BOUT)
check_error_ret( hipStreamDestroy( outStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
/*check_error_ret( hipHostUnregister( (void*)h_A ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( hipHostUnregister( (void*)h_B ), HIPBLAS_STATUS_INTERNAL_ERROR );*/
//free device memory
check_error_ret( hipFree( d_A ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( hipFree( d_B ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
template<class T>
hipblasStatus_t kblasXtrsm_cpu_m(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int ldA,
T *h_B, int ldB,
//TODO should accept an array of device IDs or a set of cublas handles instead
int ngpu){
//allocate memory on device
T *d_A[ngpu], *d_B[ngpu];
int Am, An, Bm, Bn, lddA, lddB;
if ( side == HIPBLAS_SIDE_LEFT ) {
Am = An = m;
} else {
Am = An = n;
}
Bm = m;
Bn = n;
/*check_error_ret( hipHostRegister((void*)h_A, Am * An * sizeof(T), hipHostRegisterDefault), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipHostRegister((void*)h_B, Bm * Bn * sizeof(T), hipHostRegisterDefault), HIPBLAS_STATUS_INTERNAL_ERROR);*/
hipStream_t inStream[ngpu], outStream[ngpu];
hipblasStatus_t status[ngpu];
hipblasHandle_t cub_handle[ngpu];
cub_handle[0] = handle;
//*
bool DO_INLINE_BOUT[ngpu];
for(int g = 0; g < ngpu; g++){
check_error_ret( hipSetDevice(g), HIPBLAS_STATUS_INTERNAL_ERROR);
int AsyncEngineCount;
check_error_ret( hipDeviceGetAttribute(&AsyncEngineCount, hipDeviceAttributeAsyncEngineCount, g), HIPBLAS_STATUS_INTERNAL_ERROR);
DO_INLINE_BOUT[g] = AsyncEngineCount > 1;
if(g > 0)
{
check_error_ret( hipblasCreate(&cub_handle[g]), HIPBLAS_STATUS_INTERNAL_ERROR);
}
//setup streams
check_error_ret( hipStreamCreateWithFlags( &inStream[g], hipStreamNonBlocking), HIPBLAS_STATUS_INTERNAL_ERROR );
if(DO_INLINE_BOUT[g])
check_error_ret( hipStreamCreateWithFlags( &outStream[g], hipStreamNonBlocking), HIPBLAS_STATUS_INTERNAL_ERROR );
}
//*/
//TODO IMPORTANT: handle when data does not fit on all gpus
int Bn_gpu, Bm_gpu;
bool left = (side == HIPBLAS_SIDE_LEFT);
if(left){
Bn_gpu = Bn / ngpu;//TODO handle odd cases
Bm_gpu = Bm;
}else{
Bm_gpu = Bm / ngpu;
Bn_gpu = Bn;
}
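// Each GPU receives a full copy of the triangular A and an equal slice of B (a block
// of columns for a left-side solve, a block of rows for a right-side solve), so the
// per-GPU solves are independent and need no inter-GPU communication. The split
// assumes ngpu divides the sliced dimension evenly (see the TODO above).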
lddA = ((Am+31)/32)*32;
lddB = ((Bm_gpu+31)/32)*32;
//
omp_set_num_threads(ngpu);
#pragma omp parallel
{
#pragma omp for
for(int g = 0; g < ngpu; g++){
//TODO check status
hipSetDevice(g);
hipMalloc( (void**)&d_A[g], (lddA*An)*sizeof(T) );
hipMalloc( (void**)&d_B[g], (lddB*Bn_gpu)*sizeof(T) );
//call cpu API trsm
status[g] = kblasXtrsm(cub_handle[g], inStream[g], outStream[g],
side, uplo, trans,diag,
Bm_gpu, Bn_gpu,
alpha, h_A, ldA, d_A[g], lddA,
h_B+g*(left ? Bn_gpu*ldB : Bm_gpu), ldB, d_B[g], lddB,
false, DO_INLINE_BOUT[g], false);
//TODO check this status for error
//sync streams
if(DO_INLINE_BOUT[g]){
hipStreamSynchronize( outStream[g] );
}else{
hipStream_t compStream;
hipblasGetStream(cub_handle[g], &compStream);
hipStreamSynchronize( compStream );
status[g] = hipblasGetMatrixAsync( Bm_gpu, Bn_gpu, sizeof(T), d_B[g], lddB, h_B+g*(left ? Bn_gpu*ldB : Bm_gpu), ldB, inStream[g]);
hipStreamSynchronize( inStream[g] );
}
//hipDeviceSynchronize();
}
//#pragma omp barrier
}
/*/
for(int g = 0; g < ngpu; g++){
check_error_ret( hipSetDevice(g), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipMalloc( (void**)&d_A[g], (Am*An)*sizeof(T) ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipMalloc( (void**)&d_B[g], (Bm_gpu*Bn_gpu)*sizeof(T) ), HIPBLAS_STATUS_INTERNAL_ERROR);
//call cpu API trsm
check_error_ret(
(status[g] = kblasXtrsm(cub_handle[g], inStream[g], outStream[g],
side, uplo, trans,diag,
Bm_gpu, Bn_gpu,
alpha, h_A, incA, d_A[g],
h_B+g*(left ? Bn_gpu*incB : Bm_gpu), incB, d_B[g],
false, DO_INLINE_BOUT[g], false)
), status[g]);
}
for(int g = 0; g < ngpu; g++){
check_error_ret( hipSetDevice(g), HIPBLAS_STATUS_INTERNAL_ERROR );
//sync streams
if(DO_INLINE_BOUT[g]){
check_error_ret( hipStreamSynchronize( outStream[g] ), HIPBLAS_STATUS_INTERNAL_ERROR);
}else{
hipStream_t compStream;
check_error_ret( hipblasGetStream(cub_handle[g], &compStream), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipStreamSynchronize( compStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status[g] = hipblasGetMatrixAsync( Bm_gpu, Bn_gpu, sizeof(T), d_B[g], incB, h_B+g*(left ? Bn_gpu*incB : Bm_gpu), incB, inStream[g]), status[g]);
hipStreamSynchronize( inStream[g] );
}
}//*/
for(int g = 0; g < ngpu; g++){
check_error_ret( hipSetDevice(g), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( hipDeviceSynchronize(), HIPBLAS_STATUS_INTERNAL_ERROR );
//revoke streams
check_error_ret( hipStreamDestroy( inStream[g] ), HIPBLAS_STATUS_INTERNAL_ERROR);
if(DO_INLINE_BOUT[g])
check_error_ret( hipStreamDestroy( outStream[g] ), HIPBLAS_STATUS_INTERNAL_ERROR);
/*check_error_ret( hipHostUnregister( (void*)h_A ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( hipHostUnregister( (void*)h_B ), HIPBLAS_STATUS_INTERNAL_ERROR );*/
//free device memory
check_error_ret( hipFree( d_A[g] ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( hipFree( d_B[g] ), HIPBLAS_STATUS_INTERNAL_ERROR );
if(g > 0)
{
check_error_ret( hipblasDestroy(cub_handle[g]), HIPBLAS_STATUS_INTERNAL_ERROR );
}
}
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
/*extern "C" {
int kblas_strsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB,
hipStream_t stream){
check_error_ret(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB,
hipStream_t stream){
check_error_ret(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int incA,
hipComplex *B, int incB,
hipStream_t stream){
check_error_ret(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int incA,
hipDoubleComplex *B, int incB,
hipStream_t stream){
check_error_ret(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_strsm(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm(
char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int incA,
hipComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm(
char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int incA,
hipDoubleComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
}*/
//==============================================================================================
#define kblasXtrsm_async_BODY { \
\
hipblasHandle_t cublas_handle; \
check_error_ret( hipblasCreate(&cublas_handle), void() ); \
if( hipblasSetStream(cublas_handle, stream) != HIPBLAS_STATUS_SUCCESS ){ \
check_error_ret( hipblasDestroy(cublas_handle), void()); \
return; \
} \
hipblasSideMode_t side_v2 = (side == KBLAS_Left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT); \
hipblasFillMode_t uplo_v2 = (uplo == KBLAS_Lower ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER); \
hipblasOperation_t trans_v2 = (trans == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N); \
hipblasDiagType_t diag_v2 = (diag == KBLAS_Unit ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT); \
\
check_error_ret( kblasXtrsm(cublas_handle, \
side_v2, uplo_v2, trans_v2, diag_v2, \
m, n, \
&alpha, A, lda, \
B, ldb), void()); \
\
check_error_ret( hipblasDestroy(cublas_handle), void()); \
}
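// The macro above backs the legacy char-based *_async entry points: it creates a
// temporary library handle (hipblasCreate), binds it to the caller's stream, maps
// the KBLAS_* char flags onto the v2 enums, runs the recursive kblasXtrsm, and then
// destroys the handle, so the old interface pays a handle-creation cost per call.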
extern "C"{
void kblasStrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
void kblasDtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
void kblasCtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int lda,
hipComplex *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
void kblasZtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
}
//==============================================================================================
void kblasStrsm(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb){
kblasStrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasDtrsm(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb){
kblasDtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasCtrsm(char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int lda,
hipComplex *B, int ldb){
kblasCtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasZtrsm(char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
kblasZtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
//==============================================================================================
hipblasStatus_t kblasStrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblasDtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblasCtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblasZtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
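/*
  Illustrative usage sketch (not part of the original source): solving
  op(A) * X = alpha * B on the GPU through the handle-based kblasStrsm wrapper
  above. The matrix sizes and the way A and B get filled are assumptions made
  only for this example.

      const int m = 1024, n = 512, lda = m, ldb = m;
      hipblasHandle_t handle;
      hipblasCreate(&handle);
      float *d_A, *d_B;
      float alpha = 1.0f;
      hipMalloc((void**)&d_A, (size_t)lda * m * sizeof(float)); // m-by-m triangular A
      hipMalloc((void**)&d_B, (size_t)ldb * n * sizeof(float)); // m-by-n right-hand sides B
      // ... upload A (lower triangular, nonzero diagonal) and B here ...
      kblasStrsm(handle,
                 HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER,
                 HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT,
                 m, n, &alpha, d_A, lda, d_B, ldb); // B is overwritten with the solution X
      hipblasDestroy(handle);
      hipFree(d_A); hipFree(d_B);
*/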
//==============================================================================================
hipblasStatus_t kblas_strsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblas_dtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblas_ctrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblas_ztrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
//==============================================================================================
hipblasStatus_t kblas_strsm_mgpu(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
hipblasStatus_t kblas_dtrsm_mgpu(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
hipblasStatus_t kblas_ctrsm_mgpu(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
hipblasStatus_t kblas_ztrsm_mgpu(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
| e308c7b11cd9e5f14f1b22eb2ef1703a25b18a5a.cu |
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l3/Xtrsm.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include "kblas.h"
#include "kblas_common.h"
#include "operators.h"
#include "omp.h"
//==============================================================================================
cublasStatus_t cublasXtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
cublasStatus_t status;
check_error_ret( status = cublasStrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasXtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
cublasStatus_t status;
check_error_ret( status = cublasDtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasXtrsm (cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb){
cublasStatus_t status;
check_error_ret( status = cublasCtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasXtrsm (cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
cublasStatus_t status;
check_error_ret( status = cublasZtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb ), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
#define WARP 32
//#define WARP1 33
#define WARP2 34
#define tx threadIdx.x
#define ty threadIdx.y
//==============================================================================================
int kblas_trsm_ib_cublas = 128;
bool kblas_trsm_use_custom = 0;
int kblas_trsm_ib_data = 512;
#define SIMPLE_SIZE(n) ( ((n) < WARP) || ( ((n) % WARP == 0) && ( (n) <= kblas_trsm_ib_cublas ) ) )
#define SIMPLE_SIZE_DATA(n) ( (n) <= kblas_trsm_ib_data )
//==============================================================================================
//shuffle intrinsic is not supported before KEPLER
#if (TARGET_SM >= 30)
template<typename T, int WARPS_PER_BLOCK, bool LOWER, bool TRANS, bool CONJG, bool UNIT>
__global__ void //__launch_bounds__(WARP * WARPS_PER_BLOCK)
trsm_mul32_L(int M, int N, T alpha, const T* /*__restrict__*/ A, int incA, T* B, int incB, int mb)
{
const int A_COLS_PER_WARP = WARP / WARPS_PER_BLOCK;
const bool forward = (LOWER != TRANS);
const short WARP1 = (TRANS ? 33 : 32);
//setup shared memory
__shared__ T sA[WARP * WARP1];//strided to avoid bank conflicts
int txyw = tx + ty * WARP1, txyiA = tx + ty * incA, txyiB = tx + ty * incB, jtxw;
int l, c, r, startB = 0, i;
T rB, s, rBj, a[4], b[4], *sAA, *BB;
for(startB = 0; startB < N; startB += gridDim.x * WARPS_PER_BLOCK)
{
if( (blockIdx.x * WARPS_PER_BLOCK + startB) >= N)
return;
BB = B + (blockIdx.x * WARPS_PER_BLOCK + startB) * incB;
//checking boundary case, the column indices of B this warp is computing
//if not active, this warp will only participate in fetching A sub-matrices, will not compute
bool active = ( (blockIdx.x * WARPS_PER_BLOCK + startB + ty) < N );
for(c = (forward ? 0 : mb-1); (forward && c < mb) || (!forward && c >= 0); c += (forward ? 1 : -1))
{
s = make_zero<T>();
for(r = (forward ? 0 : mb-1); (forward && r < c) || (!forward && r > c); r += (forward ? 1 : -1))
{
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
if(TRANS)
//load A(r,c)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * (r + c * incA) + l * WARPS_PER_BLOCK * incA];
else
//load A(c,r)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = __ldg(&(A[txyiA + WARP * (c + r * incA) + l * WARPS_PER_BLOCK * incA]));
}
//load B(r)
if(active)
rB = __ldg(&(BB[txyiB + WARP * r]));
__syncthreads();
if(active){
//gemm A(r,c)/A(c,r) & B(r) onto B(c) held at s
if(TRANS)
sAA = sA + tx*WARP1;
else
sAA = sA + tx;
#pragma unroll
for(int j = 0; j < WARP; j+=4){
if(TRANS){
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = CONJG ? conjugate(sAA[j + i]) : sAA[j + i];
}else{
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = sAA[(j + i)*WARP1];
}
#pragma unroll
for(i = 0; i < 4; i++)
b[i] = shfl(rB, j + i);
#pragma unroll
for(i = 0; i < 4; i++)
s = FMA( a[i], b[i], s );
}
}
__syncthreads();
}
//load A(c,c) from global to shared mem
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = __ldg(&(A[txyiA + WARP * c * (incA + 1) + l * WARPS_PER_BLOCK * incA]));
}
//load B(c) into registers
if(active){
rB = __ldg(&(BB[txyiB + WARP * c]));
}
__syncthreads();
if(active)
{
//perform trsm on shared mem
if(!LOWER && TRANS)
jtxw = tx * WARP1;
else
if(!LOWER && !TRANS)
jtxw = tx + (WARP - 1) * WARP1;
else
if(LOWER && TRANS)
jtxw = tx * WARP1 + (WARP - 1);
else
if(LOWER && !TRANS)
jtxw = tx;
#pragma unroll
for(int j = (forward ? 0 : WARP-1); (forward && (j < WARP)) || (!forward && (j >= 0)); j += (forward ? 1 : -1)){
if(j == tx){
rB = FMA(alpha, rB, -s);//TODO
if(!UNIT){
a[0] = (TRANS && CONJG) ? conjugate(sA[tx * (WARP1+1)]) : sA[tx * (WARP1+1)];//diagonal element
rB = rB / a[0];//TODO
}
}
rBj = shfl(rB, j);
if( (forward && (j < tx)) || (!forward && (j > tx)) ){
a[0] = (TRANS && CONJG) ? conjugate(sA[jtxw]) : sA[jtxw];
s = FMA(a[0], rBj, s);
}
jtxw += (TRANS ? 1 : WARP1) * (forward ? 1 : -1);
}
//store back B(c) to global mem
BB[txyiB + WARP * c] = rB;
}
__syncthreads();
}
}
}
//==============================================================================================
#define TRSM_NUM_VARIANTS 4
#define TRSM_kernel_variants(__WPB) \
trsm_mul32_L<T, __WPB, true, false, false, false>, \
trsm_mul32_L<T, __WPB, true, true, false, false>, \
trsm_mul32_L<T, __WPB, false, false, false, false>, \
trsm_mul32_L<T, __WPB, false, true, false, false>
/*,TODO
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, true, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, true, false>*/
template<class T>
cublasStatus_t Xtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB)
{
//handle odd cases with cublas
if( (*alpha == make_zero<T>())
|| (!kblas_trsm_use_custom)
|| (side == CUBLAS_SIDE_LEFT && m < WARP)
|| (side == CUBLAS_SIDE_RIGHT/* && n < WARP*/))//TODO
{
return cublasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
typedef void (*trsm_kernels_type)(int M, int N, T alpha, const T* A, int incA, T* B, int incB, int mb);
#define WARPS_PER_BLOCK 8
#define B_COLS_PER_WARP 1
trsm_kernels_type trsm_kernels[TRSM_NUM_VARIANTS] = {// T, WARPS_PER_BLOCK, LOWER, TRANS, CONJG, UNIT
TRSM_kernel_variants(WARPS_PER_BLOCK)
};
cudaStream_t curStream;
cublasStatus_t status;
check_error_ret( status = cublasGetStream( handle, &curStream ), status);
if( ((side == CUBLAS_SIDE_LEFT) && (m % WARP == 0)) /*|| ((side == CUBLAS_SIDE_RIGHT) && (n % WARP == 0))*/ )//TODO
{
int func_idx = /*4*(side == CUBLAS_SIDE_RIGHT) + */2*(uplo == CUBLAS_FILL_MODE_UPPER) + (trans != CUBLAS_OP_N);// + (diag == CUBLAS_DIAG_UNIT);TODO
dim3 blockDim( WARP, WARPS_PER_BLOCK );
dim3 gridDim(
(side == CUBLAS_SIDE_LEFT) * (n / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (n % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))
/*+TODO
(side == CUBLAS_SIDE_RIGHT) * (m / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (m % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))*/
, 1);
int mb = (side == CUBLAS_SIDE_LEFT) * m / WARP /*+ (side == CUBLAS_SIDE_RIGHT) * n / WARP*/;//TODO
trsm_kernels[func_idx]<<< gridDim, blockDim, 0, curStream>>> (m, n, *alpha, A, incA, B, incB, mb);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
}else{
//error: we should not reach this case
return CUBLAS_STATUS_INTERNAL_ERROR;
}
return CUBLAS_STATUS_SUCCESS;
}
#else
template<class T>
cublasStatus_t Xtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB){
return cublasXtrsm( handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
#endif
//==============================================================================================
template<typename T>
cublasStatus_t kblasXtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
cublasStatus_t status;
if( (*alpha == make_zero<T>())//TODO
|| ( (side == CUBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == CUBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
return Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
else
if(side == CUBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, CUBLAS_OP_N,
m1, n, m2,
&mone, A+m1*incA, incA,
B+m1, incB,
alpha, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, CUBLAS_OP_N,
m2, n, m1,
&mone, A+m1*incA, incA,
B, incB,
alpha, B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, CUBLAS_OP_N,
m2, n, m1,
&mone, A+m1, incA,
B, incB,
alpha, B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, CUBLAS_OP_N,
m1, n, m2,
&mone, A+m1, incA,
B+m1, incB,
alpha, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
CUBLAS_OP_N, trans,
m, n2, n1,
&mone, B, incB,
A+n1*incA, incA,
alpha, B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
&one, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
CUBLAS_OP_N, trans,
m, n1, n2,
&mInvAlpha, B+n1*incB, incB,
A+n1*incA, incA,
&one, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
CUBLAS_OP_N, trans,
m, n1, n2,
&mone, B+n1*incB, incB,
A+n1, incA,
alpha, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
&one, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
CUBLAS_OP_N, trans,
m, n2, n1,
&mInvAlpha, B, incB,
A+n1, incA,
&one, B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
template<typename T>
cublasStatus_t kblasXtrsm(cublasHandle_t handle, cudaStream_t &strIn, cudaStream_t &strOut,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int ldA, T* d_A, int lddA,
T *h_B, int ldB, T* d_B, int lddB,
bool BIsIn, bool getBOut, bool AIsIn)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
cublasStatus_t status;
cublasOperation_t noTrans = CUBLAS_OP_N;//Trans = CUBLAS_OP_T,
cudaEvent_t eDataIn, eComp;
check_error_ret( cudaEventCreateWithFlags(&eDataIn, cudaEventDisableTiming), CUBLAS_STATUS_EXECUTION_FAILED);
check_error_ret( cudaEventCreateWithFlags(&eComp, cudaEventDisableTiming), CUBLAS_STATUS_EXECUTION_FAILED);
cudaStream_t strComp;
check_error_ret( cublasGetStream_v2(handle, &strComp), CUBLAS_STATUS_INTERNAL_ERROR);
if( (*alpha == make_zero<T>())//TODO
|| ( (side == CUBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == CUBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
int Am = (side == CUBLAS_SIDE_LEFT) ? m : n;
//if B is not already in, copy in B block
if(!BIsIn)
check_error_ret( status = cublasSetMatrixAsync( m, n, sizeof(T), h_B, ldB, d_B, lddB, strIn ), status);
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( Am, Am, sizeof(T), h_A, ldA, d_A, lddA, strIn ), status);
//wait for data to arrive
if(!AIsIn || !BIsIn){
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if( (status = Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, d_A, lddA,
d_B, lddB ) ) != CUBLAS_STATUS_SUCCESS ) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m, n, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
}
else
if(side == CUBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) || (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error_ret( status = cublasSetMatrixAsync( m, m, sizeof(T), h_A, ldA, d_A, lddA, strIn), status);
AIsIn = true;
}
if( (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error_ret( status = cublasSetMatrixAsync( m, n, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, lddB, h_B+m1, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m1, n, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*ldA, ldA, d_A+m1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1*lddA, lddA,
d_B+m1, lddB,
alpha, d_B, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m1, n, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, ldB, d_B+m1, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*ldA, ldA, d_A+m1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1*lddA, lddA,
d_B, lddB,
alpha, d_B+m1, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m1, n, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, ldB, d_B+m1, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( m2, m1, sizeof(T), h_A+m1, ldA, d_A+m1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1, lddA,
d_B, lddB,
alpha, d_B+m1, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*ldA, ldA, d_A+m1+m1*lddA, lddA,
h_B+m1, ldB, d_B+m1, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, lddB, h_B+m1, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m1, n, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1, ldA, d_A+m1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1, lddA,
d_B+m1, lddB,
alpha, d_B, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == noTrans){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m, n1, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*ldB, ldB, d_B+n1*lddB, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1*ldA, ldA, d_A+n1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mone, d_B, lddB,
d_A+n1*lddA, lddA,
alpha, d_B+n1*lddB, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
&one, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*lddB, lddB, h_B+n1*ldB, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m, n1, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1*ldA, ldA, d_A+n1*lddA, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mInvAlpha, d_B+n1*lddB, lddB,
d_A+n1*lddA, lddA,
&one, d_B, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*lddB, lddB, h_B+n1*ldB, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m, n1, sizeof(T), h_B, ldB, d_B, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( n2, n1, sizeof(T), h_A+n1, ldA, d_A+n1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mone, d_B+n1*lddB, lddB,
d_A+n1, lddA,
alpha, d_B, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
&one, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error_ret( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m, n1, sizeof(T), d_B, lddB, h_B, ldB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error_ret( status = cublasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*ldB, ldB, d_B+n1*lddB, lddB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error_ret( status = cublasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1, ldA, d_A+n1, lddA, strIn), status);
//wait for data to arrive
check_error_ret( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mInvAlpha, d_B, lddB,
d_A+n1, lddA,
&one, d_B+n1*lddB, lddB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*ldA, ldA, d_A+n1+n1*lddA, lddA,
h_B+n1*ldB, ldB, d_B+n1*lddB, lddB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
check_error_ret( cudaEventDestroy( eDataIn ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaEventDestroy( eComp ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
//#define DO_INLINE_BOUT 0
template<class T>
cublasStatus_t kblasXtrsm_cpu(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int ldA,
T *h_B, int ldB){
//allocate memory on device
T *d_A, *d_B;
int Am, An, Bm, Bn, lddA, lddB;
if ( side == CUBLAS_SIDE_LEFT ) {
Am = An = m;
} else {
Am = An = n;
}
Bm = m;
Bn = n;
lddA = ((Am+31)/32)*32;
lddB = ((Bm+31)/32)*32;
/*check_error_ret( cudaHostRegister((void*)h_A, Am * An * sizeof(T), cudaHostRegisterDefault), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaHostRegister((void*)h_B, Bm * Bn * sizeof(T), cudaHostRegisterDefault), CUBLAS_STATUS_INTERNAL_ERROR);*/
cublasStatus_t status;
//*
int AsyncEngineCount, devID;
check_error_ret( cudaGetDevice(&devID), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaDeviceGetAttribute(&AsyncEngineCount, cudaDevAttrAsyncEngineCount, devID), CUBLAS_STATUS_INTERNAL_ERROR);
bool DO_INLINE_BOUT = AsyncEngineCount > 1;
//*/
check_error_ret( cudaMalloc( (void**)&d_A, (lddA*An)*sizeof(T) ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaMalloc( (void**)&d_B, (lddB*Bn)*sizeof(T) ), CUBLAS_STATUS_INTERNAL_ERROR);
//setup streams
cudaStream_t inStream, outStream;
check_error_ret( cudaStreamCreateWithFlags( &inStream, cudaStreamNonBlocking), CUBLAS_STATUS_INTERNAL_ERROR );
if(DO_INLINE_BOUT)
check_error_ret( cudaStreamCreateWithFlags( &outStream, cudaStreamNonBlocking), CUBLAS_STATUS_INTERNAL_ERROR );
//call cpu API trsm
check_error_ret(
(status = kblasXtrsm(handle, inStream, outStream,
side, uplo, trans,diag,
m, n,
alpha, h_A, ldA, d_A, lddA,
h_B, ldB, d_B, lddB,
false, DO_INLINE_BOUT, false)
), status);
//sync streams
if(DO_INLINE_BOUT){
check_error_ret( cudaStreamSynchronize( outStream ), CUBLAS_STATUS_INTERNAL_ERROR);
}else{
cudaStream_t compStream;
check_error_ret( cublasGetStream_v2(handle, &compStream), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamSynchronize( compStream ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status = cublasGetMatrixAsync( m, n, sizeof(T), d_B, lddB, h_B, ldB, inStream), status);
}
//destroy streams
check_error_ret( cudaStreamDestroy( inStream ), CUBLAS_STATUS_INTERNAL_ERROR);
if(DO_INLINE_BOUT)
check_error_ret( cudaStreamDestroy( outStream ), CUBLAS_STATUS_INTERNAL_ERROR);
/*check_error_ret( cudaHostUnregister( (void*)h_A ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( cudaHostUnregister( (void*)h_B ), CUBLAS_STATUS_INTERNAL_ERROR );*/
//free device memory
check_error_ret( cudaFree( d_A ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( cudaFree( d_B ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
template<class T>
cublasStatus_t kblasXtrsm_cpu_m(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int ldA,
T *h_B, int ldB,
//TODO should accept an array of device IDs or a set of cublas handles instead
int ngpu){
//allocate memory on device
T *d_A[ngpu], *d_B[ngpu];
int Am, An, Bm, Bn, lddA, lddB;
if ( side == CUBLAS_SIDE_LEFT ) {
Am = An = m;
} else {
Am = An = n;
}
Bm = m;
Bn = n;
/*check_error_ret( cudaHostRegister((void*)h_A, Am * An * sizeof(T), cudaHostRegisterDefault), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaHostRegister((void*)h_B, Bm * Bn * sizeof(T), cudaHostRegisterDefault), CUBLAS_STATUS_INTERNAL_ERROR);*/
cudaStream_t inStream[ngpu], outStream[ngpu];
cublasStatus_t status[ngpu];
cublasHandle_t cub_handle[ngpu];
cub_handle[0] = handle;
//*
bool DO_INLINE_BOUT[ngpu];
for(int g = 0; g < ngpu; g++){
check_error_ret( cudaSetDevice(g), CUBLAS_STATUS_INTERNAL_ERROR);
int AsyncEngineCount;
check_error_ret( cudaDeviceGetAttribute(&AsyncEngineCount, cudaDevAttrAsyncEngineCount, g), CUBLAS_STATUS_INTERNAL_ERROR);
DO_INLINE_BOUT[g] = AsyncEngineCount > 1;
if(g > 0)
{
check_error_ret( cublasCreate(&cub_handle[g]), CUBLAS_STATUS_INTERNAL_ERROR);
}
//setup streams
check_error_ret( cudaStreamCreateWithFlags( &inStream[g], cudaStreamNonBlocking), CUBLAS_STATUS_INTERNAL_ERROR );
if(DO_INLINE_BOUT[g])
check_error_ret( cudaStreamCreateWithFlags( &outStream[g], cudaStreamNonBlocking), CUBLAS_STATUS_INTERNAL_ERROR );
}
//*/
//TODO IMPORTANT: handle when data does not fit on all gpus
int Bn_gpu, Bm_gpu;
bool left = (side == CUBLAS_SIDE_LEFT);
if(left){
Bn_gpu = Bn / ngpu;//TODO handle odd cases
Bm_gpu = Bm;
}else{
Bm_gpu = Bm / ngpu;
Bn_gpu = Bn;
}
lddA = ((Am+31)/32)*32;
lddB = ((Bm_gpu+31)/32)*32;
//
omp_set_num_threads(ngpu);
#pragma omp parallel
{
#pragma omp for
for(int g = 0; g < ngpu; g++){
//TODO check status
cudaSetDevice(g);
cudaMalloc( (void**)&d_A[g], (lddA*An)*sizeof(T) );
cudaMalloc( (void**)&d_B[g], (lddB*Bn_gpu)*sizeof(T) );
//call cpu API trsm
status[g] = kblasXtrsm(cub_handle[g], inStream[g], outStream[g],
side, uplo, trans,diag,
Bm_gpu, Bn_gpu,
alpha, h_A, ldA, d_A[g], lddA,
h_B+g*(left ? Bn_gpu*ldB : Bm_gpu), ldB, d_B[g], lddB,
false, DO_INLINE_BOUT[g], false);
//TODO check this status for error
//sync streams
if(DO_INLINE_BOUT[g]){
cudaStreamSynchronize( outStream[g] );
}else{
cudaStream_t compStream;
cublasGetStream_v2(cub_handle[g], &compStream);
cudaStreamSynchronize( compStream );
status[g] = cublasGetMatrixAsync( Bm_gpu, Bn_gpu, sizeof(T), d_B[g], lddB, h_B+g*(left ? Bn_gpu*ldB : Bm_gpu), ldB, inStream[g]);
cudaStreamSynchronize( inStream[g] );
}
//cudaDeviceSynchronize();
}
//#pragma omp barrier
}
/*/
for(int g = 0; g < ngpu; g++){
check_error_ret( cudaSetDevice(g), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaMalloc( (void**)&d_A[g], (Am*An)*sizeof(T) ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaMalloc( (void**)&d_B[g], (Bm_gpu*Bn_gpu)*sizeof(T) ), CUBLAS_STATUS_INTERNAL_ERROR);
//call cpu API trsm
check_error_ret(
(status[g] = kblasXtrsm(cub_handle[g], inStream[g], outStream[g],
side, uplo, trans,diag,
Bm_gpu, Bn_gpu,
alpha, h_A, incA, d_A[g],
h_B+g*(left ? Bn_gpu*incB : Bm_gpu), incB, d_B[g],
false, DO_INLINE_BOUT[g], false)
), status[g]);
}
for(int g = 0; g < ngpu; g++){
check_error_ret( cudaSetDevice(g), CUBLAS_STATUS_INTERNAL_ERROR );
//sync streams
if(DO_INLINE_BOUT[g]){
check_error_ret( cudaStreamSynchronize( outStream[g] ), CUBLAS_STATUS_INTERNAL_ERROR);
}else{
cudaStream_t compStream;
check_error_ret( cublasGetStream_v2(cub_handle[g], &compStream), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaStreamSynchronize( compStream ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( status[g] = cublasGetMatrixAsync( Bm_gpu, Bn_gpu, sizeof(T), d_B[g], incB, h_B+g*(left ? Bn_gpu*incB : Bm_gpu), incB, inStream[g]), status[g]);
cudaStreamSynchronize( inStream[g] );
}
}//*/
for(int g = 0; g < ngpu; g++){
check_error_ret( cudaSetDevice(g), CUBLAS_STATUS_INTERNAL_ERROR);
check_error_ret( cudaDeviceSynchronize(), CUBLAS_STATUS_INTERNAL_ERROR );
//destroy streams
check_error_ret( cudaStreamDestroy( inStream[g] ), CUBLAS_STATUS_INTERNAL_ERROR);
if(DO_INLINE_BOUT[g])
check_error_ret( cudaStreamDestroy( outStream[g] ), CUBLAS_STATUS_INTERNAL_ERROR);
/*check_error_ret( cudaHostUnregister( (void*)h_A ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( cudaHostUnregister( (void*)h_B ), CUBLAS_STATUS_INTERNAL_ERROR );*/
//free device memory
check_error_ret( cudaFree( d_A[g] ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error_ret( cudaFree( d_B[g] ), CUBLAS_STATUS_INTERNAL_ERROR );
if(g > 0)
{
check_error_ret( cublasDestroy(cub_handle[g]), CUBLAS_STATUS_INTERNAL_ERROR );
}
}
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
/*extern "C" {
int kblas_strsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB,
cudaStream_t stream){
check_error_ret(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB,
cudaStream_t stream){
check_error_ret(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int incA,
cuComplex *B, int incB,
cudaStream_t stream){
check_error_ret(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int incA,
cuDoubleComplex *B, int incB,
cudaStream_t stream){
check_error_ret(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_strsm(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm(
char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int incA,
cuComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm(
char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int incA,
cuDoubleComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
}*/
//==============================================================================================
#define kblasXtrsm_async_BODY { \
\
cublasHandle_t cublas_handle; \
check_error_ret( cublasCreate(&cublas_handle), void() ); \
if( cublasSetStream_v2(cublas_handle, stream) != CUBLAS_STATUS_SUCCESS ){ \
check_error_ret( cublasDestroy_v2(cublas_handle), void()); \
return; \
} \
cublasSideMode_t side_v2 = (side == KBLAS_Left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT); \
cublasFillMode_t uplo_v2 = (uplo == KBLAS_Lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER); \
cublasOperation_t trans_v2 = (trans == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N); \
cublasDiagType_t diag_v2 = (diag == KBLAS_Unit ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT); \
\
check_error_ret( kblasXtrsm(cublas_handle, \
side_v2, uplo_v2, trans_v2, diag_v2, \
m, n, \
&alpha, A, lda, \
B, ldb), void()); \
\
check_error_ret( cublasDestroy_v2(cublas_handle), void()); \
}
extern "C"{
void kblasStrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
void kblasDtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
void kblasCtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int lda,
cuComplex *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
void kblasZtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
}
//==============================================================================================
void kblasStrsm(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb){
kblasStrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasDtrsm(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb){
kblasDtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasCtrsm(char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int lda,
cuComplex *B, int ldb){
kblasCtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasZtrsm(char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
kblasZtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
//==============================================================================================
cublasStatus_t kblasStrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblasDtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblasCtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblasZtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
//==============================================================================================
cublasStatus_t kblas_strsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblas_dtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblas_ctrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblas_ztrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
//==============================================================================================
cublasStatus_t kblas_strsm_mgpu(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
cublasStatus_t kblas_dtrsm_mgpu(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
cublasStatus_t kblas_ctrsm_mgpu(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
cublasStatus_t kblas_ztrsm_mgpu(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb,
int ngpu){
return kblasXtrsm_cpu_m(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
ngpu);
}
|
0bbe0c62d192af26a507f6d410d4e4f7aa286fe4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Odd-even sort
* This will need to be called within a loop that runs from 0 to
* the ceiling of N/2 - 1, where N is the number of eigenvalues
* We assume a linear array of threads and it will be the caller's
* responsibility to ensure the thread indices are in bounds
* Note to self: There is a GPU Quicksort available, but I have to modify
* it to also move around eigenvectors... challenging, striving for accuracy
*/
__global__ void oddEvenEigSort( float *eigenvalues, float *eigenvectors, int N, int odd = 0 ) {
int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
elementNum *= 2;
if( odd ) {
elementNum++;
}
if( elementNum >= N - 1 ) {
return;
}
if( eigenvalues[elementNum] > eigenvalues[elementNum + 1] ) {
float tmp = eigenvalues[elementNum];
eigenvalues[elementNum] = eigenvalues[elementNum + 1];
eigenvalues[elementNum + 1] = tmp;
for( int i = 0; i < N; i++ ) {
tmp = eigenvectors[i * N + elementNum];
eigenvectors[i * N + elementNum] = eigenvectors[i * N + elementNum + 1];
eigenvectors[i * N + elementNum + 1] = tmp;
}
}
}
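//----------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): a minimal host-side
// driver that follows the calling convention described in the comment above.
// The pointer names d_eigenvalues / d_eigenvectors and the helper name
// oddEvenEigSortHost are assumptions for illustration only; the device arrays
// are assumed to be allocated and filled by the caller, with N eigenvalues.
void oddEvenEigSortHost( float *d_eigenvalues, float *d_eigenvectors, int N ) {
    int threads = 256;
    // one thread per pair of adjacent eigenvalues
    int blocks = ( (N + 1) / 2 + threads - 1 ) / threads;
    // ceil(N/2) iterations, each running an even phase then an odd phase,
    // give the N phases an odd-even transposition sort needs
    for( int pass = 0; pass < (N + 1) / 2; pass++ ) {
        hipLaunchKernelGGL( oddEvenEigSort, dim3(blocks), dim3(threads), 0, 0,
                            d_eigenvalues, d_eigenvectors, N, 0 );
        hipLaunchKernelGGL( oddEvenEigSort, dim3(blocks), dim3(threads), 0, 0,
                            d_eigenvalues, d_eigenvectors, N, 1 );
    }
    hipDeviceSynchronize();
}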
| 0bbe0c62d192af26a507f6d410d4e4f7aa286fe4.cu | /* Odd-even sort
* This will need to be called within a loop that runs from 0 to
* the ceiling of N/2 - 1, where N is the number of eigenvalues
* We assume a linear array of threads and it will be the caller's
* responsibility to ensure the thread indices are in bounds
* Note to self: There is a GPU Quicksort available, but I have to modify
* it to also move around eigenvectors... challenging, striving for accuracy
*/
__global__ void oddEvenEigSort( float *eigenvalues, float *eigenvectors, int N, int odd = 0 ) {
int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
elementNum *= 2;
if( odd ) {
elementNum++;
}
if( elementNum >= N - 1 ) {
return;
}
if( eigenvalues[elementNum] > eigenvalues[elementNum + 1] ) {
float tmp = eigenvalues[elementNum];
eigenvalues[elementNum] = eigenvalues[elementNum + 1];
eigenvalues[elementNum + 1] = tmp;
for( int i = 0; i < N; i++ ) {
tmp = eigenvectors[i * N + elementNum];
eigenvectors[i * N + elementNum] = eigenvectors[i * N + elementNum + 1];
eigenvectors[i * N + elementNum + 1] = tmp;
}
}
}
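//----------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): a minimal host-side
// driver that follows the calling convention described in the comment above.
// The pointer names d_eigenvalues / d_eigenvectors and the helper name
// oddEvenEigSortHost are assumptions for illustration only; the device arrays
// are assumed to be allocated and filled by the caller, with N eigenvalues.
void oddEvenEigSortHost( float *d_eigenvalues, float *d_eigenvectors, int N ) {
    int threads = 256;
    // one thread per pair of adjacent eigenvalues
    int blocks = ( (N + 1) / 2 + threads - 1 ) / threads;
    // ceil(N/2) iterations, each running an even phase then an odd phase,
    // give the N phases an odd-even transposition sort needs
    for( int pass = 0; pass < (N + 1) / 2; pass++ ) {
        oddEvenEigSort<<< blocks, threads >>>( d_eigenvalues, d_eigenvectors, N, 0 );
        oddEvenEigSort<<< blocks, threads >>>( d_eigenvalues, d_eigenvectors, N, 1 );
    }
    cudaDeviceSynchronize();
}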
|
2370046cac38443c0c811fba9e6e580232dd562d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx));
val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx));
val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx));
val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx));
}
}
output[index] = val;
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
float gamma = 1 - (xf - floor(xf)); // 1 - alpha: weight of the left (xL) samples
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
float gamma = 1 - (yf - floor(yf)); // 1 - beta: weight of the top (yT) samples
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
gradInput[index] = output;
}
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_update_output<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
//at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
output.data<float>(),
output_size,
output_stride,
kernel_size);
// }));
// TODO: ATen-equivalent check
// THCudaCheck(hipGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_backward_input1<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
//at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<float>(),
gradInput1_size,
gradInput1_stride,
kernel_size
);
// }));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_backward_input2<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
//at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<float>(),
gradInput2_size,
gradInput2_stride,
kernel_size
);
// }));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(hipGetLastError());
}
| 2370046cac38443c0c811fba9e6e580232dd562d.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx));
val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx));
val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx));
val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx));
}
}
output[index] = val;
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
        float gamma = 1 - (xf - floor(xf)); // 1 - alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
        float gamma = 1 - (yf - floor(yf)); // 1 - beta
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
gradInput[index] = output;
}
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
output.data<float>(),
output_size,
output_stride,
kernel_size);
// }));
// TODO: ATen-equivalent check
// THCudaCheck(cudaGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<float>(),
gradInput1_size,
gradInput1_stride,
kernel_size
);
// }));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<float>(),
gradInput2_size,
gradInput2_stride,
kernel_size
);
// }));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(cudaGetLastError());
}
|
4bfbdcc333cf24753e1a82a78eace20a02e1ce2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define DATA_TYPE 0 // 0-SP, 1-INT, 2-DP
#define VECTOR_SIZE 6000000
#define TILE_DIM 1024
#define COMP_ITERATIONS 8192
#define KERNEL_CALLS 1
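// Benchmark tunables: DATA_TYPE selects the arithmetic type (0 = float, 1 = int, 2 = double),
// VECTOR_SIZE is the number of elements processed, TILE_DIM is the threads-per-block,
// COMP_ITERATIONS is the length of the dependent log/cos/sin chain each thread runs, and
// KERNEL_CALLS is the number of timed launches. COMP_ITERATIONS and KERNEL_CALLS can be
// overridden from the command line. The kernel below is deliberately compute-bound: each
// thread loads a few values once and then iterates a serial chain of transcendental
// functions before writing four outputs.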
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4, int size, int compute_iters, int tile_dim)
{
int xIndex = blockIdx.x * tile_dim + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < size) {
ra=A[xIndex];
rb=A[size-xIndex];
rc=A[xIndex];
rd=A[size-xIndex];
// rb=A[xIndex];
for (int i=0;i<compute_iters;i++) {
ra = log(rb);
rb = cos(rc);
rc = log(rd);
rd = sin(ra);
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int compute_iters=COMP_ITERATIONS,
kernel_calls=KERNEL_CALLS,
vector_size=VECTOR_SIZE,
tile_dim=TILE_DIM;
if (argc > 3 || argc == 2) {
printf("\nError: Wrong number of arguments.\n\n");
printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]);
return -1;
}
if (argc == 3) {
kernel_calls = atoi(argv[2]);
compute_iters = atoi(argv[1]);
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
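    // Note: vector_size/tile_dim is integer division, so when VECTOR_SIZE is not a multiple
    // of TILE_DIM the last (VECTOR_SIZE % TILE_DIM) elements never get a thread; the bounds
    // check inside the kernel only guards against overrun, it does not add coverage.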
// CUDA events
hipEvent_t start, stop;
#if (DATA_TYPE == 0)
size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size);
// allocate host memory
float *h_iA = (float *) malloc(mem_size);
float *h_oC1 = (float *) malloc(mem_size);
float *h_oC2 = (float *) malloc(mem_size);
float *h_oC3 = (float *) malloc(mem_size);
float *h_oC4 = (float *) malloc(mem_size);
    // initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (float) i+3;
}
// allocate device memory
float *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
#elif (DATA_TYPE == 1)
size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size);
// allocate host memory
int *h_iA = (int *) malloc(mem_size);
int *h_oC1 = (int *) malloc(mem_size);
int *h_oC2 = (int *) malloc(mem_size);
int *h_oC3 = (int *) malloc(mem_size);
int *h_oC4 = (int *) malloc(mem_size);
    // initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (int) i+3;
}
// allocate device memory
int *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
#else
size_t mem_size = static_cast<size_t>(sizeof(double) * vector_size);
// allocate host memory
double *h_iA = (double *) malloc(mem_size);
double *h_oC1 = (double *) malloc(mem_size);
double *h_oC2 = (double *) malloc(mem_size);
double *h_oC3 = (double *) malloc(mem_size);
double *h_oC4 = (double *) malloc(mem_size);
    // initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (double) i+3;
}
// allocate device memory
double *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
#endif
hipMalloc((void **) &d_iA, mem_size);
hipMalloc((void **) &d_oC1, mem_size);
hipMalloc((void **) &d_oC2, mem_size);
hipMalloc((void **) &d_oC3, mem_size);
hipMalloc((void **) &d_oC4, mem_size);
// copy host data to device
hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
hipEventCreate(&start);
hipEventCreate(&stop);
// take measurements for loop over kernel launches
hipEventRecord(start, 0);
for (int i=0; i < kernel_calls; i++)
{
#if (DATA_TYPE == 0)
hipLaunchKernelGGL(( simpleKernel<float>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim);
#elif (DATA_TYPE == 1)
hipLaunchKernelGGL(( simpleKernel<int>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim);
#else
hipLaunchKernelGGL(( simpleKernel<double>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim);
#endif
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float kernelTime;
hipEventElapsedTime(&kernelTime, start, stop);
    // copy the results from the device back to the host
hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/kernel_calls);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/kernel_calls,
vector_size, 1, tile_dim * 1);
free(h_iA);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
hipFree(d_iA);
hipFree(d_oC1);
hipFree(d_oC2);
hipFree(d_oC3);
hipFree(d_oC4);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 4bfbdcc333cf24753e1a82a78eace20a02e1ce2c.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define DATA_TYPE 0 // 0-SP, 1-INT, 2-DP
#define VECTOR_SIZE 6000000
#define TILE_DIM 1024
#define COMP_ITERATIONS 8192
#define KERNEL_CALLS 1
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4, int size, int compute_iters, int tile_dim)
{
int xIndex = blockIdx.x * tile_dim + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < size) {
ra=A[xIndex];
rb=A[size-xIndex];
rc=A[xIndex];
rd=A[size-xIndex];
// rb=A[xIndex];
for (int i=0;i<compute_iters;i++) {
ra = log(rb);
rb = cos(rc);
rc = log(rd);
rd = sin(ra);
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int compute_iters=COMP_ITERATIONS,
kernel_calls=KERNEL_CALLS,
vector_size=VECTOR_SIZE,
tile_dim=TILE_DIM;
if (argc > 3 || argc == 2) {
printf("\nError: Wrong number of arguments.\n\n");
printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]);
return -1;
}
if (argc == 3) {
kernel_calls = atoi(argv[2]);
compute_iters = atoi(argv[1]);
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
cudaEvent_t start, stop;
#if (DATA_TYPE == 0)
size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size);
// allocate host memory
float *h_iA = (float *) malloc(mem_size);
float *h_oC1 = (float *) malloc(mem_size);
float *h_oC2 = (float *) malloc(mem_size);
float *h_oC3 = (float *) malloc(mem_size);
float *h_oC4 = (float *) malloc(mem_size);
    // initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (float) i+3;
}
// allocate device memory
float *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
#elif (DATA_TYPE == 1)
size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size);
// allocate host memory
int *h_iA = (int *) malloc(mem_size);
int *h_oC1 = (int *) malloc(mem_size);
int *h_oC2 = (int *) malloc(mem_size);
int *h_oC3 = (int *) malloc(mem_size);
int *h_oC4 = (int *) malloc(mem_size);
    // initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (int) i+3;
}
// allocate device memory
int *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
#else
size_t mem_size = static_cast<size_t>(sizeof(double) * vector_size);
// allocate host memory
double *h_iA = (double *) malloc(mem_size);
double *h_oC1 = (double *) malloc(mem_size);
double *h_oC2 = (double *) malloc(mem_size);
double *h_oC3 = (double *) malloc(mem_size);
double *h_oC4 = (double *) malloc(mem_size);
    // initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (double) i+3;
}
// allocate device memory
double *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
#endif
cudaMalloc((void **) &d_iA, mem_size);
cudaMalloc((void **) &d_oC1, mem_size);
cudaMalloc((void **) &d_oC2, mem_size);
cudaMalloc((void **) &d_oC3, mem_size);
cudaMalloc((void **) &d_oC4, mem_size);
// copy host data to device
cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < kernel_calls; i++)
{
#if (DATA_TYPE == 0)
simpleKernel<float><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim);
#elif (DATA_TYPE == 1)
simpleKernel<int><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim);
#else
simpleKernel<double><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim);
#endif
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
    // copy the results from the device back to the host
cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/kernel_calls);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/kernel_calls,
vector_size, 1, tile_dim * 1);
free(h_iA);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
cudaFree(d_iA);
cudaFree(d_oC1);
cudaFree(d_oC2);
cudaFree(d_oC3);
cudaFree(d_oC4);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
20f627cd88f5f04405decf1177c7557fadcb82a2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CSR.cpp
*
* Created on: 14 mars 2018
* Author: yoann
*/
#include "CSR.h"
#include <string>
#include <fstream>
#include <algorithm>
#include <iostream>
#include <ctime>
#include <omp.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h> // For CUDA runtime API
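// CSR sparse matrix-vector product kernel: one thread per matrix row. Thread idx walks the
// nonzeros of its row, as[irp[idx]] .. as[irp[idx+1]-1], multiplies each value by the vector
// entry selected by the column-index array ja, and writes the resulting dot product to
// solution[idx].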
__global__ void gpuVectorProduct(double* as,int* ja,int* irp, int M, int L,double* vector, double* solution){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < M){
double t = 0;
for(int j = irp[idx]; j <= irp[idx+1]-1;j++){
t += as[j]*vector[ja[j]];
}
solution[idx]=t;
}
}
CSR::CSR(const std::string filePath) {
// Open the file:
std::ifstream fin(filePath.c_str());
// Declare variables:
    int M, N, L; // Number of rows, number of columns, number of non-zero (NN) entries
int* column;
int* row;
double* entry;
// Ignore headers and comments:
while (fin.peek() == '%') fin.ignore(2048, '\n');
// Read defining parameters:
fin >> M >> N >> L;
this->M = M;
this->N = N;
this->L = L;
this->irp = new int[M+1];
this->as = new double[L];
this->ja = new int[L];
column = new int[L];
row = new int[L];
entry = new double[L];
// Read the data
for (int l = 0; l < L; l++)
{
int m,n;
double data;
fin >> m >> n >> data;
row[l] = m;
column[l] = n;
entry[l] = data;
}
fin.close();
//Parse into CSR
int position = 0;
for(int currentRow = 1; currentRow <= M ; currentRow++ ){
for(int i = 0 ; i < L ; i++){
if(row[i] == currentRow ){
this->as[position] = entry[i];
this->ja[position] = column[i]-1;
position++;
}
}
}
this->irp[0] = 0;
int oldValue(0);
position = 1;
    for(int i = 1 ; i <= M ; i++){// For each matrix row i
int nbValuePerRow = 0;
        for(int j = 0 ; j < L ; j++ ){// count the NN entries that belong to row i
if(row[j] == i){
nbValuePerRow++;
}
}
this->irp[position] = oldValue+nbValuePerRow;
position++;
oldValue += nbValuePerRow;
}
}
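// Illustrative example of the CSR layout built by the constructor above: for the 3x4 matrix
// [ 5 0 0 2 ]
// [ 0 0 3 0 ]
// [ 1 0 0 4 ]
// it produces as = {5, 2, 3, 1, 4}, ja = {0, 3, 2, 0, 3} (0-based column indices) and
// irp = {0, 2, 3, 5}, so the entries of row i occupy positions irp[i] .. irp[i+1]-1 of as/ja.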
/**
 * Compute the sparse matrix-vector product sequentially (single thread).
*/
double CSR::serialVectorProduct(double* vector, double* solution){
double begin = omp_get_wtime();
double t;
for(int i = 0; i < (*this).M; i++ ){
t = 0;
for(int j = (*this).irp[i]; j <= (*this).irp[i+1]-1;j++){
t += as[j]*vector[ja[j]];
}
solution[i]=t;
}
double end = omp_get_wtime();
return double(end - begin) ;
}
/**
 * Compute the sparse matrix-vector product in parallel using OpenMP.
*/
double CSR::openMPVectorProduct(double* vector, double* solution,int nCore){
double begin = omp_get_wtime();
#pragma omp parallel num_threads(nCore) shared(vector,solution)
{
double t;
int chunk = this->M/omp_get_num_threads();
#pragma omp for schedule(static,chunk)
for(int i = 0; i < (*this).M; i++ ){
t = 0;
for(int j = (*this).irp[i]; j <= (*this).irp[i+1]-1;j++){
t += as[j]*vector[ja[j]];
}
solution[i]=t;
}
}
double end = omp_get_wtime();
return double(end - begin) ;
}
/**
 * Overload of the << operator.
*/
std::ostream& operator<<(std::ostream& os, CSR& obj)
{
os << "M: " << obj.M << std::endl;
os << "N: " << obj.N << std::endl;
os << "AS: " << std::endl;
for(int i = 0; i < obj.L; i ++){
os << obj.as[i] << " ";
}
os << std::endl;
os << "IRP: " << std::endl;
for(int i = 0; i < obj.M+1; i ++){
os << obj.irp[i] << " ";
}
os << std::endl;
os << "JA: " << std::endl;
for(int i = 0; i < obj.L ; i ++){
os << obj.ja[i] << " ";
}
os << std::endl;
return os;
}
double CSR::cudaVectorProduct(double* vector, double* solution){
// Allocate global memory on the GPU.
double* d_vector = 0 ;
double* d_solution = 0;
int* d_irp = 0;
int* d_ja = 0;
double* d_as = 0;
hipMalloc((void**) &d_vector, this->getN() * sizeof(double));
hipMalloc((void**) &d_solution, this->getM() * sizeof(double));
hipMalloc((void**) &d_ja, this->getL() * sizeof(int));
hipMalloc((void**) &d_as, this->getL() * sizeof(double));
hipMalloc((void**) &d_irp, (this->getM()+1) * sizeof(int));
// Copy vectors from the host (CPU) to the device (GPU).
hipMemcpy(d_vector, vector, this->getN() * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_ja, this->getJA(), this->getL() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_as, this->getAS(), this->getL() * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_irp, this->getIRP(), (this->getM()+1) * sizeof(int), hipMemcpyHostToDevice);
    // Launch the kernel and measure its execution time
int nbBlock = 1;
int nbThread = this->getM();
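    // One thread per matrix row: cap the block size at 1024 threads (a common hardware
    // per-block limit) and spread the remaining rows over additional blocks.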
if(nbThread >= 1024){
nbBlock = (this->getM()/1024)+1;
nbThread = 1024;
}
double begin = omp_get_wtime();
hipLaunchKernelGGL(( gpuVectorProduct), dim3(nbBlock),dim3(nbThread), 0, 0, d_as,d_ja,d_irp,this->getM(),this->getL(),d_vector,d_solution);
hipDeviceSynchronize();
double end = omp_get_wtime();
//get back the result from the GPU
hipMemcpy(solution, d_solution, this->getM() * sizeof(double), hipMemcpyDeviceToHost);
//Clean Memory
hipFree(d_vector);
hipFree(d_ja);
hipFree(d_as);
hipFree(d_irp);
hipFree(d_solution);
return double(end - begin) ;
}
int CSR::getM(){
return this->M;
}
int CSR::getN(){
return this->N;
}
int CSR::getL(){
return this->L;
}
int* CSR::getJA(){
return this->ja;
}
int* CSR::getIRP(){
return this->irp;
}
double* CSR::getAS(){
return this->as;
}
CSR::~CSR() {
}
| 20f627cd88f5f04405decf1177c7557fadcb82a2.cu | /*
* CSR.cpp
*
* Created on: 14 mars 2018
* Author: yoann
*/
#include "CSR.h"
#include <string>
#include <fstream>
#include <algorithm>
#include <iostream>
#include <ctime>
#include <omp.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cuda_runtime.h> // For CUDA runtime API
__global__ void gpuVectorProduct(double* as,int* ja,int* irp, int M, int L,double* vector, double* solution){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < M){
double t = 0;
for(int j = irp[idx]; j <= irp[idx+1]-1;j++){
t += as[j]*vector[ja[j]];
}
solution[idx]=t;
}
}
CSR::CSR(const std::string filePath) {
// Open the file:
std::ifstream fin(filePath.c_str());
// Declare variables:
    int M, N, L; // Number of rows, number of columns, number of non-zero (NN) entries
int* column;
int* row;
double* entry;
// Ignore headers and comments:
while (fin.peek() == '%') fin.ignore(2048, '\n');
// Read defining parameters:
fin >> M >> N >> L;
this->M = M;
this->N = N;
this->L = L;
this->irp = new int[M+1];
this->as = new double[L];
this->ja = new int[L];
column = new int[L];
row = new int[L];
entry = new double[L];
// Read the data
for (int l = 0; l < L; l++)
{
int m,n;
double data;
fin >> m >> n >> data;
row[l] = m;
column[l] = n;
entry[l] = data;
}
fin.close();
//Parse into CSR
int position = 0;
for(int currentRow = 1; currentRow <= M ; currentRow++ ){
for(int i = 0 ; i < L ; i++){
if(row[i] == currentRow ){
this->as[position] = entry[i];
this->ja[position] = column[i]-1;
position++;
}
}
}
this->irp[0] = 0;
int oldValue(0);
position = 1;
    for(int i = 1 ; i <= M ; i++){// For each matrix row i
int nbValuePerRow = 0;
        for(int j = 0 ; j < L ; j++ ){// count the NN entries that belong to row i
if(row[j] == i){
nbValuePerRow++;
}
}
this->irp[position] = oldValue+nbValuePerRow;
position++;
oldValue += nbValuePerRow;
}
}
/**
 * Compute the sparse matrix-vector product sequentially (single thread).
*/
double CSR::serialVectorProduct(double* vector, double* solution){
double begin = omp_get_wtime();
double t;
for(int i = 0; i < (*this).M; i++ ){
t = 0;
for(int j = (*this).irp[i]; j <= (*this).irp[i+1]-1;j++){
t += as[j]*vector[ja[j]];
}
solution[i]=t;
}
double end = omp_get_wtime();
return double(end - begin) ;
}
/**
 * Compute the sparse matrix-vector product in parallel using OpenMP.
*/
double CSR::openMPVectorProduct(double* vector, double* solution,int nCore){
double begin = omp_get_wtime();
#pragma omp parallel num_threads(nCore) shared(vector,solution)
{
double t;
int chunk = this->M/omp_get_num_threads();
#pragma omp for schedule(static,chunk)
for(int i = 0; i < (*this).M; i++ ){
t = 0;
for(int j = (*this).irp[i]; j <= (*this).irp[i+1]-1;j++){
t += as[j]*vector[ja[j]];
}
solution[i]=t;
}
}
double end = omp_get_wtime();
return double(end - begin) ;
}
/**
 * Overload of the << operator.
*/
std::ostream& operator<<(std::ostream& os, CSR& obj)
{
os << "M: " << obj.M << std::endl;
os << "N: " << obj.N << std::endl;
os << "AS: " << std::endl;
for(int i = 0; i < obj.L; i ++){
os << obj.as[i] << " ";
}
os << std::endl;
os << "IRP: " << std::endl;
for(int i = 0; i < obj.M+1; i ++){
os << obj.irp[i] << " ";
}
os << std::endl;
os << "JA: " << std::endl;
for(int i = 0; i < obj.L ; i ++){
os << obj.ja[i] << " ";
}
os << std::endl;
return os;
}
double CSR::cudaVectorProduct(double* vector, double* solution){
// Allocate global memory on the GPU.
double* d_vector = 0 ;
double* d_solution = 0;
int* d_irp = 0;
int* d_ja = 0;
double* d_as = 0;
cudaMalloc((void**) &d_vector, this->getN() * sizeof(double));
cudaMalloc((void**) &d_solution, this->getM() * sizeof(double));
cudaMalloc((void**) &d_ja, this->getL() * sizeof(int));
cudaMalloc((void**) &d_as, this->getL() * sizeof(double));
cudaMalloc((void**) &d_irp, (this->getM()+1) * sizeof(int));
// Copy vectors from the host (CPU) to the device (GPU).
cudaMemcpy(d_vector, vector, this->getN() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_ja, this->getJA(), this->getL() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_as, this->getAS(), this->getL() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_irp, this->getIRP(), (this->getM()+1) * sizeof(int), cudaMemcpyHostToDevice);
    // Launch the kernel and measure its execution time
int nbBlock = 1;
int nbThread = this->getM();
if(nbThread >= 1024){
nbBlock = (this->getM()/1024)+1;
nbThread = 1024;
}
double begin = omp_get_wtime();
gpuVectorProduct<<<nbBlock,nbThread>>>(d_as,d_ja,d_irp,this->getM(),this->getL(),d_vector,d_solution);
cudaDeviceSynchronize();
double end = omp_get_wtime();
//get back the result from the GPU
cudaMemcpy(solution, d_solution, this->getM() * sizeof(double), cudaMemcpyDeviceToHost);
//Clean Memory
cudaFree(d_vector);
cudaFree(d_ja);
cudaFree(d_as);
cudaFree(d_irp);
cudaFree(d_solution);
return double(end - begin) ;
}
int CSR::getM(){
return this->M;
}
int CSR::getN(){
return this->N;
}
int CSR::getL(){
return this->L;
}
int* CSR::getJA(){
return this->ja;
}
int* CSR::getIRP(){
return this->irp;
}
double* CSR::getAS(){
return this->as;
}
CSR::~CSR() {
}
|
80580dc7d885b9b38997cc646a718e6d82e09200.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <opencv2/opencv.hpp>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
std::cerr << "Input Image: " << image.rows << " x " << image.cols << " x "
<< image.channels() << std::endl;
return image;
}
void save_image(const char* output_filename,
float* buffer,
int height,
int width) {
cv::Mat output_image(height, width, CV_32FC3, buffer);
// Make negative values zero.
cv::threshold(output_image,
output_image,
/*threshold=*/0,
/*maxval=*/0,
cv::THRESH_TOZERO);
cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
output_image.convertTo(output_image, CV_8UC3);
cv::imwrite(output_filename, output_image);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
int main(int argc, const char* argv[]) {
if (argc < 2) {
std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
std::exit(EXIT_FAILURE);
}
int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
std::cerr << "GPU: " << gpu_id << std::endl;
bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;
cv::Mat image = load_image(argv[1]);
hipSetDevice(gpu_id);
cudnnHandle_t cudnn;
cudnnCreate(&cudnn);
// Input
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
// Kernel
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/3,
/*in_channels=*/3,
/*kernel_height=*/3,
/*kernel_width=*/3));
// Convolution
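  // Pad = 1, stride = 1, dilation = 1 with the 3x3 filter used below keeps the output the
  // same spatial size as the input; CUDNN_CROSS_CORRELATION applies the filter without
  // flipping it, as is conventional in deep-learning frameworks.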
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
int batch_size{0}, channels{0}, height{0}, width{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
input_descriptor,
kernel_descriptor,
&batch_size,
&channels,
&height,
&width));
std::cerr << "Output Image: " << height << " x " << width << " x " << channels
<< std::endl;
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
// CUDNN_CONVOLUTION_FWD_ALGO_GEMM
// CUDNN_CONVOLUTION_FWD_ALGO_FFT
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
assert(workspace_bytes > 0);
void* d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_bytes);
int image_bytes = batch_size * channels * height * width * sizeof(float);
float* d_input{nullptr};
hipMalloc(&d_input, image_bytes);
hipMemcpy(d_input, image.ptr<float>(0), image_bytes, hipMemcpyHostToDevice);
float* d_output{nullptr};
hipMalloc(&d_output, image_bytes);
hipMemset(d_output, 0, image_bytes);
// clang-format off
const float kernel_template[3][3] = {
{1, 1, 1},
{1, -8, 1},
{1, 1, 1}
};
// clang-format on
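  // The 3x3 template above is a discrete Laplacian (edge-detection) filter; it is copied
  // into every (output channel, input channel) slice of the 3x3x3x3 weight tensor below.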
float h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
float* d_kernel{nullptr};
hipMalloc(&d_kernel, sizeof(h_kernel));
hipMemcpy(d_kernel, h_kernel, sizeof(h_kernel), hipMemcpyHostToDevice);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
if (with_sigmoid) {
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_SIGMOID,
CUDNN_PROPAGATE_NAN,
/*relu_coef=*/0));
checkCUDNN(cudnnActivationForward(cudnn,
activation_descriptor,
&alpha,
output_descriptor,
d_output,
&beta,
output_descriptor,
d_output));
cudnnDestroyActivationDescriptor(activation_descriptor);
}
float* h_output = new float[image_bytes];
hipMemcpy(h_output, d_output, image_bytes, hipMemcpyDeviceToHost);
save_image("cudnn-out.png", h_output, height, width);
delete[] h_output;
hipFree(d_kernel);
hipFree(d_input);
hipFree(d_output);
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
| 80580dc7d885b9b38997cc646a718e6d82e09200.cu | #include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <opencv2/opencv.hpp>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
std::cerr << "Input Image: " << image.rows << " x " << image.cols << " x "
<< image.channels() << std::endl;
return image;
}
void save_image(const char* output_filename,
float* buffer,
int height,
int width) {
cv::Mat output_image(height, width, CV_32FC3, buffer);
// Make negative values zero.
cv::threshold(output_image,
output_image,
/*threshold=*/0,
/*maxval=*/0,
cv::THRESH_TOZERO);
cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
output_image.convertTo(output_image, CV_8UC3);
cv::imwrite(output_filename, output_image);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
int main(int argc, const char* argv[]) {
if (argc < 2) {
std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
std::exit(EXIT_FAILURE);
}
int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
std::cerr << "GPU: " << gpu_id << std::endl;
bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;
cv::Mat image = load_image(argv[1]);
cudaSetDevice(gpu_id);
cudnnHandle_t cudnn;
cudnnCreate(&cudnn);
// Input
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
// Kernel
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/3,
/*in_channels=*/3,
/*kernel_height=*/3,
/*kernel_width=*/3));
// Convolution
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
int batch_size{0}, channels{0}, height{0}, width{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
input_descriptor,
kernel_descriptor,
&batch_size,
&channels,
&height,
&width));
std::cerr << "Output Image: " << height << " x " << width << " x " << channels
<< std::endl;
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
// CUDNN_CONVOLUTION_FWD_ALGO_GEMM
// CUDNN_CONVOLUTION_FWD_ALGO_FFT
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
assert(workspace_bytes > 0);
void* d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_bytes);
int image_bytes = batch_size * channels * height * width * sizeof(float);
float* d_input{nullptr};
cudaMalloc(&d_input, image_bytes);
cudaMemcpy(d_input, image.ptr<float>(0), image_bytes, cudaMemcpyHostToDevice);
float* d_output{nullptr};
cudaMalloc(&d_output, image_bytes);
cudaMemset(d_output, 0, image_bytes);
// clang-format off
const float kernel_template[3][3] = {
{1, 1, 1},
{1, -8, 1},
{1, 1, 1}
};
// clang-format on
float h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
float* d_kernel{nullptr};
cudaMalloc(&d_kernel, sizeof(h_kernel));
cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
if (with_sigmoid) {
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_SIGMOID,
CUDNN_PROPAGATE_NAN,
/*relu_coef=*/0));
checkCUDNN(cudnnActivationForward(cudnn,
activation_descriptor,
&alpha,
output_descriptor,
d_output,
&beta,
output_descriptor,
d_output));
cudnnDestroyActivationDescriptor(activation_descriptor);
}
float* h_output = new float[image_bytes];
cudaMemcpy(h_output, d_output, image_bytes, cudaMemcpyDeviceToHost);
save_image("cudnn-out.png", h_output, height, width);
delete[] h_output;
cudaFree(d_kernel);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
|
53b8f240ec1285e9657da7b7f40cba7273e23ab2.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolyhedron<64>
template hipError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron<64> >(const hpmc_free_volume_args_t &args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeSpheropolyhedron<64> >(const hpmc_args_t& args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron<64> >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron<64> >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 53b8f240ec1285e9657da7b7f40cba7273e23ab2.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolyhedron<64>
template cudaError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron<64> >(const hpmc_free_volume_args_t &args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeSpheropolyhedron<64> >(const hpmc_args_t& args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron<64> >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron<64> >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron<64> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
954801db981470effc68a5259968aa639aa44940.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha, Wei Hu
// =============================================================================
//
// Base class for processing boundary-condition-enforcing (BCE) marker forces
// in an FSI system.
// =============================================================================
#include "chrono_fsi/physics/ChBce.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include <type_traits>
namespace chrono {
namespace fsi {
//--------------------------------------------------------------------------------------------------------------------------------
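// Double-precision atomicAdd emulated with a 64-bit compare-and-swap loop: the double is
// reinterpreted as an unsigned long long and the update is retried until no other thread
// has modified the address between the read and the CAS. Used where a native double
// atomicAdd is not guaranteed to be available.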
__device__ double atomicAdd_double(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Populate_RigidSPH_MeshPos_LRF_D(Real3* rigidSPH_MeshPos_LRF_D,
Real4* posRadD,
uint* rigidIdentifierD,
Real3* posRigidD,
Real4* qD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numRigidMarkers)
return;
int rigidIndex = rigidIdentifierD[index];
uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers;
Real4 q4 = qD[rigidIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 dist3 = mR3(posRadD[rigidMarkerIndex]) - posRigidD[rigidIndex];
Real3 dist3LF = InverseRotate_By_RotationMatrix_DeviceHost(a1, a2, a3, dist3);
    // Save the coordinates in the local reference frame of the rigid body
rigidSPH_MeshPos_LRF_D[index] = dist3LF;
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Populate_FlexSPH_MeshPos_LRF_D(Real3* FlexSPH_MeshPos_LRF_D,
Real3* FlexSPH_MeshPos_LRF_H,
Real4* posRadD,
uint* FlexIdentifierD,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
Real3* pos_fsi_fea_D) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numFlexMarkers)
return;
    // The coordinates of the BCE markers in the local reference frame were already
    // calculated when they were created, so they only need to be copied from host to device here
FlexSPH_MeshPos_LRF_D[index] = FlexSPH_MeshPos_LRF_H[index];
// No need to do it again. Keep this code in case of any issues later
/*int FlexIndex = FlexIdentifierD[index];
uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers;
int numFlex1D = numObjectsD.numFlexBodies1D;
Real Spacing = paramsD.HSML * paramsD.MULT_INITSPACE_Shells;
if (FlexIndex < numFlex1D) {
uint2 cableNodes = CableElementsNodesD[FlexIndex];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[cableNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[cableNodes.y];
Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - pos_fsi_fea_D_nA;
Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA;
Real Cable_x = length(x_dir);
x_dir = x_dir / length(x_dir);
Real norm_dir_length = length(cross(dist3, x_dir));
Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y);
y_dir = y_dir / length(y_dir);
Real3 z_dir = cross(x_dir, y_dir);
Real dx = dot(dist3, x_dir);
Real dy = dot(dist3, y_dir);
Real dz = dot(dist3, z_dir);
if (abs(dy) > 0)
dy /= Spacing;
if (abs(dz) > 0)
dz /= Spacing;
FlexSPH_MeshPos_LRF_D[index] = mR3(dx / Cable_x, dy, dz);
}
if (FlexIndex >= numFlex1D) {
uint4 shellNodes = ShellElementsNodesD[FlexIndex - numFlex1D];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y];
Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z];
Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w];
Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD);
Real Shell_x = 0.25 * length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nD);
Real Shell_y = 0.25 * length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nB);
Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - Shell_center;
Real3 physic_to_natural = mR3(1.0 / Shell_x, 1.0 / Shell_y, 1);
Real3 pos_physical = FlexSPH_MeshPos_LRF_H[index];
Real3 pos_natural = mR3(pos_physical.x * physic_to_natural.x, pos_physical.y * physic_to_natural.y,
pos_physical.z * physic_to_natural.z);
Real3 n1 = normalize(cross(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA, pos_fsi_fea_D_nC - pos_fsi_fea_D_nB));
Real3 n2 = normalize(cross(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB, pos_fsi_fea_D_nD - pos_fsi_fea_D_nC));
Real3 n3 = normalize(cross(pos_fsi_fea_D_nD - pos_fsi_fea_D_nC, pos_fsi_fea_D_nA - pos_fsi_fea_D_nD));
Real3 n4 = normalize(cross(pos_fsi_fea_D_nA - pos_fsi_fea_D_nD, pos_fsi_fea_D_nB - pos_fsi_fea_D_nA));
Real3 Normal = normalize(n1 + n2 + n3 + n4);
Real zSide = dot(Normal, dist3) / Spacing;
FlexSPH_MeshPos_LRF_D[index] = FlexSPH_MeshPos_LRF_H[index];
}*/
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Calc_Rigid_FSI_Forces_Torques_D(Real3* rigid_FSI_ForcesD,
Real3* rigid_FSI_TorquesD,
Real4* derivVelRhoD,
Real4* derivVelRhoD_old,
Real4* posRadD,
uint* rigidIdentifierD,
Real3* posRigidD,
Real3* rigidSPH_MeshPos_LRF_D) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numRigidMarkers)
return;
int RigidIndex = rigidIdentifierD[index];
uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers;
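    // Blend the current and previous accelerations (relaxation factor Beta), scale by the
    // marker mass to get the force this BCE marker transfers to its rigid body, and
    // accumulate force and moment (about posRigidD) atomically into the body totals.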
Real3 Force = (mR3(derivVelRhoD[rigidMarkerIndex]) * paramsD.Beta +
mR3(derivVelRhoD_old[rigidMarkerIndex]) * (1 - paramsD.Beta)) * paramsD.markerMass;
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].x), Force.x);
atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].y), Force.y);
        atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].z), Force.z);
} else {
atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].x), Force.x);
atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].y), Force.y);
atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].z), Force.z);
}
Real3 dist3 = Distance(mR3(posRadD[rigidMarkerIndex]), posRigidD[RigidIndex]);
Real3 mtorque = cross(dist3, Force);
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x);
atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y);
atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z);
} else {
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x);
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y);
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
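// Distributes each flexible-BCE marker force to the nodes of its owning FE element using the
// element shape functions evaluated at the marker's parametric position: linear weights over
// the two nodes of a 1-D cable element, bilinear weights over the four nodes of a shell
// element. Atomic adds are required because many markers map to the same node.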
__global__ void Calc_Flex_FSI_ForcesD(Real3* FlexSPH_MeshPos_LRF_D,
uint* FlexIdentifierD,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
Real4* derivVelRhoD,
Real4* derivVelRhoD_old,
Real3* pos_fsi_fea_D,
Real3* Flex_FSI_ForcesD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numFlexMarkers)
return;
int FlexIndex = FlexIdentifierD[index];
uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers;
Real3 Force = (mR3(derivVelRhoD[FlexMarkerIndex]) * paramsD.Beta +
mR3(derivVelRhoD_old[FlexMarkerIndex]) * (1 - paramsD.Beta)) * paramsD.markerMass;
int numFlex1D = numObjectsD.numFlexBodies1D;
if (FlexIndex < numFlex1D) {
// Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x);
// Real NA = N_cable.x;
// Real NB = N_cable.y;
Real NA = 1 - FlexSPH_MeshPos_LRF_D[index].x;
Real NB = FlexSPH_MeshPos_LRF_D[index].x;
int nA = CableElementsNodesD[FlexIndex].x;
int nB = CableElementsNodesD[FlexIndex].y;
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
} else {
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
}
}
if (FlexIndex >= numFlex1D) {
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
int nA = ShellElementsNodesD[FlexIndex - numFlex1D].x;
int nB = ShellElementsNodesD[FlexIndex - numFlex1D].y;
int nC = ShellElementsNodesD[FlexIndex - numFlex1D].z;
int nD = ShellElementsNodesD[FlexIndex - numFlex1D].w;
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].x), NC * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].y), NC * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].z), NC * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].x), ND * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].y), ND * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].z), ND * Force.z);
} else {
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nC].x), NC * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nC].y), NC * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nC].z), NC * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nD].x), ND * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nD].y), ND * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nD].z), ND * Force.z);
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
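// Accumulate, over one grid cell, the kernel-weighted sums from neighboring fluid markers
// (velocity, density-weighted distance, pressure, stresses, and kernel weights) needed by the
// ADAMI boundary treatment. Markers with rhoPresMu.w > -0.5 (non-fluid markers) are skipped.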
__device__ void BCE_modification_Share(Real3& sumVW,
Real3& sumRhoRW,
Real& sumPW,
Real3& sumTauXxYyZzW,
Real3& sumTauXyXzYzW,
Real& sumWFluid,
int& isAffectedV,
int& isAffectedP,
int3 gridPos,
Real3 posRadA,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* cellStart,
uint* cellEnd) {
uint gridHash = calcGridHash(gridPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
Real4 rhoPresMuB = sortedRhoPreMu[j];
Real kernel_radius = RESOLUTION_LENGTH_MULT * paramsD.HSML;
if (dd > kernel_radius * kernel_radius || rhoPresMuB.w > -0.5)
continue;
Real d = length(dist3);
Real Wd = W3h(d, sortedPosRad[j].w);
Real3 velMasB = sortedVelMas[j];
sumVW += velMasB * Wd;
sumRhoRW += rhoPresMuB.x * dist3 * Wd;
sumPW += rhoPresMuB.y * Wd;
sumWFluid += Wd;
sumTauXxYyZzW += sortedTauXxYyZz[j] * Wd;
sumTauXyXzYzW += sortedTauXyXzYz[j] * Wd;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
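// Apply the ADAMI boundary condition to one BCE marker: extrapolate the fluid velocity as
// v_BCE = 2 * v_body - sum(v W) / sum(W) and the pressure as
// p_BCE = [sum(p W) + (g - a_body) . sum(rho r W)] / sum(W), where a_body is the acceleration
// of the owning rigid/flexible body at the marker. Stresses are treated analogously when
// elastic SPH is enabled. If no fluid neighbor is found, rest-state values are assigned.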
__global__ void BCE_VelocityPressureStress(Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* tauXxYyZz_ModifiedBCE,
Real3* tauXyXzYz_ModifiedBCE,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* cellStart,
uint* cellEnd,
uint* mapOriginalToSorted,
uint* extendedActivityIdD,
Real3* bceAcc,
int2 newPortion,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint sphIndex = index + newPortion.x;
if (index >= newPortion.y - newPortion.x)
return;
// no need to do anything if it is not an active particle
uint originalIndex = sphIndex;
uint activity = extendedActivityIdD[originalIndex];
if (activity == 0)
return;
uint bceIndex = index;
if (paramsD.bceTypeWall == BceVersion::ORIGINAL)
bceIndex = index + numObjectsD.numBoundaryMarkers;
uint idA = mapOriginalToSorted[sphIndex];
Real4 rhoPreMuA = sortedRhoPreMu[idA];
Real3 posRadA = mR3(sortedPosRad[idA]);
Real3 velMasA = sortedVelMas[idA];
int isAffectedV = 0;
int isAffectedP = 0;
Real3 sumVW = mR3(0);
Real3 sumRhoRW = mR3(0);
Real sumPW = 0;
Real sumWFluid = 0;
Real3 sumTauXxYyZzW = mR3(0);
Real3 sumTauXyXzYzW = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
BCE_modification_Share(sumVW, sumRhoRW, sumPW, sumTauXxYyZzW, sumTauXyXzYzW, sumWFluid, isAffectedV,
isAffectedP, neighbourPos, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu,
sortedTauXxYyZz, sortedTauXyXzYz, cellStart, cellEnd);
}
}
}
if (abs(sumWFluid) > EPSILON) {
// modify velocity
Real3 modifiedBCE_v = 2 * velMasA - sumVW / sumWFluid;
velMas_ModifiedBCE[bceIndex] = modifiedBCE_v;
// modify pressure and stress
Real3 aW = mR3(0.0);
if (rhoPreMuA.w > 0.5 && rhoPreMuA.w < 1.5) {
// Get acceleration of rigid body's BCE particle
int rigidBceIndex = sphIndex - numObjectsD.startRigidMarkers;
if (rigidBceIndex < 0 || rigidBceIndex >= numObjectsD.numRigidMarkers) {
                printf(
                    "Error! particle index out of bound: thrown from "
                    "ChBce.cu, BCE_VelocityPressureStress!\n");
*isErrorD = true;
return;
}
aW = bceAcc[rigidBceIndex];
}
if (rhoPreMuA.w > 1.5 && rhoPreMuA.w < 3.5) {
// Get acceleration of flexible body's BCE particle
int flexBceIndex = sphIndex - numObjectsD.startFlexMarkers;
if (flexBceIndex < 0 || flexBceIndex >= numObjectsD.numFlexMarkers) {
                printf(
                    "Error! particle index out of bound: thrown from "
                    "ChBce.cu, BCE_VelocityPressureStress!\n");
*isErrorD = true;
return;
}
aW = bceAcc[flexBceIndex + numObjectsD.numRigidMarkers];
}
Real pressure = (sumPW + dot(paramsD.gravity - aW, sumRhoRW)) / sumWFluid;
Real density = InvEos(pressure);
rhoPreMu_ModifiedBCE[bceIndex] = mR4(density, pressure, rhoPreMuA.z, rhoPreMuA.w);
if (paramsD.elastic_SPH) {
Real3 tauXxYyZz = (sumTauXxYyZzW + dot(paramsD.gravity - aW, sumRhoRW)) / sumWFluid;
Real3 tauXyXzYz = sumTauXyXzYzW / sumWFluid;
tauXxYyZz_ModifiedBCE[bceIndex] = mR3(tauXxYyZz.x, tauXxYyZz.y, tauXxYyZz.z);
tauXyXzYz_ModifiedBCE[bceIndex] = mR3(tauXyXzYz.x, tauXyXzYz.y, tauXyXzYz.z);
}
} else {
rhoPreMu_ModifiedBCE[bceIndex] = mR4(paramsD.rho0, paramsD.BASEPRES, paramsD.mu0, rhoPreMuA.w);
velMas_ModifiedBCE[bceIndex] = mR3(0.0);
if (paramsD.elastic_SPH) {
tauXxYyZz_ModifiedBCE[bceIndex] = mR3(0.0);
tauXyXzYz_ModifiedBCE[bceIndex] = mR3(0.0);
}
}
sortedVelMas[idA] = velMas_ModifiedBCE[bceIndex];
sortedRhoPreMu[idA] = rhoPreMu_ModifiedBCE[bceIndex];
if (paramsD.elastic_SPH) {
sortedTauXxYyZz[idA] = tauXxYyZz_ModifiedBCE[bceIndex];
sortedTauXyXzYz[idA] = tauXyXzYz_ModifiedBCE[bceIndex];
}
}
//--------------------------------------------------------------------------------------------------------------------------------
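// Evaluate the acceleration of each rigid-body BCE marker in the global frame:
// a = a_cm + A * (omega x (omega x r)) + A * (alpha x r), with r the marker position in the
// body-local frame and A the rotation matrix built from the body quaternion.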
__global__ void CalcRigidBceAccelerationD(Real3* bceAcc,
Real4* q_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
Real3* rigidSPH_MeshPos_LRF_D,
const uint* rigidIdentifierD) {
uint bceIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (bceIndex >= numObjectsD.numRigidMarkers)
return;
int rigidBodyIndex = rigidIdentifierD[bceIndex];
// linear acceleration (CM)
Real3 acc3 = accRigid_fsiBodies_D[rigidBodyIndex];
Real4 q4 = q_fsiBodies_D[rigidBodyIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 wVel3 = omegaVelLRF_fsiBodies_D[rigidBodyIndex];
Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[bceIndex];
Real3 wVelCrossS = cross(wVel3, rigidSPH_MeshPos_LRF);
Real3 wVelCrossWVelCrossS = cross(wVel3, wVelCrossS);
    // centrifugal acceleration
acc3 += mR3(dot(a1, wVelCrossWVelCrossS), dot(a2, wVelCrossWVelCrossS), dot(a3, wVelCrossWVelCrossS));
Real3 wAcc3 = omegaAccLRF_fsiBodies_D[rigidBodyIndex];
Real3 wAccCrossS = cross(wAcc3, rigidSPH_MeshPos_LRF);
// tangential acceleration
acc3 += mR3(dot(a1, wAccCrossS), dot(a2, wAccCrossS), dot(a3, wAccCrossS));
bceAcc[bceIndex] = acc3;
}
//--------------------------------------------------------------------------------------------------------------------------------
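// Evaluate the acceleration of each flexible-body BCE marker by interpolating the nodal
// accelerations of its owning cable or shell element with the element shape functions.
// Results are stored after the rigid-marker entries in bceAcc.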
__global__ void CalcFlexBceAccelerationD(Real3* bceAcc,
Real3* acc_fsi_fea_D,
Real3* FlexSPH_MeshPos_LRF_D,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
const uint* FlexIdentifierD) {
uint bceIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (bceIndex >= numObjectsD.numFlexMarkers)
return;
int FlexIndex = FlexIdentifierD[bceIndex];
int numFlex1D = numObjectsD.numFlexBodies1D;
int numFlex2D = numObjectsD.numFlexBodies2D;
// BCE acc on cable elements
if (FlexIndex < numFlex1D) {
uint2 CableNodes = CableElementsNodesD[FlexIndex];
// Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[bceIndex].x);
// Real NA = N_cable.x;
// Real NB = N_cable.y;
Real NA = 1 - FlexSPH_MeshPos_LRF_D[bceIndex].x;
Real NB = FlexSPH_MeshPos_LRF_D[bceIndex].x;
Real3 acc_fsi_fea_D_nA = acc_fsi_fea_D[CableNodes.x];
Real3 acc_fsi_fea_D_nB = acc_fsi_fea_D[CableNodes.y];
bceAcc[bceIndex + numObjectsD.numRigidMarkers] = NA * acc_fsi_fea_D_nA + NB * acc_fsi_fea_D_nB;
}
// BCE acc on shell elements
    if (FlexIndex >= numFlex1D && FlexIndex < numFlex1D + numFlex2D) {
uint4 shellNodes = ShellElementsNodesD[FlexIndex - numFlex1D];
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[bceIndex].x, FlexSPH_MeshPos_LRF_D[bceIndex].y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
Real3 acc_fsi_fea_D_nA = acc_fsi_fea_D[shellNodes.x];
Real3 acc_fsi_fea_D_nB = acc_fsi_fea_D[shellNodes.y];
Real3 acc_fsi_fea_D_nC = acc_fsi_fea_D[shellNodes.z];
Real3 acc_fsi_fea_D_nD = acc_fsi_fea_D[shellNodes.w];
bceAcc[bceIndex + numObjectsD.numRigidMarkers] =
NA * acc_fsi_fea_D_nA + NB * acc_fsi_fea_D_nB + NC * acc_fsi_fea_D_nC + ND * acc_fsi_fea_D_nD;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
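// Advance the position and velocity of rigid-body BCE markers from the body state:
// x = x_cm + A * r_local and v = v_cm + A * (omega x r_local).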
__global__ void UpdateRigidMarkersPositionVelocityD(Real4* posRadD,
Real3* velMasD,
Real3* rigidSPH_MeshPos_LRF_D,
uint* rigidIdentifierD,
Real3* posRigidD,
Real4* velMassRigidD,
Real3* omegaLRF_D,
Real4* qD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numRigidMarkers)
return;
uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers;
int rigidBodyIndex = rigidIdentifierD[index];
Real4 q4 = qD[rigidBodyIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[index];
// position
Real h = posRadD[rigidMarkerIndex].w;
Real3 p_Rigid = posRigidD[rigidBodyIndex];
Real3 pos =
p_Rigid + mR3(dot(a1, rigidSPH_MeshPos_LRF), dot(a2, rigidSPH_MeshPos_LRF), dot(a3, rigidSPH_MeshPos_LRF));
posRadD[rigidMarkerIndex] = mR4(pos, h);
// velocity
Real4 vM_Rigid = velMassRigidD[rigidBodyIndex];
Real3 omega3 = omegaLRF_D[rigidBodyIndex];
Real3 omegaCrossS = cross(omega3, rigidSPH_MeshPos_LRF);
velMasD[rigidMarkerIndex] = mR3(vM_Rigid) + mR3(dot(a1, omegaCrossS), dot(a2, omegaCrossS), dot(a3, omegaCrossS));
}
//--------------------------------------------------------------------------------------------------------------------------------
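// Advance the position and velocity of flexible-body BCE markers by interpolating the FE nodal
// positions, directions, and velocities with the element shape functions, then offsetting the
// marker along the local transverse directions (cable) or the element normal (shell).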
__global__ void UpdateFlexMarkersPositionVelocityD(Real4* posRadD,
Real3* FlexSPH_MeshPos_LRF_D,
Real3* velMasD,
const uint* FlexIdentifierD,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* dir_fsi_fea_D) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numFlexMarkers)
return;
uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers;
uint FlexIndex = FlexIdentifierD[index];
uint numFlex1D = numObjectsD.numFlexBodies1D;
Real Spacing = paramsD.HSML * paramsD.MULT_INITSPACE_Shells;
if (FlexIndex < numFlex1D) {
uint2 CableNodes = CableElementsNodesD[FlexIndex];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[CableNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[CableNodes.y];
Real3 dir_fsi_fea_D_nA = dir_fsi_fea_D[CableNodes.x];
Real3 dir_fsi_fea_D_nB = dir_fsi_fea_D[CableNodes.y];
//// TODO, the direction should be calculated based on node direction and shape function
// Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA;
// x_dir = x_dir / length(x_dir);
Real l = length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA);
Real4 N_dir = Cables_ShapeFunctionsDerivatives(l, FlexSPH_MeshPos_LRF_D[index].x);
Real3 x_dir = normalize(N_dir.x * pos_fsi_fea_D_nA + N_dir.y * dir_fsi_fea_D_nA + N_dir.z * pos_fsi_fea_D_nB +
N_dir.w * dir_fsi_fea_D_nB);
Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y);
y_dir = y_dir / length(y_dir);
Real3 z_dir = cross(x_dir, y_dir);
Real4 N_cable = Cables_ShapeFunctions(l, FlexSPH_MeshPos_LRF_D[index].x);
Real NA = N_cable.x;
Real NAdir = N_cable.y;
Real NB = N_cable.z;
Real NBdir = N_cable.w;
Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[CableNodes.x];
Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[CableNodes.y];
Real h = posRadD[FlexMarkerIndex].w;
Real3 tempPos = NA * pos_fsi_fea_D_nA + NAdir * dir_fsi_fea_D_nA + NB * pos_fsi_fea_D_nB +
NBdir * dir_fsi_fea_D_nB + FlexSPH_MeshPos_LRF_D[index].y * y_dir +
FlexSPH_MeshPos_LRF_D[index].z * z_dir;
posRadD[FlexMarkerIndex] = mR4(tempPos, h);
velMasD[FlexMarkerIndex] = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB;
}
if (FlexIndex >= numFlex1D) {
uint4 shellNodes = ShellElementsNodesD[FlexIndex - numFlex1D];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y];
Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z];
Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w];
Real3 dir_fsi_fea_D_nA = dir_fsi_fea_D[shellNodes.x];
Real3 dir_fsi_fea_D_nB = dir_fsi_fea_D[shellNodes.y];
Real3 dir_fsi_fea_D_nC = dir_fsi_fea_D[shellNodes.z];
Real3 dir_fsi_fea_D_nD = dir_fsi_fea_D[shellNodes.w];
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
Real3 Normal =
normalize(NA * dir_fsi_fea_D_nA + NB * dir_fsi_fea_D_nB + NC * dir_fsi_fea_D_nC + ND * dir_fsi_fea_D_nD);
Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[shellNodes.x];
Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[shellNodes.y];
Real3 vel_fsi_fea_D_nC = vel_fsi_fea_D[shellNodes.z];
Real3 vel_fsi_fea_D_nD = vel_fsi_fea_D[shellNodes.w];
Real h = posRadD[FlexMarkerIndex].w;
Real3 tempPos = NA * pos_fsi_fea_D_nA + NB * pos_fsi_fea_D_nB + NC * pos_fsi_fea_D_nC + ND * pos_fsi_fea_D_nD +
Normal * FlexSPH_MeshPos_LRF_D[index].z * Spacing;
posRadD[FlexMarkerIndex] = mR4(tempPos, h);
velMasD[FlexMarkerIndex] =
NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB + NC * vel_fsi_fea_D_nC + ND * vel_fsi_fea_D_nD;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
ChBce::ChBce(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
bool verb)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
fsiGeneralData(otherFsiGeneralData),
paramsH(otherParamsH),
numObjectsH(otherNumObjects),
verbose(verb) {
totalForceRigid.resize(0);
totalTorqueRigid.resize(0);
}
ChBce::~ChBce() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Initialize(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
std::vector<int> fsiBodyBceNum,
std::vector<int> fsiShellBceNum,
std::vector<int> fsiCableBceNum) {
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
CopyParams_NumberOfObjects(paramsH, numObjectsH);
    // Resize the per-body accumulators for the total fluid force and torque on each rigid body
totalForceRigid.resize(numObjectsH->numRigidBodies);
totalTorqueRigid.resize(numObjectsH->numRigidBodies);
int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
int haveRigid = (numObjectsH->numRigidBodies > 0) ? 1 : 0;
int haveFlex1D = (numObjectsH->numFlexBodies1D > 0) ? 1 : 0;
int haveFlex2D = (numObjectsH->numFlexBodies2D > 0) ? 1 : 0;
int num = haveHelper + haveGhost + haveRigid + haveFlex1D + haveFlex2D + 1;
int numFlexRigidBoundaryMarkers =
fsiGeneralData->referenceArray[num].y - fsiGeneralData->referenceArray[haveHelper + haveGhost].y;
if (verbose) {
printf("Total number of BCE particles = %d\n", numFlexRigidBoundaryMarkers);
if (paramsH->bceType == BceVersion::ADAMI)
printf("Boundary condition for rigid and flexible body is: ADAMI\n");
if (paramsH->bceType == BceVersion::ORIGINAL)
printf("Boundary condition for rigid and flexible body is: ORIGINAL\n");
if (paramsH->bceTypeWall == BceVersion::ADAMI)
printf("Boundary condition for fixed wall is: ADAMI\n");
if (paramsH->bceTypeWall == BceVersion::ORIGINAL)
printf("Boundary condition for fixed wall is: ORIGINAL\n");
}
auto numAllBce = numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers;
if ((int)numAllBce != numFlexRigidBoundaryMarkers) {
        throw std::runtime_error(
            "Error! Number of flexible, rigid, and boundary markers "
            "is inconsistent with the reference array!\n");
}
velMas_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
rhoPreMu_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
tauXxYyZz_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
tauXyXzYz_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
// Populate local position of BCE markers - on rigid bodies
if (haveRigid)
Populate_RigidSPH_MeshPos_LRF(sphMarkersD, fsiBodiesD, fsiBodyBceNum);
// Populate local position of BCE markers - on flexible bodies
if (haveFlex1D || haveFlex2D)
Populate_FlexSPH_MeshPos_LRF(sphMarkersD, fsiMeshD, fsiShellBceNum, fsiCableBceNum);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Populate_RigidSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::vector<int> fsiBodyBceNum) {
// Create map between a BCE on a rigid body and the associated body ID
uint start_bce = 0;
for (int irigid = 0; irigid < fsiBodyBceNum.size(); irigid++) {
uint end_bce = start_bce + fsiBodyBceNum[irigid];
thrust::fill(fsiGeneralData->rigidIdentifierD.begin() + start_bce,
fsiGeneralData->rigidIdentifierD.begin() + end_bce, irigid);
start_bce = end_bce;
}
uint nBlocks, nThreads;
computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks, nThreads);
hipLaunchKernelGGL(( Populate_RigidSPH_MeshPos_LRF_D), dim3(nBlocks), dim3(nThreads), 0, 0,
mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR4CAST(sphMarkersD->posRadD),
U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR4CAST(fsiBodiesD->q_fsiBodies_D));
hipDeviceSynchronize();
cudaCheckError();
UpdateRigidMarkersPositionVelocity(sphMarkersD, fsiBodiesD);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Populate_FlexSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
std::vector<int> fsiShellBceNum,
std::vector<int> fsiCableBceNum) {
// Create map between a BCE on a flexible body and the associated flexible body ID
uint start_bce = 0;
for (uint icable = 0; icable < fsiCableBceNum.size(); icable++) {
uint end_bce = start_bce + fsiCableBceNum[icable];
thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce,
fsiGeneralData->FlexIdentifierD.begin() + end_bce, icable);
start_bce = end_bce;
}
for (uint ishell = 0; ishell < fsiShellBceNum.size(); ishell++) {
uint end_bce = start_bce + fsiShellBceNum[ishell];
thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce,
fsiGeneralData->FlexIdentifierD.begin() + end_bce, ishell + fsiCableBceNum.size());
start_bce = end_bce;
}
uint nBlocks, nThreads;
computeGridSize((uint)numObjectsH->numFlexMarkers, 256, nBlocks, nThreads);
thrust::device_vector<Real3> FlexSPH_MeshPos_LRF_H = fsiGeneralData->FlexSPH_MeshPos_LRF_H;
hipLaunchKernelGGL(( Populate_FlexSPH_MeshPos_LRF_D), dim3(nBlocks), dim3(nThreads), 0, 0,
mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(FlexSPH_MeshPos_LRF_H), mR4CAST(sphMarkersD->posRadD),
U1CAST(fsiGeneralData->FlexIdentifierD), U2CAST(fsiGeneralData->CableElementsNodesD),
U4CAST(fsiGeneralData->ShellElementsNodesD), mR3CAST(fsiMeshD->pos_fsi_fea_D));
hipDeviceSynchronize();
cudaCheckError();
UpdateFlexMarkersPositionVelocity(sphMarkersD, fsiMeshD);
}
//--------------------------------------------------------------------------------------------------------------------------------
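// Launch BCE_VelocityPressureStress over the selected portion of BCE markers. A device-side
// error flag is allocated, checked on the host after the kernel completes, and converted into
// an exception if any marker index was found out of bounds.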
void ChBce::ReCalcVelocityPressureStress_BCE(thrust::device_vector<Real3>& velMas_ModifiedBCE,
thrust::device_vector<Real4>& rhoPreMu_ModifiedBCE,
thrust::device_vector<Real3>& tauXxYyZz_ModifiedBCE,
thrust::device_vector<Real3>& tauXyXzYz_ModifiedBCE,
const thrust::device_vector<Real4>& sortedPosRad,
const thrust::device_vector<Real3>& sortedVelMas,
const thrust::device_vector<Real4>& sortedRhoPreMu,
const thrust::device_vector<Real3>& sortedTauXxYyZz,
const thrust::device_vector<Real3>& sortedTauXyXzYz,
const thrust::device_vector<uint>& cellStart,
const thrust::device_vector<uint>& cellEnd,
const thrust::device_vector<uint>& mapOriginalToSorted,
const thrust::device_vector<uint>& extendedActivityIdD,
const thrust::device_vector<Real3>& bceAcc,
int4 updatePortion) {
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
// thread per particle
int2 newPortion = mI2(updatePortion.x, updatePortion.w);
if (paramsH->bceTypeWall == BceVersion::ORIGINAL) {
        // Apply the ADAMI BC only to rigid/flexible body BCE markers;
        // keep the simple (ORIGINAL) treatment for the fixed wall to avoid unnecessary cost.
newPortion = mI2(updatePortion.y, updatePortion.w);
}
uint numBCE = newPortion.y - newPortion.x;
uint numThreads, numBlocks;
computeGridSize(numBCE, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( BCE_VelocityPressureStress), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(velMas_ModifiedBCE), mR4CAST(rhoPreMu_ModifiedBCE), mR3CAST(tauXxYyZz_ModifiedBCE),
mR3CAST(tauXyXzYz_ModifiedBCE), mR4CAST(sortedPosRad), mR3CAST(sortedVelMas), mR4CAST(sortedRhoPreMu),
mR3CAST(sortedTauXxYyZz), mR3CAST(sortedTauXyXzYz), U1CAST(cellStart), U1CAST(cellEnd),
U1CAST(mapOriginalToSorted), U1CAST(extendedActivityIdD), mR3CAST(bceAcc), newPortion, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in new_BCE_VelocityPressure!\n");
hipFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::CalcRigidBceAcceleration(thrust::device_vector<Real3>& bceAcc,
const thrust::device_vector<Real4>& q_fsiBodies_D,
const thrust::device_vector<Real3>& accRigid_fsiBodies_D,
const thrust::device_vector<Real3>& omegaVelLRF_fsiBodies_D,
const thrust::device_vector<Real3>& omegaAccLRF_fsiBodies_D,
const thrust::device_vector<Real3>& rigidSPH_MeshPos_LRF_D,
const thrust::device_vector<uint>& rigidIdentifierD) {
// thread per particle
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numRigidMarkers, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( CalcRigidBceAccelerationD), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(bceAcc), mR4CAST(q_fsiBodies_D), mR3CAST(accRigid_fsiBodies_D), mR3CAST(omegaVelLRF_fsiBodies_D),
mR3CAST(omegaAccLRF_fsiBodies_D), mR3CAST(rigidSPH_MeshPos_LRF_D), U1CAST(rigidIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::CalcFlexBceAcceleration(thrust::device_vector<Real3>& bceAcc,
const thrust::device_vector<Real3>& acc_fsi_fea_D,
const thrust::device_vector<Real3>& FlexSPH_MeshPos_LRF_D,
const thrust::device_vector<int2>& CableElementsNodesD,
const thrust::device_vector<int4>& ShellElementsNodesD,
const thrust::device_vector<uint>& FlexIdentifierD) {
// thread per particle
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numFlexMarkers, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( CalcFlexBceAccelerationD), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(bceAcc), mR3CAST(acc_fsi_fea_D),
mR3CAST(FlexSPH_MeshPos_LRF_D), U2CAST(CableElementsNodesD),
U4CAST(ShellElementsNodesD), U1CAST(FlexIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
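// Select the BCE treatment per the simulation parameters: with ADAMI, body BCE accelerations
// are computed first and the velocity/pressure/stress of wall and/or body BCE markers are
// re-extrapolated from the fluid; with ORIGINAL, the unmodified marker states are simply
// copied into the *_ModifiedBCE arrays.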
void ChBce::ModifyBceVelocityPressureStress(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD) {
auto size_ref = fsiGeneralData->referenceArray.size();
auto numBceMarkers = fsiGeneralData->referenceArray[size_ref - 1].y - fsiGeneralData->referenceArray[0].y;
auto N_all = numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers;
if ((int)N_all != numBceMarkers) {
        throw std::runtime_error(
            "Error! Number of rigid, flexible, and boundary markers is "
            "inconsistent. Thrown from ModifyBceVelocityPressureStress!\n");
}
if (!(velMas_ModifiedBCE.size() == numBceMarkers && rhoPreMu_ModifiedBCE.size() == numBceMarkers &&
tauXxYyZz_ModifiedBCE.size() == numBceMarkers && tauXyXzYz_ModifiedBCE.size() == numBceMarkers)) {
        throw std::runtime_error(
            "Error! Inconsistent sizes of velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, "
            "tauXxYyZz_ModifiedBCE, or tauXyXzYz_ModifiedBCE. "
            "Thrown from ModifyBceVelocityPressureStress!\n");
}
// Update portion set to boundary, rigid, and flexible BCE particles
int4 updatePortion = mI4(fsiGeneralData->referenceArray[0].y, fsiGeneralData->referenceArray[1].y,
fsiGeneralData->referenceArray[2].y, fsiGeneralData->referenceArray[3].y);
// Only update boundary BCE particles if no rigid/flexible particles
if (size_ref == 2) {
updatePortion.z = fsiGeneralData->referenceArray[1].y;
updatePortion.w = fsiGeneralData->referenceArray[1].y;
}
// Update boundary and rigid/flexible BCE particles
if (size_ref == 3)
updatePortion.w = fsiGeneralData->referenceArray[2].y;
if (paramsH->bceType == BceVersion::ADAMI) {
// ADAMI boundary condition (wall, rigid, flexible)
// Calculate the acceleration of rigid/flexible BCE particles if exist, used for ADAMI BC
thrust::device_vector<Real3> bceAcc(numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers);
// Acceleration of rigid BCE particles
if (numObjectsH->numRigidMarkers > 0) {
CalcRigidBceAcceleration(bceAcc, fsiBodiesD->q_fsiBodies_D, fsiBodiesD->accRigid_fsiBodies_D,
fsiBodiesD->omegaVelLRF_fsiBodies_D, fsiBodiesD->omegaAccLRF_fsiBodies_D,
fsiGeneralData->rigidSPH_MeshPos_LRF_D, fsiGeneralData->rigidIdentifierD);
}
// Acceleration of flexible BCE particles
if (numObjectsH->numFlexMarkers > 0) {
CalcFlexBceAcceleration(bceAcc, fsiMeshD->acc_fsi_fea_D, fsiGeneralData->FlexSPH_MeshPos_LRF_D,
fsiGeneralData->CableElementsNodesD, fsiGeneralData->ShellElementsNodesD,
fsiGeneralData->FlexIdentifierD);
}
if (paramsH->bceTypeWall == BceVersion::ORIGINAL) {
// ADAMI BC for rigid/flexible body, ORIGINAL BC for fixed wall
thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.y,
velMas_ModifiedBCE.begin());
thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x,
sphMarkersD->rhoPresMuD.begin() + updatePortion.y, rhoPreMu_ModifiedBCE.begin());
if (paramsH->elastic_SPH) {
thrust::copy(sphMarkersD->tauXxYyZzD.begin() + updatePortion.x,
sphMarkersD->tauXxYyZzD.begin() + updatePortion.y, tauXxYyZz_ModifiedBCE.begin());
thrust::copy(sphMarkersD->tauXyXzYzD.begin() + updatePortion.x,
sphMarkersD->tauXyXzYzD.begin() + updatePortion.y, tauXyXzYz_ModifiedBCE.begin());
}
if (numObjectsH->numRigidMarkers > 0 || numObjectsH->numFlexMarkers > 0) {
ReCalcVelocityPressureStress_BCE(
velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, tauXxYyZz_ModifiedBCE, tauXyXzYz_ModifiedBCE,
sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD,
sortedSphMarkersD->tauXxYyZzD, sortedSphMarkersD->tauXyXzYzD, markersProximityD->cellStartD,
markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted,
fsiGeneralData->extendedActivityIdD, bceAcc, updatePortion);
}
} else if (paramsH->bceTypeWall == BceVersion::ADAMI) {
// ADAMI BC for both rigid/flexible body and fixed wall
ReCalcVelocityPressureStress_BCE(
velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, tauXxYyZz_ModifiedBCE, tauXyXzYz_ModifiedBCE,
sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD,
sortedSphMarkersD->tauXxYyZzD, sortedSphMarkersD->tauXyXzYzD, markersProximityD->cellStartD,
markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted,
fsiGeneralData->extendedActivityIdD, bceAcc, updatePortion);
}
bceAcc.clear();
} else {
// ORIGINAL boundary condition for all boundaries (wall, rigid, flexible)
thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.w,
velMas_ModifiedBCE.begin());
thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x,
sphMarkersD->rhoPresMuD.begin() + updatePortion.w, rhoPreMu_ModifiedBCE.begin());
if (paramsH->elastic_SPH) {
thrust::copy(sphMarkersD->tauXxYyZzD.begin() + updatePortion.x,
sphMarkersD->tauXxYyZzD.begin() + updatePortion.w, tauXxYyZz_ModifiedBCE.begin());
thrust::copy(sphMarkersD->tauXyXzYzD.begin() + updatePortion.x,
sphMarkersD->tauXyXzYzD.begin() + updatePortion.w, tauXyXzYz_ModifiedBCE.begin());
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Rigid_Forces_Torques(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD) {
if (numObjectsH->numRigidBodies == 0)
return;
thrust::fill(fsiGeneralData->rigid_FSI_ForcesD.begin(), fsiGeneralData->rigid_FSI_ForcesD.end(), mR3(0));
thrust::fill(fsiGeneralData->rigid_FSI_TorquesD.begin(), fsiGeneralData->rigid_FSI_TorquesD.end(), mR3(0));
uint nBlocks, nThreads;
computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks, nThreads);
hipLaunchKernelGGL(( Calc_Rigid_FSI_Forces_Torques_D), dim3(nBlocks), dim3(nThreads), 0, 0,
mR3CAST(fsiGeneralData->rigid_FSI_ForcesD), mR3CAST(fsiGeneralData->rigid_FSI_TorquesD),
mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old), mR4CAST(sphMarkersD->posRadD),
U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D));
hipDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Flex_Forces(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD) {
if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0)
return;
thrust::fill(fsiGeneralData->Flex_FSI_ForcesD.begin(), fsiGeneralData->Flex_FSI_ForcesD.end(), mR3(0));
uint nBlocks, nThreads;
computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks, nThreads);
hipLaunchKernelGGL(( Calc_Flex_FSI_ForcesD), dim3(nBlocks), dim3(nThreads), 0, 0,
mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), U1CAST(fsiGeneralData->FlexIdentifierD),
U2CAST(fsiGeneralData->CableElementsNodesD), U4CAST(fsiGeneralData->ShellElementsNodesD),
mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old),
mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiGeneralData->Flex_FSI_ForcesD));
hipDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::UpdateRigidMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD) {
if (numObjectsH->numRigidBodies == 0)
return;
uint nBlocks, nThreads;
computeGridSize((int)numObjectsH->numRigidMarkers, 256, nBlocks, nThreads);
hipLaunchKernelGGL(( UpdateRigidMarkersPositionVelocityD), dim3(nBlocks), dim3(nThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D),
U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR4CAST(fsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(fsiBodiesD->omegaVelLRF_fsiBodies_D),
mR4CAST(fsiBodiesD->q_fsiBodies_D));
hipDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::UpdateFlexMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiMeshDataD> fsiMeshD) {
if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0)
return;
uint nBlocks, nThreads;
computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks, nThreads);
hipLaunchKernelGGL(( UpdateFlexMarkersPositionVelocityD), dim3(nBlocks), dim3(nThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(sphMarkersD->velMasD),
U1CAST(fsiGeneralData->FlexIdentifierD), U2CAST(fsiGeneralData->CableElementsNodesD),
U4CAST(fsiGeneralData->ShellElementsNodesD), mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiMeshD->vel_fsi_fea_D),
mR3CAST(fsiMeshD->dir_fsi_fea_D));
hipDeviceSynchronize();
cudaCheckError();
}
} // end namespace fsi
} // end namespace chrono
// =============================================================================
// 954801db981470effc68a5259968aa639aa44940.cu
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha, Wei Hu
// =============================================================================
//
// Base class for processing boundary condition enforcing (bce) markers forces
// in FSI system.
// =============================================================================
#include "chrono_fsi/physics/ChBce.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include <type_traits>
namespace chrono {
namespace fsi {
//--------------------------------------------------------------------------------------------------------------------------------
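// Software emulation of atomicAdd for double precision, implemented with a compare-and-swap
// loop on the 64-bit integer representation (native double-precision atomicAdd is only
// available on devices of compute capability 6.0 and higher).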
__device__ double atomicAdd_double(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
//--------------------------------------------------------------------------------------------------------------------------------
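// Express each rigid-body BCE marker position in the local reference frame of its body:
// r_local = A^T * (x_marker - x_cm), using the inverse rotation from the body quaternion.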
__global__ void Populate_RigidSPH_MeshPos_LRF_D(Real3* rigidSPH_MeshPos_LRF_D,
Real4* posRadD,
uint* rigidIdentifierD,
Real3* posRigidD,
Real4* qD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numRigidMarkers)
return;
int rigidIndex = rigidIdentifierD[index];
uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers;
Real4 q4 = qD[rigidIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 dist3 = mR3(posRadD[rigidMarkerIndex]) - posRigidD[rigidIndex];
Real3 dist3LF = InverseRotate_By_RotationMatrix_DeviceHost(a1, a2, a3, dist3);
    // Save the coordinates in the local reference frame of the rigid body
rigidSPH_MeshPos_LRF_D[index] = dist3LF;
}
//--------------------------------------------------------------------------------------------------------------------------------
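// Copy the precomputed local (element) coordinates of flexible-body BCE markers from the host
// array to the device array; the commented-out block below shows how these coordinates could
// be recomputed on the device if ever needed.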
__global__ void Populate_FlexSPH_MeshPos_LRF_D(Real3* FlexSPH_MeshPos_LRF_D,
Real3* FlexSPH_MeshPos_LRF_H,
Real4* posRadD,
uint* FlexIdentifierD,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
Real3* pos_fsi_fea_D) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numFlexMarkers)
return;
    // The BCE coordinates in the local reference frame are already computed at creation time,
    // so they only need to be copied from host to device here.
FlexSPH_MeshPos_LRF_D[index] = FlexSPH_MeshPos_LRF_H[index];
// No need to do it again. Keep this code in case of any issues later
/*int FlexIndex = FlexIdentifierD[index];
uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers;
int numFlex1D = numObjectsD.numFlexBodies1D;
Real Spacing = paramsD.HSML * paramsD.MULT_INITSPACE_Shells;
if (FlexIndex < numFlex1D) {
uint2 cableNodes = CableElementsNodesD[FlexIndex];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[cableNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[cableNodes.y];
Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - pos_fsi_fea_D_nA;
Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA;
Real Cable_x = length(x_dir);
x_dir = x_dir / length(x_dir);
Real norm_dir_length = length(cross(dist3, x_dir));
Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y);
y_dir = y_dir / length(y_dir);
Real3 z_dir = cross(x_dir, y_dir);
Real dx = dot(dist3, x_dir);
Real dy = dot(dist3, y_dir);
Real dz = dot(dist3, z_dir);
if (abs(dy) > 0)
dy /= Spacing;
if (abs(dz) > 0)
dz /= Spacing;
FlexSPH_MeshPos_LRF_D[index] = mR3(dx / Cable_x, dy, dz);
}
if (FlexIndex >= numFlex1D) {
uint4 shellNodes = ShellElementsNodesD[FlexIndex - numFlex1D];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y];
Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z];
Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w];
Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD);
Real Shell_x = 0.25 * length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nD);
Real Shell_y = 0.25 * length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nB);
Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - Shell_center;
Real3 physic_to_natural = mR3(1.0 / Shell_x, 1.0 / Shell_y, 1);
Real3 pos_physical = FlexSPH_MeshPos_LRF_H[index];
Real3 pos_natural = mR3(pos_physical.x * physic_to_natural.x, pos_physical.y * physic_to_natural.y,
pos_physical.z * physic_to_natural.z);
Real3 n1 = normalize(cross(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA, pos_fsi_fea_D_nC - pos_fsi_fea_D_nB));
Real3 n2 = normalize(cross(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB, pos_fsi_fea_D_nD - pos_fsi_fea_D_nC));
Real3 n3 = normalize(cross(pos_fsi_fea_D_nD - pos_fsi_fea_D_nC, pos_fsi_fea_D_nA - pos_fsi_fea_D_nD));
Real3 n4 = normalize(cross(pos_fsi_fea_D_nA - pos_fsi_fea_D_nD, pos_fsi_fea_D_nB - pos_fsi_fea_D_nA));
Real3 Normal = normalize(n1 + n2 + n3 + n4);
Real zSide = dot(Normal, dist3) / Spacing;
FlexSPH_MeshPos_LRF_D[index] = FlexSPH_MeshPos_LRF_H[index];
}*/
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Calc_Rigid_FSI_Forces_Torques_D(Real3* rigid_FSI_ForcesD,
Real3* rigid_FSI_TorquesD,
Real4* derivVelRhoD,
Real4* derivVelRhoD_old,
Real4* posRadD,
uint* rigidIdentifierD,
Real3* posRigidD,
Real3* rigidSPH_MeshPos_LRF_D) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numRigidMarkers)
return;
int RigidIndex = rigidIdentifierD[index];
uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers;
Real3 Force = (mR3(derivVelRhoD[rigidMarkerIndex]) * paramsD.Beta +
mR3(derivVelRhoD_old[rigidMarkerIndex]) * (1 - paramsD.Beta)) * paramsD.markerMass;
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].x), Force.x);
atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].y), Force.y);
        atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].z), Force.z);
} else {
atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].x), Force.x);
atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].y), Force.y);
atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].z), Force.z);
}
Real3 dist3 = Distance(mR3(posRadD[rigidMarkerIndex]), posRigidD[RigidIndex]);
Real3 mtorque = cross(dist3, Force);
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x);
atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y);
atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z);
} else {
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x);
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y);
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Calc_Flex_FSI_ForcesD(Real3* FlexSPH_MeshPos_LRF_D,
uint* FlexIdentifierD,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
Real4* derivVelRhoD,
Real4* derivVelRhoD_old,
Real3* pos_fsi_fea_D,
Real3* Flex_FSI_ForcesD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numFlexMarkers)
return;
int FlexIndex = FlexIdentifierD[index];
uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers;
Real3 Force = (mR3(derivVelRhoD[FlexMarkerIndex]) * paramsD.Beta +
mR3(derivVelRhoD_old[FlexMarkerIndex]) * (1 - paramsD.Beta)) * paramsD.markerMass;
int numFlex1D = numObjectsD.numFlexBodies1D;
if (FlexIndex < numFlex1D) {
// Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x);
// Real NA = N_cable.x;
// Real NB = N_cable.y;
Real NA = 1 - FlexSPH_MeshPos_LRF_D[index].x;
Real NB = FlexSPH_MeshPos_LRF_D[index].x;
int nA = CableElementsNodesD[FlexIndex].x;
int nB = CableElementsNodesD[FlexIndex].y;
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
} else {
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
}
}
if (FlexIndex >= numFlex1D) {
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
int nA = ShellElementsNodesD[FlexIndex - numFlex1D].x;
int nB = ShellElementsNodesD[FlexIndex - numFlex1D].y;
int nC = ShellElementsNodesD[FlexIndex - numFlex1D].z;
int nD = ShellElementsNodesD[FlexIndex - numFlex1D].w;
if (std::is_same<Real, double>::value) {
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].x), NC * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].y), NC * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].z), NC * Force.z);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].x), ND * Force.x);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].y), ND * Force.y);
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].z), ND * Force.z);
} else {
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nC].x), NC * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nC].y), NC * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nC].z), NC * Force.z);
atomicAdd((float*)&(Flex_FSI_ForcesD[nD].x), ND * Force.x);
atomicAdd((float*)&(Flex_FSI_ForcesD[nD].y), ND * Force.y);
atomicAdd((float*)&(Flex_FSI_ForcesD[nD].z), ND * Force.z);
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ void BCE_modification_Share(Real3& sumVW,
Real3& sumRhoRW,
Real& sumPW,
Real3& sumTauXxYyZzW,
Real3& sumTauXyXzYzW,
Real& sumWFluid,
int& isAffectedV,
int& isAffectedP,
int3 gridPos,
Real3 posRadA,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* cellStart,
uint* cellEnd) {
uint gridHash = calcGridHash(gridPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
Real4 rhoPresMuB = sortedRhoPreMu[j];
Real kernel_radius = RESOLUTION_LENGTH_MULT * paramsD.HSML;
if (dd > kernel_radius * kernel_radius || rhoPresMuB.w > -0.5)
continue;
Real d = length(dist3);
Real Wd = W3h(d, sortedPosRad[j].w);
Real3 velMasB = sortedVelMas[j];
sumVW += velMasB * Wd;
sumRhoRW += rhoPresMuB.x * dist3 * Wd;
sumPW += rhoPresMuB.y * Wd;
sumWFluid += Wd;
sumTauXxYyZzW += sortedTauXxYyZz[j] * Wd;
sumTauXyXzYzW += sortedTauXyXzYz[j] * Wd;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void BCE_VelocityPressureStress(Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* tauXxYyZz_ModifiedBCE,
Real3* tauXyXzYz_ModifiedBCE,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* cellStart,
uint* cellEnd,
uint* mapOriginalToSorted,
uint* extendedActivityIdD,
Real3* bceAcc,
int2 newPortion,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint sphIndex = index + newPortion.x;
if (index >= newPortion.y - newPortion.x)
return;
// no need to do anything if it is not an active particle
uint originalIndex = sphIndex;
uint activity = extendedActivityIdD[originalIndex];
if (activity == 0)
return;
uint bceIndex = index;
if (paramsD.bceTypeWall == BceVersion::ORIGINAL)
bceIndex = index + numObjectsD.numBoundaryMarkers;
uint idA = mapOriginalToSorted[sphIndex];
Real4 rhoPreMuA = sortedRhoPreMu[idA];
Real3 posRadA = mR3(sortedPosRad[idA]);
Real3 velMasA = sortedVelMas[idA];
int isAffectedV = 0;
int isAffectedP = 0;
Real3 sumVW = mR3(0);
Real3 sumRhoRW = mR3(0);
Real sumPW = 0;
Real sumWFluid = 0;
Real3 sumTauXxYyZzW = mR3(0);
Real3 sumTauXyXzYzW = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
BCE_modification_Share(sumVW, sumRhoRW, sumPW, sumTauXxYyZzW, sumTauXyXzYzW, sumWFluid, isAffectedV,
isAffectedP, neighbourPos, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu,
sortedTauXxYyZz, sortedTauXyXzYz, cellStart, cellEnd);
}
}
}
if (abs(sumWFluid) > EPSILON) {
// modify velocity
Real3 modifiedBCE_v = 2 * velMasA - sumVW / sumWFluid;
velMas_ModifiedBCE[bceIndex] = modifiedBCE_v;
// modify pressure and stress
Real3 aW = mR3(0.0);
if (rhoPreMuA.w > 0.5 && rhoPreMuA.w < 1.5) {
// Get acceleration of rigid body's BCE particle
int rigidBceIndex = sphIndex - numObjectsD.startRigidMarkers;
if (rigidBceIndex < 0 || rigidBceIndex >= numObjectsD.numRigidMarkers) {
                printf(
                    "Error! particle index out of bound: thrown from "
                    "ChBce.cu, BCE_VelocityPressureStress!\n");
*isErrorD = true;
return;
}
aW = bceAcc[rigidBceIndex];
}
if (rhoPreMuA.w > 1.5 && rhoPreMuA.w < 3.5) {
// Get acceleration of flexible body's BCE particle
int flexBceIndex = sphIndex - numObjectsD.startFlexMarkers;
if (flexBceIndex < 0 || flexBceIndex >= numObjectsD.numFlexMarkers) {
                printf(
                    "Error! particle index out of bound: thrown from "
                    "ChBce.cu, BCE_VelocityPressureStress!\n");
*isErrorD = true;
return;
}
aW = bceAcc[flexBceIndex + numObjectsD.numRigidMarkers];
}
Real pressure = (sumPW + dot(paramsD.gravity - aW, sumRhoRW)) / sumWFluid;
Real density = InvEos(pressure);
rhoPreMu_ModifiedBCE[bceIndex] = mR4(density, pressure, rhoPreMuA.z, rhoPreMuA.w);
if (paramsD.elastic_SPH) {
Real3 tauXxYyZz = (sumTauXxYyZzW + dot(paramsD.gravity - aW, sumRhoRW)) / sumWFluid;
Real3 tauXyXzYz = sumTauXyXzYzW / sumWFluid;
tauXxYyZz_ModifiedBCE[bceIndex] = mR3(tauXxYyZz.x, tauXxYyZz.y, tauXxYyZz.z);
tauXyXzYz_ModifiedBCE[bceIndex] = mR3(tauXyXzYz.x, tauXyXzYz.y, tauXyXzYz.z);
}
} else {
rhoPreMu_ModifiedBCE[bceIndex] = mR4(paramsD.rho0, paramsD.BASEPRES, paramsD.mu0, rhoPreMuA.w);
velMas_ModifiedBCE[bceIndex] = mR3(0.0);
if (paramsD.elastic_SPH) {
tauXxYyZz_ModifiedBCE[bceIndex] = mR3(0.0);
tauXyXzYz_ModifiedBCE[bceIndex] = mR3(0.0);
}
}
sortedVelMas[idA] = velMas_ModifiedBCE[bceIndex];
sortedRhoPreMu[idA] = rhoPreMu_ModifiedBCE[bceIndex];
if (paramsD.elastic_SPH) {
sortedTauXxYyZz[idA] = tauXxYyZz_ModifiedBCE[bceIndex];
sortedTauXyXzYz[idA] = tauXyXzYz_ModifiedBCE[bceIndex];
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CalcRigidBceAccelerationD(Real3* bceAcc,
Real4* q_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
Real3* rigidSPH_MeshPos_LRF_D,
const uint* rigidIdentifierD) {
uint bceIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (bceIndex >= numObjectsD.numRigidMarkers)
return;
int rigidBodyIndex = rigidIdentifierD[bceIndex];
// linear acceleration (CM)
Real3 acc3 = accRigid_fsiBodies_D[rigidBodyIndex];
Real4 q4 = q_fsiBodies_D[rigidBodyIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 wVel3 = omegaVelLRF_fsiBodies_D[rigidBodyIndex];
Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[bceIndex];
Real3 wVelCrossS = cross(wVel3, rigidSPH_MeshPos_LRF);
Real3 wVelCrossWVelCrossS = cross(wVel3, wVelCrossS);
    // centrifugal acceleration
acc3 += mR3(dot(a1, wVelCrossWVelCrossS), dot(a2, wVelCrossWVelCrossS), dot(a3, wVelCrossWVelCrossS));
Real3 wAcc3 = omegaAccLRF_fsiBodies_D[rigidBodyIndex];
Real3 wAccCrossS = cross(wAcc3, rigidSPH_MeshPos_LRF);
// tangential acceleration
acc3 += mR3(dot(a1, wAccCrossS), dot(a2, wAccCrossS), dot(a3, wAccCrossS));
bceAcc[bceIndex] = acc3;
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CalcFlexBceAccelerationD(Real3* bceAcc,
Real3* acc_fsi_fea_D,
Real3* FlexSPH_MeshPos_LRF_D,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
const uint* FlexIdentifierD) {
uint bceIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (bceIndex >= numObjectsD.numFlexMarkers)
return;
int FlexIndex = FlexIdentifierD[bceIndex];
int numFlex1D = numObjectsD.numFlexBodies1D;
int numFlex2D = numObjectsD.numFlexBodies2D;
// BCE acc on cable elements
if (FlexIndex < numFlex1D) {
uint2 CableNodes = CableElementsNodesD[FlexIndex];
// Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[bceIndex].x);
// Real NA = N_cable.x;
// Real NB = N_cable.y;
Real NA = 1 - FlexSPH_MeshPos_LRF_D[bceIndex].x;
Real NB = FlexSPH_MeshPos_LRF_D[bceIndex].x;
Real3 acc_fsi_fea_D_nA = acc_fsi_fea_D[CableNodes.x];
Real3 acc_fsi_fea_D_nB = acc_fsi_fea_D[CableNodes.y];
bceAcc[bceIndex + numObjectsD.numRigidMarkers] = NA * acc_fsi_fea_D_nA + NB * acc_fsi_fea_D_nB;
}
// BCE acc on shell elements
    if (FlexIndex >= numFlex1D && FlexIndex < numFlex1D + numFlex2D) {
uint4 shellNodes = ShellElementsNodesD[FlexIndex - numFlex1D];
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[bceIndex].x, FlexSPH_MeshPos_LRF_D[bceIndex].y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
Real3 acc_fsi_fea_D_nA = acc_fsi_fea_D[shellNodes.x];
Real3 acc_fsi_fea_D_nB = acc_fsi_fea_D[shellNodes.y];
Real3 acc_fsi_fea_D_nC = acc_fsi_fea_D[shellNodes.z];
Real3 acc_fsi_fea_D_nD = acc_fsi_fea_D[shellNodes.w];
bceAcc[bceIndex + numObjectsD.numRigidMarkers] =
NA * acc_fsi_fea_D_nA + NB * acc_fsi_fea_D_nB + NC * acc_fsi_fea_D_nC + ND * acc_fsi_fea_D_nD;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void UpdateRigidMarkersPositionVelocityD(Real4* posRadD,
Real3* velMasD,
Real3* rigidSPH_MeshPos_LRF_D,
uint* rigidIdentifierD,
Real3* posRigidD,
Real4* velMassRigidD,
Real3* omegaLRF_D,
Real4* qD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numRigidMarkers)
return;
uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers;
int rigidBodyIndex = rigidIdentifierD[index];
Real4 q4 = qD[rigidBodyIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[index];
// position
Real h = posRadD[rigidMarkerIndex].w;
Real3 p_Rigid = posRigidD[rigidBodyIndex];
Real3 pos =
p_Rigid + mR3(dot(a1, rigidSPH_MeshPos_LRF), dot(a2, rigidSPH_MeshPos_LRF), dot(a3, rigidSPH_MeshPos_LRF));
posRadD[rigidMarkerIndex] = mR4(pos, h);
// velocity
Real4 vM_Rigid = velMassRigidD[rigidBodyIndex];
Real3 omega3 = omegaLRF_D[rigidBodyIndex];
Real3 omegaCrossS = cross(omega3, rigidSPH_MeshPos_LRF);
velMasD[rigidMarkerIndex] = mR3(vM_Rigid) + mR3(dot(a1, omegaCrossS), dot(a2, omegaCrossS), dot(a3, omegaCrossS));
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void UpdateFlexMarkersPositionVelocityD(Real4* posRadD,
Real3* FlexSPH_MeshPos_LRF_D,
Real3* velMasD,
const uint* FlexIdentifierD,
uint2* CableElementsNodesD,
uint4* ShellElementsNodesD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* dir_fsi_fea_D) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numFlexMarkers)
return;
uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers;
uint FlexIndex = FlexIdentifierD[index];
uint numFlex1D = numObjectsD.numFlexBodies1D;
Real Spacing = paramsD.HSML * paramsD.MULT_INITSPACE_Shells;
if (FlexIndex < numFlex1D) {
uint2 CableNodes = CableElementsNodesD[FlexIndex];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[CableNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[CableNodes.y];
Real3 dir_fsi_fea_D_nA = dir_fsi_fea_D[CableNodes.x];
Real3 dir_fsi_fea_D_nB = dir_fsi_fea_D[CableNodes.y];
//// TODO, the direction should be calculated based on node direction and shape function
// Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA;
// x_dir = x_dir / length(x_dir);
Real l = length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA);
Real4 N_dir = Cables_ShapeFunctionsDerivatives(l, FlexSPH_MeshPos_LRF_D[index].x);
Real3 x_dir = normalize(N_dir.x * pos_fsi_fea_D_nA + N_dir.y * dir_fsi_fea_D_nA + N_dir.z * pos_fsi_fea_D_nB +
N_dir.w * dir_fsi_fea_D_nB);
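// Build an orthonormal local frame: y_dir is perpendicular to x_dir (sum of three
// perpendicular candidates, then normalized) and z_dir completes the right-handed frame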
Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y);
y_dir = y_dir / length(y_dir);
Real3 z_dir = cross(x_dir, y_dir);
Real4 N_cable = Cables_ShapeFunctions(l, FlexSPH_MeshPos_LRF_D[index].x);
Real NA = N_cable.x;
Real NAdir = N_cable.y;
Real NB = N_cable.z;
Real NBdir = N_cable.w;
Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[CableNodes.x];
Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[CableNodes.y];
Real h = posRadD[FlexMarkerIndex].w;
Real3 tempPos = NA * pos_fsi_fea_D_nA + NAdir * dir_fsi_fea_D_nA + NB * pos_fsi_fea_D_nB +
NBdir * dir_fsi_fea_D_nB + FlexSPH_MeshPos_LRF_D[index].y * y_dir +
FlexSPH_MeshPos_LRF_D[index].z * z_dir;
posRadD[FlexMarkerIndex] = mR4(tempPos, h);
velMasD[FlexMarkerIndex] = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB;
}
if (FlexIndex >= numFlex1D) {
uint4 shellNodes = ShellElementsNodesD[FlexIndex - numFlex1D];
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y];
Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z];
Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w];
Real3 dir_fsi_fea_D_nA = dir_fsi_fea_D[shellNodes.x];
Real3 dir_fsi_fea_D_nB = dir_fsi_fea_D[shellNodes.y];
Real3 dir_fsi_fea_D_nC = dir_fsi_fea_D[shellNodes.z];
Real3 dir_fsi_fea_D_nD = dir_fsi_fea_D[shellNodes.w];
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
Real3 Normal =
normalize(NA * dir_fsi_fea_D_nA + NB * dir_fsi_fea_D_nB + NC * dir_fsi_fea_D_nC + ND * dir_fsi_fea_D_nD);
Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[shellNodes.x];
Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[shellNodes.y];
Real3 vel_fsi_fea_D_nC = vel_fsi_fea_D[shellNodes.z];
Real3 vel_fsi_fea_D_nD = vel_fsi_fea_D[shellNodes.w];
Real h = posRadD[FlexMarkerIndex].w;
Real3 tempPos = NA * pos_fsi_fea_D_nA + NB * pos_fsi_fea_D_nB + NC * pos_fsi_fea_D_nC + ND * pos_fsi_fea_D_nD +
Normal * FlexSPH_MeshPos_LRF_D[index].z * Spacing;
posRadD[FlexMarkerIndex] = mR4(tempPos, h);
velMasD[FlexMarkerIndex] =
NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB + NC * vel_fsi_fea_D_nC + ND * vel_fsi_fea_D_nD;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
ChBce::ChBce(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
bool verb)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
fsiGeneralData(otherFsiGeneralData),
paramsH(otherParamsH),
numObjectsH(otherNumObjects),
verbose(verb) {
totalForceRigid.resize(0);
totalTorqueRigid.resize(0);
}
ChBce::~ChBce() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Initialize(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
std::vector<int> fsiBodyBceNum,
std::vector<int> fsiShellBceNum,
std::vector<int> fsiCableBceNum) {
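// Copy the simulation parameters and object counters to the device-side symbols paramsD and numObjectsD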
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
CopyParams_NumberOfObjects(paramsH, numObjectsH);
// Resizing the arrays used to modify the BCE velocity and pressure according to ADAMI
totalForceRigid.resize(numObjectsH->numRigidBodies);
totalTorqueRigid.resize(numObjectsH->numRigidBodies);
int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
int haveRigid = (numObjectsH->numRigidBodies > 0) ? 1 : 0;
int haveFlex1D = (numObjectsH->numFlexBodies1D > 0) ? 1 : 0;
int haveFlex2D = (numObjectsH->numFlexBodies2D > 0) ? 1 : 0;
int num = haveHelper + haveGhost + haveRigid + haveFlex1D + haveFlex2D + 1;
int numFlexRigidBoundaryMarkers =
fsiGeneralData->referenceArray[num].y - fsiGeneralData->referenceArray[haveHelper + haveGhost].y;
if (verbose) {
printf("Total number of BCE particles = %d\n", numFlexRigidBoundaryMarkers);
if (paramsH->bceType == BceVersion::ADAMI)
printf("Boundary condition for rigid and flexible body is: ADAMI\n");
if (paramsH->bceType == BceVersion::ORIGINAL)
printf("Boundary condition for rigid and flexible body is: ORIGINAL\n");
if (paramsH->bceTypeWall == BceVersion::ADAMI)
printf("Boundary condition for fixed wall is: ADAMI\n");
if (paramsH->bceTypeWall == BceVersion::ORIGINAL)
printf("Boundary condition for fixed wall is: ORIGINAL\n");
}
auto numAllBce = numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers;
if ((int)numAllBce != numFlexRigidBoundaryMarkers) {
throw std::runtime_error(
"Error! number of flex and rigid and "
"boundary markers are saved incorrectly!\n");
}
velMas_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
rhoPreMu_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
tauXxYyZz_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
tauXyXzYz_ModifiedBCE.resize(numFlexRigidBoundaryMarkers);
// Populate local position of BCE markers - on rigid bodies
if (haveRigid)
Populate_RigidSPH_MeshPos_LRF(sphMarkersD, fsiBodiesD, fsiBodyBceNum);
// Populate local position of BCE markers - on flexible bodies
if (haveFlex1D || haveFlex2D)
Populate_FlexSPH_MeshPos_LRF(sphMarkersD, fsiMeshD, fsiShellBceNum, fsiCableBceNum);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Populate_RigidSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::vector<int> fsiBodyBceNum) {
// Create map between a BCE on a rigid body and the associated body ID
uint start_bce = 0;
for (int irigid = 0; irigid < fsiBodyBceNum.size(); irigid++) {
uint end_bce = start_bce + fsiBodyBceNum[irigid];
thrust::fill(fsiGeneralData->rigidIdentifierD.begin() + start_bce,
fsiGeneralData->rigidIdentifierD.begin() + end_bce, irigid);
start_bce = end_bce;
}
uint nBlocks, nThreads;
computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks, nThreads);
Populate_RigidSPH_MeshPos_LRF_D<<<nBlocks, nThreads>>>(
mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR4CAST(sphMarkersD->posRadD),
U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR4CAST(fsiBodiesD->q_fsiBodies_D));
cudaDeviceSynchronize();
cudaCheckError();
UpdateRigidMarkersPositionVelocity(sphMarkersD, fsiBodiesD);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Populate_FlexSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
std::vector<int> fsiShellBceNum,
std::vector<int> fsiCableBceNum) {
// Create map between a BCE on a flexible body and the associated flexible body ID
uint start_bce = 0;
for (uint icable = 0; icable < fsiCableBceNum.size(); icable++) {
uint end_bce = start_bce + fsiCableBceNum[icable];
thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce,
fsiGeneralData->FlexIdentifierD.begin() + end_bce, icable);
start_bce = end_bce;
}
for (uint ishell = 0; ishell < fsiShellBceNum.size(); ishell++) {
uint end_bce = start_bce + fsiShellBceNum[ishell];
thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce,
fsiGeneralData->FlexIdentifierD.begin() + end_bce, ishell + fsiCableBceNum.size());
start_bce = end_bce;
}
uint nBlocks, nThreads;
computeGridSize((uint)numObjectsH->numFlexMarkers, 256, nBlocks, nThreads);
thrust::device_vector<Real3> FlexSPH_MeshPos_LRF_H = fsiGeneralData->FlexSPH_MeshPos_LRF_H;
Populate_FlexSPH_MeshPos_LRF_D<<<nBlocks, nThreads>>>(
mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(FlexSPH_MeshPos_LRF_H), mR4CAST(sphMarkersD->posRadD),
U1CAST(fsiGeneralData->FlexIdentifierD), U2CAST(fsiGeneralData->CableElementsNodesD),
U4CAST(fsiGeneralData->ShellElementsNodesD), mR3CAST(fsiMeshD->pos_fsi_fea_D));
cudaDeviceSynchronize();
cudaCheckError();
UpdateFlexMarkersPositionVelocity(sphMarkersD, fsiMeshD);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::ReCalcVelocityPressureStress_BCE(thrust::device_vector<Real3>& velMas_ModifiedBCE,
thrust::device_vector<Real4>& rhoPreMu_ModifiedBCE,
thrust::device_vector<Real3>& tauXxYyZz_ModifiedBCE,
thrust::device_vector<Real3>& tauXyXzYz_ModifiedBCE,
const thrust::device_vector<Real4>& sortedPosRad,
const thrust::device_vector<Real3>& sortedVelMas,
const thrust::device_vector<Real4>& sortedRhoPreMu,
const thrust::device_vector<Real3>& sortedTauXxYyZz,
const thrust::device_vector<Real3>& sortedTauXyXzYz,
const thrust::device_vector<uint>& cellStart,
const thrust::device_vector<uint>& cellEnd,
const thrust::device_vector<uint>& mapOriginalToSorted,
const thrust::device_vector<uint>& extendedActivityIdD,
const thrust::device_vector<Real3>& bceAcc,
int4 updatePortion) {
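// Host/device error flag: the kernel raises it on failure and the host checks it after synchronization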
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
// thread per particle
int2 newPortion = mI2(updatePortion.x, updatePortion.w);
if (paramsH->bceTypeWall == BceVersion::ORIGINAL) {
// Only implement ADAMI BC for rigid body boundary.
// Implement a simple BC for fixed wall to avoid unnecessary cost.
newPortion = mI2(updatePortion.y, updatePortion.w);
}
uint numBCE = newPortion.y - newPortion.x;
uint numThreads, numBlocks;
computeGridSize(numBCE, 256, numBlocks, numThreads);
BCE_VelocityPressureStress<<<numBlocks, numThreads>>>(
mR3CAST(velMas_ModifiedBCE), mR4CAST(rhoPreMu_ModifiedBCE), mR3CAST(tauXxYyZz_ModifiedBCE),
mR3CAST(tauXyXzYz_ModifiedBCE), mR4CAST(sortedPosRad), mR3CAST(sortedVelMas), mR4CAST(sortedRhoPreMu),
mR3CAST(sortedTauXxYyZz), mR3CAST(sortedTauXyXzYz), U1CAST(cellStart), U1CAST(cellEnd),
U1CAST(mapOriginalToSorted), U1CAST(extendedActivityIdD), mR3CAST(bceAcc), newPortion, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in new_BCE_VelocityPressure!\n");
cudaFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::CalcRigidBceAcceleration(thrust::device_vector<Real3>& bceAcc,
const thrust::device_vector<Real4>& q_fsiBodies_D,
const thrust::device_vector<Real3>& accRigid_fsiBodies_D,
const thrust::device_vector<Real3>& omegaVelLRF_fsiBodies_D,
const thrust::device_vector<Real3>& omegaAccLRF_fsiBodies_D,
const thrust::device_vector<Real3>& rigidSPH_MeshPos_LRF_D,
const thrust::device_vector<uint>& rigidIdentifierD) {
// thread per particle
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numRigidMarkers, 256, numBlocks, numThreads);
CalcRigidBceAccelerationD<<<numBlocks, numThreads>>>(
mR3CAST(bceAcc), mR4CAST(q_fsiBodies_D), mR3CAST(accRigid_fsiBodies_D), mR3CAST(omegaVelLRF_fsiBodies_D),
mR3CAST(omegaAccLRF_fsiBodies_D), mR3CAST(rigidSPH_MeshPos_LRF_D), U1CAST(rigidIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::CalcFlexBceAcceleration(thrust::device_vector<Real3>& bceAcc,
const thrust::device_vector<Real3>& acc_fsi_fea_D,
const thrust::device_vector<Real3>& FlexSPH_MeshPos_LRF_D,
const thrust::device_vector<int2>& CableElementsNodesD,
const thrust::device_vector<int4>& ShellElementsNodesD,
const thrust::device_vector<uint>& FlexIdentifierD) {
// thread per particle
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numFlexMarkers, 256, numBlocks, numThreads);
CalcFlexBceAccelerationD<<<numBlocks, numThreads>>>(mR3CAST(bceAcc), mR3CAST(acc_fsi_fea_D),
mR3CAST(FlexSPH_MeshPos_LRF_D), U2CAST(CableElementsNodesD),
U4CAST(ShellElementsNodesD), U1CAST(FlexIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::ModifyBceVelocityPressureStress(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD) {
auto size_ref = fsiGeneralData->referenceArray.size();
auto numBceMarkers = fsiGeneralData->referenceArray[size_ref - 1].y - fsiGeneralData->referenceArray[0].y;
auto N_all = numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers;
if ((int)N_all != numBceMarkers) {
throw std::runtime_error(
"Error! Number of rigid, flexible and boundary markers are "
"saved incorrectly. Thrown from ModifyBceVelocityPressureStress!\n");
}
if (!(velMas_ModifiedBCE.size() == numBceMarkers && rhoPreMu_ModifiedBCE.size() == numBceMarkers &&
tauXxYyZz_ModifiedBCE.size() == numBceMarkers && tauXyXzYz_ModifiedBCE.size() == numBceMarkers)) {
throw std::runtime_error(
"Error! Size error velMas_ModifiedBCE and "
"tauXxYyZz_ModifiedBCE and tauXyXzYz_ModifiedBCE and "
"rhoPreMu_ModifiedBCE. Thrown from ModifyBceVelocityPressureStress!\n");
}
// Update portion set to boundary, rigid, and flexible BCE particles
int4 updatePortion = mI4(fsiGeneralData->referenceArray[0].y, fsiGeneralData->referenceArray[1].y,
fsiGeneralData->referenceArray[2].y, fsiGeneralData->referenceArray[3].y);
// Only update boundary BCE particles if no rigid/flexible particles
if (size_ref == 2) {
updatePortion.z = fsiGeneralData->referenceArray[1].y;
updatePortion.w = fsiGeneralData->referenceArray[1].y;
}
// Update boundary and rigid/flexible BCE particles
if (size_ref == 3)
updatePortion.w = fsiGeneralData->referenceArray[2].y;
if (paramsH->bceType == BceVersion::ADAMI) {
// ADAMI boundary condition (wall, rigid, flexible)
// Calculate the acceleration of rigid/flexible BCE particles (if any), used for the ADAMI BC
thrust::device_vector<Real3> bceAcc(numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers);
// Acceleration of rigid BCE particles
if (numObjectsH->numRigidMarkers > 0) {
CalcRigidBceAcceleration(bceAcc, fsiBodiesD->q_fsiBodies_D, fsiBodiesD->accRigid_fsiBodies_D,
fsiBodiesD->omegaVelLRF_fsiBodies_D, fsiBodiesD->omegaAccLRF_fsiBodies_D,
fsiGeneralData->rigidSPH_MeshPos_LRF_D, fsiGeneralData->rigidIdentifierD);
}
// Acceleration of flexible BCE particles
if (numObjectsH->numFlexMarkers > 0) {
CalcFlexBceAcceleration(bceAcc, fsiMeshD->acc_fsi_fea_D, fsiGeneralData->FlexSPH_MeshPos_LRF_D,
fsiGeneralData->CableElementsNodesD, fsiGeneralData->ShellElementsNodesD,
fsiGeneralData->FlexIdentifierD);
}
if (paramsH->bceTypeWall == BceVersion::ORIGINAL) {
// ADAMI BC for rigid/flexible body, ORIGINAL BC for fixed wall
thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.y,
velMas_ModifiedBCE.begin());
thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x,
sphMarkersD->rhoPresMuD.begin() + updatePortion.y, rhoPreMu_ModifiedBCE.begin());
if (paramsH->elastic_SPH) {
thrust::copy(sphMarkersD->tauXxYyZzD.begin() + updatePortion.x,
sphMarkersD->tauXxYyZzD.begin() + updatePortion.y, tauXxYyZz_ModifiedBCE.begin());
thrust::copy(sphMarkersD->tauXyXzYzD.begin() + updatePortion.x,
sphMarkersD->tauXyXzYzD.begin() + updatePortion.y, tauXyXzYz_ModifiedBCE.begin());
}
if (numObjectsH->numRigidMarkers > 0 || numObjectsH->numFlexMarkers > 0) {
ReCalcVelocityPressureStress_BCE(
velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, tauXxYyZz_ModifiedBCE, tauXyXzYz_ModifiedBCE,
sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD,
sortedSphMarkersD->tauXxYyZzD, sortedSphMarkersD->tauXyXzYzD, markersProximityD->cellStartD,
markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted,
fsiGeneralData->extendedActivityIdD, bceAcc, updatePortion);
}
} else if (paramsH->bceTypeWall == BceVersion::ADAMI) {
// ADAMI BC for both rigid/flexible body and fixed wall
ReCalcVelocityPressureStress_BCE(
velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, tauXxYyZz_ModifiedBCE, tauXyXzYz_ModifiedBCE,
sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD,
sortedSphMarkersD->tauXxYyZzD, sortedSphMarkersD->tauXyXzYzD, markersProximityD->cellStartD,
markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted,
fsiGeneralData->extendedActivityIdD, bceAcc, updatePortion);
}
bceAcc.clear();
} else {
// ORIGINAL boundary condition for all boundaries (wall, rigid, flexible)
thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.w,
velMas_ModifiedBCE.begin());
thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x,
sphMarkersD->rhoPresMuD.begin() + updatePortion.w, rhoPreMu_ModifiedBCE.begin());
if (paramsH->elastic_SPH) {
thrust::copy(sphMarkersD->tauXxYyZzD.begin() + updatePortion.x,
sphMarkersD->tauXxYyZzD.begin() + updatePortion.w, tauXxYyZz_ModifiedBCE.begin());
thrust::copy(sphMarkersD->tauXyXzYzD.begin() + updatePortion.x,
sphMarkersD->tauXyXzYzD.begin() + updatePortion.w, tauXyXzYz_ModifiedBCE.begin());
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Rigid_Forces_Torques(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD) {
if (numObjectsH->numRigidBodies == 0)
return;
thrust::fill(fsiGeneralData->rigid_FSI_ForcesD.begin(), fsiGeneralData->rigid_FSI_ForcesD.end(), mR3(0));
thrust::fill(fsiGeneralData->rigid_FSI_TorquesD.begin(), fsiGeneralData->rigid_FSI_TorquesD.end(), mR3(0));
uint nBlocks, nThreads;
computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks, nThreads);
Calc_Rigid_FSI_Forces_Torques_D<<<nBlocks, nThreads>>>(
mR3CAST(fsiGeneralData->rigid_FSI_ForcesD), mR3CAST(fsiGeneralData->rigid_FSI_TorquesD),
mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old), mR4CAST(sphMarkersD->posRadD),
U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D));
cudaDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::Flex_Forces(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD) {
if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0)
return;
thrust::fill(fsiGeneralData->Flex_FSI_ForcesD.begin(), fsiGeneralData->Flex_FSI_ForcesD.end(), mR3(0));
uint nBlocks, nThreads;
computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks, nThreads);
Calc_Flex_FSI_ForcesD<<<nBlocks, nThreads>>>(
mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), U1CAST(fsiGeneralData->FlexIdentifierD),
U2CAST(fsiGeneralData->CableElementsNodesD), U4CAST(fsiGeneralData->ShellElementsNodesD),
mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old),
mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiGeneralData->Flex_FSI_ForcesD));
cudaDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::UpdateRigidMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD) {
if (numObjectsH->numRigidBodies == 0)
return;
uint nBlocks, nThreads;
computeGridSize((int)numObjectsH->numRigidMarkers, 256, nBlocks, nThreads);
UpdateRigidMarkersPositionVelocityD<<<nBlocks, nThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D),
U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR4CAST(fsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(fsiBodiesD->omegaVelLRF_fsiBodies_D),
mR4CAST(fsiBodiesD->q_fsiBodies_D));
cudaDeviceSynchronize();
cudaCheckError();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChBce::UpdateFlexMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD,
std::shared_ptr<FsiMeshDataD> fsiMeshD) {
if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0)
return;
uint nBlocks, nThreads;
computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks, nThreads);
UpdateFlexMarkersPositionVelocityD<<<nBlocks, nThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(sphMarkersD->velMasD),
U1CAST(fsiGeneralData->FlexIdentifierD), U2CAST(fsiGeneralData->CableElementsNodesD),
U4CAST(fsiGeneralData->ShellElementsNodesD), mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiMeshD->vel_fsi_fea_D),
mR3CAST(fsiMeshD->dir_fsi_fea_D));
cudaDeviceSynchronize();
cudaCheckError();
}
} // end namespace fsi
} // end namespace chrono
|
315ba514c1a5f671a3c17d767730ab36d2bb661d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include "CUDAKernels.hpp"
#include "HeunSolver.hpp"
#include "Benjamin.hpp"
#include "NonlinearProblem.hpp"
#include "parameters.hpp"
__global__ void InitalisePRNGKernel( const unsigned int noReal,
hiprandState_t* pGlobalState)
{
int index = blockIdx.z * blockDim.x + threadIdx.x;
if (index<noReal)
{
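// One PRNG state per realization: fixed seed 1337, sequence number equal to the global thread index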
hiprand_init(1337, index, 0, &pGlobalState[index]);
}
}
__global__ void DebugKernel( const unsigned int noReal,
const unsigned int noSims,
const unsigned int noBeta,
hiprandState_t* pGlobalState,
unsigned int* pNoFinished,
float* pEscapeTimes,
int2* pCouplingList,
float* pCouplingStrength)
{
float coupling_strength = pCouplingStrength[blockIdx.x];
int2 p_coupling_list[noNeurons*noNeurons];
for (int i=0;i<noNeurons*noNeurons;++i)
{
p_coupling_list[i].x = pCouplingList[i+blockIdx.y*noNeurons*noNeurons].x;
p_coupling_list[i].y = pCouplingList[i+blockIdx.y*noNeurons*noNeurons].y;
printf("Network no: %d, Output neuron: %d, Input neuron: %d.\n" \
,blockIdx.y,p_coupling_list[i].x,p_coupling_list[i].y);
}
printf("Network no: %d, Strength index: %d, Coupling strength: %f.\n",blockIdx.y,blockIdx.x,coupling_strength);
}
__global__ void SimulateNetworkKernel( const unsigned int noReal,
const unsigned int noSims,
const unsigned int noBeta,
hiprandState_t* pGlobalState,
unsigned int* pNoFinished,
float* pEscapeTimes,
int2* pCouplingList,
float* pCouplingStrength)
{
int index = blockIdx.z * blockDim.x + threadIdx.x;
if (index<noReal)
{
__shared__ int2 p_coupling_list[noNeurons*noNeurons];
hiprandState_t local_state = pGlobalState[index];
// Load coupling list
if (threadIdx.x<noNeurons*noNeurons)
{
p_coupling_list[threadIdx.x].x =
pCouplingList[threadIdx.x+blockIdx.y*noNeurons*noNeurons].x;
p_coupling_list[threadIdx.x].y =
pCouplingList[threadIdx.x+blockIdx.y*noNeurons*noNeurons].y;
}
Benjamin* p_problem = new Benjamin( p_coupling_list,
pCouplingStrength[blockIdx.x]);
HeunSolver* p_solver = new HeunSolver( timestep, local_state, p_problem);
// Initialise system
float time = 0.0f;
float2 u[noNeurons];
# pragma unroll
for (int i=0;i<noNeurons;++i)
{
u[i].x = 0.0f;
u[i].y = 0.0f;
}
int noCrossed = 0;
int crossings = 0;
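// Integrate until every neuron has left the unstable radius. 'crossings' is a bitmask
// of neurons that have already escaped, so each escape time is accumulated exactly once.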
do
{
p_solver->HeunStep( time, u);
# pragma unroll
for (int i=0;i<noNeurons;++i)
{
if (!((crossings & (1<<i)) || (u[i].x*u[i].x+u[i].y*u[i].y<unstable_radius_squared)))
{
atomicAdd( pEscapeTimes+(blockIdx.x+blockIdx.y*noBeta)*noNeurons+noCrossed, time);
noCrossed++;
crossings += (1<<i);
}
}
time += timestep;
} while (noCrossed<noNeurons);
printf("Finished simulation %d of %d.", atomicAdd( pNoFinished,1)+1,noSims);
delete( p_solver);
delete( p_problem);
}
}
| 315ba514c1a5f671a3c17d767730ab36d2bb661d.cu | #include <curand_kernel.h>
#include "CUDAKernels.hpp"
#include "HeunSolver.hpp"
#include "Benjamin.hpp"
#include "NonlinearProblem.hpp"
#include "parameters.hpp"
__global__ void InitalisePRNGKernel( const unsigned int noReal,
curandState* pGlobalState)
{
int index = blockIdx.z * blockDim.x + threadIdx.x;
if (index<noReal)
{
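// One PRNG state per realization: fixed seed 1337, sequence number equal to the global thread index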
curand_init(1337, index, 0, &pGlobalState[index]);
}
}
__global__ void DebugKernel( const unsigned int noReal,
const unsigned int noSims,
const unsigned int noBeta,
curandState* pGlobalState,
unsigned int* pNoFinished,
float* pEscapeTimes,
int2* pCouplingList,
float* pCouplingStrength)
{
float coupling_strength = pCouplingStrength[blockIdx.x];
int2 p_coupling_list[noNeurons*noNeurons];
for (int i=0;i<noNeurons*noNeurons;++i)
{
p_coupling_list[i].x = pCouplingList[i+blockIdx.y*noNeurons*noNeurons].x;
p_coupling_list[i].y = pCouplingList[i+blockIdx.y*noNeurons*noNeurons].y;
printf("Network no: %d, Output neuron: %d, Input neuron: %d.\n" \
,blockIdx.y,p_coupling_list[i].x,p_coupling_list[i].y);
}
printf("Network no: %d, Strength index: %d, Coupling strength: %f.\n",blockIdx.y,blockIdx.x,coupling_strength);
}
__global__ void SimulateNetworkKernel( const unsigned int noReal,
const unsigned int noSims,
const unsigned int noBeta,
curandState* pGlobalState,
unsigned int* pNoFinished,
float* pEscapeTimes,
int2* pCouplingList,
float* pCouplingStrength)
{
int index = blockIdx.z * blockDim.x + threadIdx.x;
if (index<noReal)
{
__shared__ int2 p_coupling_list[noNeurons*noNeurons];
curandState local_state = pGlobalState[index];
// Load coupling list
if (threadIdx.x<noNeurons*noNeurons)
{
p_coupling_list[threadIdx.x].x =
pCouplingList[threadIdx.x+blockIdx.y*noNeurons*noNeurons].x;
p_coupling_list[threadIdx.x].y =
pCouplingList[threadIdx.x+blockIdx.y*noNeurons*noNeurons].y;
}
Benjamin* p_problem = new Benjamin( p_coupling_list,
pCouplingStrength[blockIdx.x]);
HeunSolver* p_solver = new HeunSolver( timestep, local_state, p_problem);
// Initialise system
float time = 0.0f;
float2 u[noNeurons];
# pragma unroll
for (int i=0;i<noNeurons;++i)
{
u[i].x = 0.0f;
u[i].y = 0.0f;
}
int noCrossed = 0;
int crossings = 0;
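// Integrate until every neuron has left the unstable radius. 'crossings' is a bitmask
// of neurons that have already escaped, so each escape time is accumulated exactly once.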
do
{
p_solver->HeunStep( time, u);
# pragma unroll
for (int i=0;i<noNeurons;++i)
{
if (!((crossings & (1<<i)) || (u[i].x*u[i].x+u[i].y*u[i].y<unstable_radius_squared)))
{
atomicAdd( pEscapeTimes+(blockIdx.x+blockIdx.y*noBeta)*noNeurons+noCrossed, time);
noCrossed++;
crossings += (1<<i);
}
}
time += timestep;
} while (noCrossed<noNeurons);
printf("Finished simulation %d of %d.", atomicAdd( pNoFinished,1)+1,noSims);
delete( p_solver);
delete( p_problem);
}
}
|
385d0c0d9d830a8bf6bfabe95e5ca62b05f8141f.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
* penguinV: https://github.com/ihhub/penguinV *
* Copyright (C) 2017 - 2022 *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include "performance_test_helper_cuda.cuh"
#include "../../../src/cuda/cuda_helper.cuh"
namespace
{
void setCudaThreadCount( uint32_t threadCount )
{
multiCuda::CudaDeviceManager::instance().device().setThreadsPerBlock( threadCount );
}
uint32_t getMaximumCudaThreadCount()
{
return multiCuda::CudaDeviceManager::instance().device().maximumThreadsPerBlock();
}
}
namespace Performance_Test
{
namespace Cuda_Helper
{
TimerContainerCuda::TimerContainerCuda()
{
multiCuda::cudaCheck( hipEventCreate( &_startEvent ) );
multiCuda::cudaCheck( hipEventCreate( &_stopEvent ) );
}
TimerContainerCuda::~TimerContainerCuda()
{
multiCuda::cudaCheck( hipEventDestroy( _startEvent ) );
multiCuda::cudaCheck( hipEventDestroy( _stopEvent ) );
}
void TimerContainerCuda::start()
{
multiCuda::cudaCheck( hipEventRecord( _startEvent, multiCuda::getCudaStream() ) );
}
void TimerContainerCuda::stop()
{
multiCuda::cudaCheck( hipEventRecord( _stopEvent, multiCuda::getCudaStream() ) );
multiCuda::cudaCheck( hipEventSynchronize( _stopEvent ) );
float time = 0.0f;
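// Elapsed GPU time between the two recorded events, reported in milliseconds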
multiCuda::cudaCheck( hipEventElapsedTime( &time, _startEvent, _stopEvent ) );
push( time );
}
std::pair < double, double > runPerformanceTestCuda( performanceFunctionCuda function, uint32_t size, uint32_t threadCountDivider )
{
setCudaThreadCount( getMaximumCudaThreadCount() / threadCountDivider );
TimerContainerCuda timer;
function(timer, size);
return timer.mean();
}
penguinV::Image uniformImage( uint32_t width, uint32_t height )
{
return uniformImage( width, height, randomValue<uint8_t>( 256 ) );
}
penguinV::Image uniformImage( uint32_t width, uint32_t height, uint8_t value )
{
penguinV::ImageCuda image( width, height );
image.fill( value );
penguinV::Image imageOut;
imageOut.swap( image );
return imageOut;
}
std::vector<penguinV::Image> uniformImages( uint32_t count, uint32_t width, uint32_t height )
{
std::vector<penguinV::Image> image( count );
for ( std::vector<penguinV::Image>::iterator im = image.begin(); im != image.end(); ++im )
*im = uniformImage( width, height );
return image;
}
}
}
| 385d0c0d9d830a8bf6bfabe95e5ca62b05f8141f.cu | /***************************************************************************
* penguinV: https://github.com/ihhub/penguinV *
* Copyright (C) 2017 - 2022 *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include "performance_test_helper_cuda.cuh"
#include "../../../src/cuda/cuda_helper.cuh"
namespace
{
void setCudaThreadCount( uint32_t threadCount )
{
multiCuda::CudaDeviceManager::instance().device().setThreadsPerBlock( threadCount );
}
uint32_t getMaximumCudaThreadCount()
{
return multiCuda::CudaDeviceManager::instance().device().maximumThreadsPerBlock();
}
}
namespace Performance_Test
{
namespace Cuda_Helper
{
TimerContainerCuda::TimerContainerCuda()
{
multiCuda::cudaCheck( cudaEventCreate( &_startEvent ) );
multiCuda::cudaCheck( cudaEventCreate( &_stopEvent ) );
}
TimerContainerCuda::~TimerContainerCuda()
{
multiCuda::cudaCheck( cudaEventDestroy( _startEvent ) );
multiCuda::cudaCheck( cudaEventDestroy( _stopEvent ) );
}
void TimerContainerCuda::start()
{
multiCuda::cudaCheck( cudaEventRecord( _startEvent, multiCuda::getCudaStream() ) );
}
void TimerContainerCuda::stop()
{
multiCuda::cudaCheck( cudaEventRecord( _stopEvent, multiCuda::getCudaStream() ) );
multiCuda::cudaCheck( cudaEventSynchronize( _stopEvent ) );
float time = 0.0f;
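// Elapsed GPU time between the two recorded events, reported in milliseconds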
multiCuda::cudaCheck( cudaEventElapsedTime( &time, _startEvent, _stopEvent ) );
push( time );
}
std::pair < double, double > runPerformanceTestCuda( performanceFunctionCuda function, uint32_t size, uint32_t threadCountDivider )
{
setCudaThreadCount( getMaximumCudaThreadCount() / threadCountDivider );
TimerContainerCuda timer;
function(timer, size);
return timer.mean();
}
penguinV::Image uniformImage( uint32_t width, uint32_t height )
{
return uniformImage( width, height, randomValue<uint8_t>( 256 ) );
}
penguinV::Image uniformImage( uint32_t width, uint32_t height, uint8_t value )
{
penguinV::ImageCuda image( width, height );
image.fill( value );
penguinV::Image imageOut;
imageOut.swap( image );
return imageOut;
}
std::vector<penguinV::Image> uniformImages( uint32_t count, uint32_t width, uint32_t height )
{
std::vector<penguinV::Image> image( count );
for ( std::vector<penguinV::Image>::iterator im = image.begin(); im != image.end(); ++im )
*im = uniformImage( width, height );
return image;
}
}
}
|
48837f216f621d1489b8ab41790d993d5c68ee48.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include "tmutil.h"
#include "ipcbuf_cuda.h"
/* zero bytes in an ipcbuf, by reading from previously zeroed device memory */
extern "C"
ssize_t ipcbuf_zero_next_block_cuda (ipcbuf_t* id, char * dev_ptr, size_t dev_bytes, hipStream_t stream)
{
ipcsync_t* sync = id->sync;
/* must be the designated writer */
if (!ipcbuf_is_writer(id))
{
fprintf (stderr, "ipcbuf_zero_next_block_cuda: process is not writer\n");
return -1;
}
// get the next buffer to be written
uint64_t next_buf = (sync->w_buf_next + 1) % sync->nbufs;
char have_cleared = 0;
unsigned iread;
while (!have_cleared)
{
have_cleared = 1;
// check that each reader has 1 clear buffer at least
for (iread = 0; iread < sync->n_readers; iread++ )
{
if (semctl (id->semid_data[iread], IPCBUF_CLEAR, GETVAL) == 0)
have_cleared = 0;
}
if (!have_cleared)
float_sleep((double)0.01);
}
uint64_t bytes_zeroed = 0;
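// Zero the block in chunks no larger than the pre-zeroed device buffer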
while (bytes_zeroed < id->sync->bufsz)
{
uint64_t bytes_to_zero = id->sync->bufsz - bytes_zeroed;
if (bytes_to_zero > dev_bytes)
bytes_to_zero = dev_bytes;
if (stream)
hipMemcpyAsync (id->buffer[next_buf], dev_ptr, bytes_to_zero, hipMemcpyDeviceToHost, stream);
else
hipMemcpy (id->buffer[next_buf], dev_ptr, bytes_to_zero, hipMemcpyDeviceToHost);
bytes_zeroed += bytes_to_zero;
}
// NB explicitly do not synchronize, expect the caller to do this before using the buffer[next_buf]
//if (stream)
// hipStreamSynchronize (stream);
return 0;
}
| 48837f216f621d1489b8ab41790d993d5c68ee48.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include "tmutil.h"
#include "ipcbuf_cuda.h"
/* zero bytes in an ipcbuf, by reading from previously zeroed device memory */
extern "C"
ssize_t ipcbuf_zero_next_block_cuda (ipcbuf_t* id, char * dev_ptr, size_t dev_bytes, cudaStream_t stream)
{
ipcsync_t* sync = id->sync;
/* must be the designated writer */
if (!ipcbuf_is_writer(id))
{
fprintf (stderr, "ipcbuf_zero_next_block_cuda: process is not writer\n");
return -1;
}
// get the next buffer to be written
uint64_t next_buf = (sync->w_buf_next + 1) % sync->nbufs;
char have_cleared = 0;
unsigned iread;
while (!have_cleared)
{
have_cleared = 1;
// check that each reader has 1 clear buffer at least
for (iread = 0; iread < sync->n_readers; iread++ )
{
if (semctl (id->semid_data[iread], IPCBUF_CLEAR, GETVAL) == 0)
have_cleared = 0;
}
if (!have_cleared)
float_sleep((double)0.01);
}
uint64_t bytes_zeroed = 0;
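// Zero the block in chunks no larger than the pre-zeroed device buffer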
while (bytes_zeroed < id->sync->bufsz)
{
uint64_t bytes_to_zero = id->sync->bufsz - bytes_zeroed;
if (bytes_to_zero > dev_bytes)
bytes_to_zero = dev_bytes;
if (stream)
cudaMemcpyAsync (id->buffer[next_buf], dev_ptr, bytes_to_zero, cudaMemcpyDeviceToHost, stream);
else
cudaMemcpy (id->buffer[next_buf], dev_ptr, bytes_to_zero, cudaMemcpyDeviceToHost);
bytes_zeroed += bytes_to_zero;
}
// NB explicitly do not synchronize, expect the caller to do this before using the buffer[next_buf]
//if (stream)
// cudaStreamSynchronize (stream);
return 0;
}
|
684b3635f86b86f48619bf5c9ac72c4fc500ca24.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_common.cuh"
__global__ void lots_of_float_compute(float *inputs, int N, size_t niters,
float *outputs)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t nthreads = gridDim.x * blockDim.x;
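// Grid-stride loop: each thread processes elements tid, tid + nthreads, tid + 2*nthreads, ...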
for (; tid < N; tid += nthreads)
{
size_t iter;
float val = inputs[tid];
for (iter = 0; iter < niters; iter++)
{
val = (val + 5.0f) - 101.0f;
val = (val / 3.0f) + 102.0f;
val = (val + 1.07f) - 103.0f;
val = (val / 1.037f) + 104.0f;
val = (val + 3.00f) - 105.0f;
val = (val / 0.22f) + 106.0f;
}
outputs[tid] = val;
}
}
__global__ void lots_of_double_compute(double *inputs, int N, size_t niters,
double *outputs)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t nthreads = gridDim.x * blockDim.x;
for (; tid < N; tid += nthreads)
{
size_t iter;
double val = inputs[tid];
for (iter = 0; iter < niters; iter++)
{
val = (val + 5.0) - 101.0;
val = (val / 3.0) + 102.0;
val = (val + 1.07) - 103.0;
val = (val / 1.037) + 104.0;
val = (val + 3.00) - 105.0;
val = (val / 0.22) + 106.0;
}
outputs[tid] = val;
}
}
static void run_float_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock,
long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl,
float *sample, int sampleLength)
{
int i;
float *h_floatInputs, *h_floatOutputs;
float *d_floatInputs, *d_floatOutputs;
h_floatInputs = (float *)malloc(sizeof(float) * N);
h_floatOutputs = (float *)malloc(sizeof(float) * N);
gpuErrchk(hipMalloc((void **)&d_floatInputs, sizeof(float) * N));
gpuErrchk(hipMalloc((void **)&d_floatOutputs, sizeof(float) * N));
for (i = 0; i < N; i++)
{
h_floatInputs[i] = (float)i;
}
clock_t ops_start, ops_end;
ops_start = clock();
gpuErrchk(hipMemcpy(d_floatInputs, h_floatInputs, sizeof(float) * N,hipMemcpyHostToDevice));
ops_end = clock();
*to_device_clock_cyl = ops_end - ops_start;
ops_start = clock();
lots_of_float_compute << <blocksPerGrid, threadsPerBlock >> >(d_floatInputs,N, niters, d_floatOutputs);
gpuErrchk(hipDeviceSynchronize());
ops_end = clock();
*kernel_clock_cyl = ops_end - ops_start;
ops_start = clock();
gpuErrchk(hipMemcpy(h_floatOutputs, d_floatOutputs, sizeof(float) * N,hipMemcpyDeviceToHost));
ops_end = clock();
*from_device_clock_cyl = ops_end - ops_start;
for (i = 0; i < sampleLength; i++)
{
sample[i] = h_floatOutputs[i];
}
gpuErrchk(hipFree(d_floatInputs));
gpuErrchk(hipFree(d_floatOutputs));
free(h_floatInputs);
free(h_floatOutputs);
}
static void run_double_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock,
long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl,
double *sample, int sampleLength)
{
int i;
double *h_doubleInputs, *h_doubleOutputs;
double *d_doubleInputs, *d_doubleOutputs;
h_doubleInputs = (double *)malloc(sizeof(double) * N);
h_doubleOutputs = (double *)malloc(sizeof(double) * N);
gpuErrchk(hipMalloc((void **)&d_doubleInputs, sizeof(double) * N));
gpuErrchk(hipMalloc((void **)&d_doubleOutputs, sizeof(double) * N));
for (i = 0; i < N; i++)
{
h_doubleInputs[i] = (double)i;
}
clock_t ops_start, ops_end;
ops_start = clock();
gpuErrchk(hipMemcpy(d_doubleInputs, h_doubleInputs, sizeof(double) * N,
hipMemcpyHostToDevice));
ops_end = clock();
*to_device_clock_cyl = ops_end - ops_start;
ops_start = clock();
lots_of_double_compute << <blocksPerGrid, threadsPerBlock >> >(d_doubleInputs,
N, niters, d_doubleOutputs);
gpuErrchk(hipDeviceSynchronize());
ops_end = clock();
*kernel_clock_cyl = ops_end - ops_start;
ops_start = clock();
gpuErrchk(hipMemcpy(h_doubleOutputs, d_doubleOutputs, sizeof(double) * N,
hipMemcpyDeviceToHost));
ops_end = clock();
*from_device_clock_cyl = ops_end - ops_start;
for (i = 0; i < sampleLength; i++)
{
sample[i] = h_doubleOutputs[i];
}
gpuErrchk(hipFree(d_doubleInputs));
gpuErrchk(hipFree(d_doubleOutputs));
free(h_doubleInputs);
free(h_doubleOutputs);
}
//int main(int argc, char **argv)
//{
// int i;
// double meanFloatToDeviceTime, meanFloatKernelTime, meanFloatFromDeviceTime;
// double meanDoubleToDeviceTime, meanDoubleKernelTime,
// meanDoubleFromDeviceTime;
// struct hipDeviceProp_t deviceProperties;
// size_t totalMem, freeMem;
// float *floatSample;
// double *doubleSample;
// int sampleLength = 10;
// int nRuns = 5;
// int nKernelIters = 20;
//
// meanFloatToDeviceTime = meanFloatKernelTime = meanFloatFromDeviceTime = 0.0;
// meanDoubleToDeviceTime = meanDoubleKernelTime =
// meanDoubleFromDeviceTime = 0.0;
//
// gpuErrchk(hipMemGetInfo(&freeMem, &totalMem));
// gpuErrchk(hipGetDeviceProperties(&deviceProperties, 0));
//
// size_t N = (freeMem * 0.9 / 2) / sizeof(double);
// int threadsPerBlock = 256;
// int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
//
// if (blocksPerGrid > deviceProperties.maxGridSize[0])
// {
// blocksPerGrid = deviceProperties.maxGridSize[0];
// }
//
// printf("Running %d blocks with %d threads/block over %lu elements\n",
// blocksPerGrid, threadsPerBlock, N);
//
// floatSample = (float *)malloc(sizeof(float) * sampleLength);
// doubleSample = (double *)malloc(sizeof(double) * sampleLength);
//
// for (i = 0; i < nRuns; i++)
// {
// long toDeviceTime, kernelTime, fromDeviceTime;
//
// run_float_test(N, nKernelIters, blocksPerGrid, threadsPerBlock,
// &toDeviceTime, &kernelTime, &fromDeviceTime,
// floatSample, sampleLength);
// meanFloatToDeviceTime += toDeviceTime;
// meanFloatKernelTime += kernelTime;
// meanFloatFromDeviceTime += fromDeviceTime;
//
// run_double_test(N, nKernelIters, blocksPerGrid, threadsPerBlock,
// &toDeviceTime, &kernelTime, &fromDeviceTime,
// doubleSample, sampleLength);
// meanDoubleToDeviceTime += toDeviceTime;
// meanDoubleKernelTime += kernelTime;
// meanDoubleFromDeviceTime += fromDeviceTime;
// }
//
// meanFloatToDeviceTime /= nRuns;
// meanFloatKernelTime /= nRuns;
// meanFloatFromDeviceTime /= nRuns;
// meanDoubleToDeviceTime /= nRuns;
// meanDoubleKernelTime /= nRuns;
// meanDoubleFromDeviceTime /= nRuns;
//
// meanFloatToDeviceTime /= CLOCKS_PER_SEC;
// meanFloatKernelTime /= CLOCKS_PER_SEC;
// meanFloatFromDeviceTime /= CLOCKS_PER_SEC;
// meanDoubleToDeviceTime /= CLOCKS_PER_SEC;
// meanDoubleKernelTime /= CLOCKS_PER_SEC;
// meanDoubleFromDeviceTime /= CLOCKS_PER_SEC;
//
// printf("For single-precision floating point, mean times for:\n");
// printf(" Copy to device: %f s\n", meanFloatToDeviceTime);
// printf(" Kernel execution: %f s\n", meanFloatKernelTime);
// printf(" Copy from device: %f s\n", meanFloatFromDeviceTime);
// printf("For double-precision floating point, mean times for:\n");
// printf(" Copy to device: %f s (%.2fx slower than single-precision)\n",
// meanDoubleToDeviceTime,
// meanDoubleToDeviceTime / meanFloatToDeviceTime);
// printf(" Kernel execution: %f s (%.2fx slower than single-precision)\n",
// meanDoubleKernelTime,
// meanDoubleKernelTime / meanFloatKernelTime);
// printf(" Copy from device: %f s (%.2fx slower than single-precision)\n",
// meanDoubleFromDeviceTime,
// meanDoubleFromDeviceTime / meanFloatFromDeviceTime);
//
// return 0;
//}
| 684b3635f86b86f48619bf5c9ac72c4fc500ca24.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_common.cuh"
__global__ void lots_of_float_compute(float *inputs, int N, size_t niters,
float *outputs)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t nthreads = gridDim.x * blockDim.x;
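// Grid-stride loop: each thread processes elements tid, tid + nthreads, tid + 2*nthreads, ...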
for (; tid < N; tid += nthreads)
{
size_t iter;
float val = inputs[tid];
for (iter = 0; iter < niters; iter++)
{
val = (val + 5.0f) - 101.0f;
val = (val / 3.0f) + 102.0f;
val = (val + 1.07f) - 103.0f;
val = (val / 1.037f) + 104.0f;
val = (val + 3.00f) - 105.0f;
val = (val / 0.22f) + 106.0f;
}
outputs[tid] = val;
}
}
__global__ void lots_of_double_compute(double *inputs, int N, size_t niters,
double *outputs)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t nthreads = gridDim.x * blockDim.x;
for (; tid < N; tid += nthreads)
{
size_t iter;
double val = inputs[tid];
for (iter = 0; iter < niters; iter++)
{
val = (val + 5.0) - 101.0;
val = (val / 3.0) + 102.0;
val = (val + 1.07) - 103.0;
val = (val / 1.037) + 104.0;
val = (val + 3.00) - 105.0;
val = (val / 0.22) + 106.0;
}
outputs[tid] = val;
}
}
static void run_float_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock,
long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl,
float *sample, int sampleLength)
{
int i;
float *h_floatInputs, *h_floatOutputs;
float *d_floatInputs, *d_floatOutputs;
h_floatInputs = (float *)malloc(sizeof(float) * N);
h_floatOutputs = (float *)malloc(sizeof(float) * N);
gpuErrchk(cudaMalloc((void **)&d_floatInputs, sizeof(float) * N));
gpuErrchk(cudaMalloc((void **)&d_floatOutputs, sizeof(float) * N));
for (i = 0; i < N; i++)
{
h_floatInputs[i] = (float)i;
}
clock_t ops_start, ops_end;
ops_start = clock();
gpuErrchk(cudaMemcpy(d_floatInputs, h_floatInputs, sizeof(float) * N,cudaMemcpyHostToDevice));
ops_end = clock();
*to_device_clock_cyl = ops_end - ops_start;
ops_start = clock();
lots_of_float_compute << <blocksPerGrid, threadsPerBlock >> >(d_floatInputs,N, niters, d_floatOutputs);
gpuErrchk(cudaDeviceSynchronize());
ops_end = clock();
*kernel_clock_cyl = ops_end - ops_start;
ops_start = clock();
gpuErrchk(cudaMemcpy(h_floatOutputs, d_floatOutputs, sizeof(float) * N,cudaMemcpyDeviceToHost));
ops_end = clock();
*from_device_clock_cyl = ops_end - ops_start;
for (i = 0; i < sampleLength; i++)
{
sample[i] = h_floatOutputs[i];
}
gpuErrchk(cudaFree(d_floatInputs));
gpuErrchk(cudaFree(d_floatOutputs));
free(h_floatInputs);
free(h_floatOutputs);
}
static void run_double_test(size_t N, int niters, int blocksPerGrid,int threadsPerBlock,
long* to_device_clock_cyl,long * kernel_clock_cyl, long* from_device_clock_cyl,
double *sample, int sampleLength)
{
int i;
double *h_doubleInputs, *h_doubleOutputs;
double *d_doubleInputs, *d_doubleOutputs;
h_doubleInputs = (double *)malloc(sizeof(double) * N);
h_doubleOutputs = (double *)malloc(sizeof(double) * N);
gpuErrchk(cudaMalloc((void **)&d_doubleInputs, sizeof(double) * N));
gpuErrchk(cudaMalloc((void **)&d_doubleOutputs, sizeof(double) * N));
for (i = 0; i < N; i++)
{
h_doubleInputs[i] = (double)i;
}
clock_t ops_start, ops_end;
ops_start = clock();
gpuErrchk(cudaMemcpy(d_doubleInputs, h_doubleInputs, sizeof(double) * N,
cudaMemcpyHostToDevice));
ops_end = clock();
*to_device_clock_cyl = ops_end - ops_start;
ops_start = clock();
lots_of_double_compute << <blocksPerGrid, threadsPerBlock >> >(d_doubleInputs,
N, niters, d_doubleOutputs);
gpuErrchk(cudaDeviceSynchronize());
ops_end = clock();
*kernel_clock_cyl = ops_end - ops_start;
ops_start = clock();
gpuErrchk(cudaMemcpy(h_doubleOutputs, d_doubleOutputs, sizeof(double) * N,
cudaMemcpyDeviceToHost));
ops_end = clock();
*from_device_clock_cyl = ops_end - ops_start;
for (i = 0; i < sampleLength; i++)
{
sample[i] = h_doubleOutputs[i];
}
gpuErrchk(cudaFree(d_doubleInputs));
gpuErrchk(cudaFree(d_doubleOutputs));
free(h_doubleInputs);
free(h_doubleOutputs);
}
//int main(int argc, char **argv)
//{
// int i;
// double meanFloatToDeviceTime, meanFloatKernelTime, meanFloatFromDeviceTime;
// double meanDoubleToDeviceTime, meanDoubleKernelTime,
// meanDoubleFromDeviceTime;
// struct cudaDeviceProp deviceProperties;
// size_t totalMem, freeMem;
// float *floatSample;
// double *doubleSample;
// int sampleLength = 10;
// int nRuns = 5;
// int nKernelIters = 20;
//
// meanFloatToDeviceTime = meanFloatKernelTime = meanFloatFromDeviceTime = 0.0;
// meanDoubleToDeviceTime = meanDoubleKernelTime =
// meanDoubleFromDeviceTime = 0.0;
//
// gpuErrchk(cudaMemGetInfo(&freeMem, &totalMem));
// gpuErrchk(cudaGetDeviceProperties(&deviceProperties, 0));
//
// size_t N = (freeMem * 0.9 / 2) / sizeof(double);
// int threadsPerBlock = 256;
// int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
//
// if (blocksPerGrid > deviceProperties.maxGridSize[0])
// {
// blocksPerGrid = deviceProperties.maxGridSize[0];
// }
//
// printf("Running %d blocks with %d threads/block over %lu elements\n",
// blocksPerGrid, threadsPerBlock, N);
//
// floatSample = (float *)malloc(sizeof(float) * sampleLength);
// doubleSample = (double *)malloc(sizeof(double) * sampleLength);
//
// for (i = 0; i < nRuns; i++)
// {
// long toDeviceTime, kernelTime, fromDeviceTime;
//
// run_float_test(N, nKernelIters, blocksPerGrid, threadsPerBlock,
// &toDeviceTime, &kernelTime, &fromDeviceTime,
// floatSample, sampleLength);
// meanFloatToDeviceTime += toDeviceTime;
// meanFloatKernelTime += kernelTime;
// meanFloatFromDeviceTime += fromDeviceTime;
//
// run_double_test(N, nKernelIters, blocksPerGrid, threadsPerBlock,
// &toDeviceTime, &kernelTime, &fromDeviceTime,
// doubleSample, sampleLength);
// meanDoubleToDeviceTime += toDeviceTime;
// meanDoubleKernelTime += kernelTime;
// meanDoubleFromDeviceTime += fromDeviceTime;
// }
//
// meanFloatToDeviceTime /= nRuns;
// meanFloatKernelTime /= nRuns;
// meanFloatFromDeviceTime /= nRuns;
// meanDoubleToDeviceTime /= nRuns;
// meanDoubleKernelTime /= nRuns;
// meanDoubleFromDeviceTime /= nRuns;
//
// meanFloatToDeviceTime /= CLOCKS_PER_SEC;
// meanFloatKernelTime /= CLOCKS_PER_SEC;
// meanFloatFromDeviceTime /= CLOCKS_PER_SEC;
// meanDoubleToDeviceTime /= CLOCKS_PER_SEC;
// meanDoubleKernelTime /= CLOCKS_PER_SEC;
// meanDoubleFromDeviceTime /= CLOCKS_PER_SEC;
//
// printf("For single-precision floating point, mean times for:\n");
// printf(" Copy to device: %f s\n", meanFloatToDeviceTime);
// printf(" Kernel execution: %f s\n", meanFloatKernelTime);
// printf(" Copy from device: %f s\n", meanFloatFromDeviceTime);
// printf("For double-precision floating point, mean times for:\n");
// printf(" Copy to device: %f s (%.2fx slower than single-precision)\n",
// meanDoubleToDeviceTime,
// meanDoubleToDeviceTime / meanFloatToDeviceTime);
// printf(" Kernel execution: %f s (%.2fx slower than single-precision)\n",
// meanDoubleKernelTime,
// meanDoubleKernelTime / meanFloatKernelTime);
// printf(" Copy from device: %f s (%.2fx slower than single-precision)\n",
// meanDoubleFromDeviceTime,
// meanDoubleFromDeviceTime / meanFloatFromDeviceTime);
//
// return 0;
//}
|
7351ec91f1399a8225bc03707a9ba5c312175b26.hip | // !!! This is a file automatically generated by hipify!!!
// This file contains the definition of the "main" function and the declarations of the functions implemented in the "main_functions" file.
// In C++, the function called "main" is mandatory; program execution starts from this function.
#include "common_hip.cuh"
#include "global_variables.cuh"
#include "utilities.cuh"
#include <conio.h>
#include <chrono>
// Declarations of the functions of the "main_functions.cu" file. I declared them here because these functions are only used in this file.
int fileReader(); // This function reads the file in the /props directory
int initialize(); // This function does everything necessary to initialize the simulation
int mainLoop(); // This is the main loop; it calculates forces, positions and more.
void multiprocessor_writer(); // This function writes the .vtu files using multiple CPU cores
// The main function description starts here
int main(void)
{
float save_count = 0; // accumulates the simulated time elapsed since the last file save
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); // this variable stores the timestamp as soon as the execution starts
std::cout << "INITIALIZING...\n";
int rdr = fileReader();
// If the fileReader() function returns anything that's not 0, it's an error and the execution must stop
if (rdr != 0) {
printf("\n\nERROR READING PROPS FILES\n\n");
_getch();
return 1;
}
int init = initialize();
std::chrono::high_resolution_clock::time_point init_end = std::chrono::high_resolution_clock::now(); // This variable stores the timestamp as soon as the initialization ends
auto init_time = std::chrono::duration_cast<std::chrono::seconds>(init_end - start).count();
std::cout << "It took " << init_time << " s to initialize\n"
<< "----------------------------------------------------------------\n\n";
// If the initialize() function returns anything that's not 0, it's an error and the execution must stop
if (init != 0) {
printf("\n\nINITIALIZATION ERROR\n\n");
_getch();
return 1;
}
std::cout << "MAIN LOOP:\n" << "Progress:" << std::endl;
while (simulation_time < final_time)
{
displayProgress(start);
int main_loop = mainLoop();
// If the main_loop() function returns anything but 0, it's an error and the execution must stop
if (main_loop != 0) {
printf("\n\nMAIN LOOP ERROR\n\n");
_getch();
return 1;
}
save_count += delta_t;
// writes output files every save_steps (defined in the system.txt file inside the /props folder)
if (save_count > save_steps / 1000) {
multiprocessor_writer();
save_count = fmod(simulation_time,(save_steps / 1000));
}
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); //stores a timestamp as soon as the execution ends
hipDeviceReset(); // deleting all used memory in this execution
std::cout << "\n\nIt took " << std::chrono::duration_cast<std::chrono::minutes>(end - start).count() << " minutes to execute this simulation.\n";
_getch(); // Keeps the console window open until a key is pressed
return 0;
} | 7351ec91f1399a8225bc03707a9ba5c312175b26.cu | // This file contains the definition of the "main" function and the declarations of the functions implemented in the "main_functions" file.
// In C++, the function called "main" is mandatory; program execution starts from this function.
#include "common.cuh"
#include "global_variables.cuh"
#include "utilities.cuh"
#include <conio.h>
#include <chrono>
// Declarations of the functions of the "main_functions.cu" file. I declared them here because these functions are only used in this file.
int fileReader(); // This function reads the file in the /props directory
int initialize(); // This function does everything necessary to initialize the simulation
int mainLoop(); // This is the main loop; it calculates forces, positions and more.
void multiprocessor_writer(); // This function writes the .vtu files using multiple CPU cores
// The main function description starts here
int main(void)
{
float save_count = 0; // accumulates the simulated time elapsed since the last file save
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); // this variable stores the timestamp as soon as the execution starts
std::cout << "INITIALIZING...\n";
int rdr = fileReader();
// If the fileReader() function returns anything that's not 0, it's an error and the execution must stop
if (rdr != 0) {
printf("\n\nERROR READING PROPS FILES\n\n");
_getch();
return 1;
}
int init = initialize();
std::chrono::high_resolution_clock::time_point init_end = std::chrono::high_resolution_clock::now(); // This variable stores the timestamp as soon as the initialization ends
auto init_time = std::chrono::duration_cast<std::chrono::seconds>(init_end - start).count();
std::cout << "It took " << init_time << " s to initialize\n"
<< "----------------------------------------------------------------\n\n";
// If the initialize() function returns anything that's not 0, it's an error and the execution must stop
if (init != 0) {
printf("\n\nINITIALIZATION ERROR\n\n");
_getch();
return 1;
}
std::cout << "MAIN LOOP:\n" << "Progress:" << std::endl;
while (simulation_time < final_time)
{
displayProgress(start);
int main_loop = mainLoop();
// If the main_loop() function returns anything but 0, it's an error and the execution must stop
if (main_loop != 0) {
printf("\n\nMAIN LOOP ERROR\n\n");
_getch();
return 1;
}
save_count += delta_t;
// writes output files every save_steps (defined in the system.txt file inside the /props folder)
if (save_count > save_steps / 1000) {
multiprocessor_writer();
save_count = fmod(simulation_time,(save_steps / 1000));
}
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); //stores a timestamp as soon as the execution ends
cudaDeviceReset(); // deleting all used memory in this execution
std::cout << "\n\nIt took " << std::chrono::duration_cast<std::chrono::minutes>(end - start).count() << " minutes to execute this simulation.\n";
_getch(); //Makes the program wait for a key press before closing the console window
return 0;
} |
39c44211d98b5c097c7b4fd0e3d7d917a3b3f698.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ __inline__ void exclusiveScan(int length, int *array) {{
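// Work-efficient (Blelloch-style) exclusive prefix sum, computed in place in shared memory:
// an upsweep builds partial sums as a binary tree, the last element is cleared, and a downsweep
// distributes the sums back down. Example: [3, 1, 0, 2] becomes [0, 3, 4, 4].
// The downsweep starts at length/2, so length is expected to be a power of two
// (presumably why the caller passes a rounded bin count).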
const int threadidx = threadIdx.x;
// upsweep
for (int step = 1; step < length; step *= 2) {{
// based on step, compute the position of the two active elements
int idxA = (step - 1) + (2 * step * threadidx);
int idxB = idxA + step;
if(idxB < length) {{
array[idxB] += array[idxA];
}}
__syncthreads();
}}
if(threadidx == 0){{ array[length - 1] = 0; }}
__syncthreads();
// downsweep
for (int step = length/2; step >= 1; step /= 2) {{
// based on step, compute the position of the two active elements
int idxA = (step - 1) + (2 * step * threadidx);
int idxB = idxA + step;
if(idxB < length) {{
int hold = array[idxB];
array[idxB] += array[idxA];
array[idxA] = hold;
}}
__syncthreads();
}}
}}
__global__ void tensor(float *all_states, int *all_poss, int *all_goals){{
// MAX SHARED MEMORY: 49152 BYTES
__shared__ int bincounters[{nbins_rounded}]; // index i is number of agents in bin i
__shared__ int binpfx[{nbins_rounded}]; // prefix sums of the above (starts from 0)
__shared__ unsigned short bins[{nagents}][2]; // where the agent locations will actually be stored
const int threadidx = threadIdx.x;
const int nthreads = blockDim.x;
const int blockidx = blockIdx.x;
const int view_range = ({view_size} - 1);
const int displacement = {view_size} * {view_size};
const int statesize = displacement + 4;
float *states = &(all_states[blockidx * {nagents} * statesize]);
int *poss = &(all_poss[blockidx * {nagents} * 2]);
int *goals = &(all_goals[blockidx * {nagents} * 2]);
int start = threadidx;
int step = nthreads;
for(int i = start; i < {nbins_rounded}; i += step){{
bincounters[i] = 0;
binpfx[i] = 0;
}}
__syncthreads();
// For positions this thread is responsible for,
// determine its bin index
// increment that bin counter
// remember its place in that bin and its bin assignment
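// The field is covered by a grid of binwidth-by-binwidth cells (binr rows by binc columns),
// so an agent at row r, column c lands in bin (r / binwidth) * binc + (c / binwidth),
// matching the bin_idx computation below.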
int localctr = 0; // counts loop iterations to index into myposs
int myposs[50][5]; // positions I am responsible for [0, 1] = r, c; [2, 3] = bin_idx, bin_pos; [4] = index in poss
int goal[2]; // goal value holder
for(int i = start; i < {nagents}; i += step){{
myposs[localctr][0] = poss[i * 2];
myposs[localctr][1] = poss[(i * 2) + 1];
goal[0] = goals[i * 2];
goal[1] = goals[(i * 2) + 1];
states[i * statesize + displacement] = (2 * ((float)myposs[localctr][0]) / {rows}) - 1;
states[i * statesize + displacement + 1] = (2 * ((float)myposs[localctr][1]) / {cols}) - 1;
states[i * statesize + displacement + 2] = (2 * ((float)goal[0]) / {rows}) - 1;
states[i * statesize + displacement + 3] = (2 * ((float)goal[1]) / {cols}) - 1;
int bin_idx = (myposs[localctr][0] / {binwidth}) * {binc} + (myposs[localctr][1] / {binwidth});
int bin_pos = atomicAdd(&(bincounters[bin_idx]), 1);
myposs[localctr][2] = bin_idx;
myposs[localctr][3] = bin_pos;
myposs[localctr][4] = i;
localctr++;
}}
__syncthreads();
// Perform a parallel exclusive scan to accumulate bin sizes so we can index into bins
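// After the scan, binpfx[i] is the starting offset of bin i inside the shared bins array,
// and binpfx[i] + bincounters[i] is one past its last element (used as the [left, right) range below).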
for(int i = start; i < {nbins_rounded}; i += step){{
binpfx[i] = bincounters[i];
}}
__syncthreads();
exclusiveScan({nbins_rounded}, binpfx);
// Populate bins with the agent positions I am responsible for
for(int i = 0; i < localctr; i++){{
int bin_idx = myposs[i][2];
int bin_pos = myposs[i][3];
int binsidx = binpfx[bin_idx] + bin_pos;
bins[binsidx][0] = (unsigned short) myposs[i][0];
bins[binsidx][1] = (unsigned short) myposs[i][1];
}}
__syncthreads();
// Render rest of state for agent positions I am responsible for
for(int i = 0; i < localctr; i++){{
// load my position in field and position in state vector
int myr = myposs[i][0];
int myc = myposs[i][1];
int myidx = myposs[i][4];
// Render -1 for walls and 0 for ground.
for(int viewr = 0; viewr < {view_size}; viewr++){{
for(int viewc = 0; viewc < {view_size}; viewc++){{
int fieldr = myr + viewr - view_range;
int fieldc = myc + viewc - view_range;
float fillval = 0.0;
if(fieldr < 0 || fieldc < 0 || fieldr >= {rows} || fieldc >= {cols})
fillval = -1.0;
int stateidx = myidx * statesize + viewr * {view_size} + viewc;
states[stateidx] = fillval;
}}
}}
// Determine receptive field overlap with bins
int TLR = max(0, (myr - view_range) / {binwidth});
int TLC = max(0, (myc - view_range) / {binwidth});
int BRR = min({binr} - 1, (myr + view_range) / {binwidth});
int BRC = min({binc} - 1, (myc + view_range) / {binwidth});
// Iterate over each agent in all possible bins
for(int binr = TLR; binr <= BRR; binr++){{
for(int binc = TLC; binc <= BRC; binc++){{
int bin_idx = binr * {binc} + binc;
int left = binpfx[bin_idx];
int right = binpfx[bin_idx] + bincounters[bin_idx];
for(int binsidx = left; binsidx < right; binsidx++){{
int otherr = (int)(bins[binsidx][0]); // location in field
int otherc = (int)(bins[binsidx][1]);
int viewr = otherr - myr + view_range;
int viewc = otherc - myc + view_range;
// If this check passes, the other agent is in view range.
if(0 <= viewr && viewr < {view_size} && 0 <= viewc && viewc < {view_size}){{
int stateidx = myidx * statesize + viewr * {view_size} + viewc;
states[stateidx] = 1.0;
}}
}}
}}
}}
}}
}} | 39c44211d98b5c097c7b4fd0e3d7d917a3b3f698.cu | __device__ __inline__ void exclusiveScan(int length, int *array) {{
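// Work-efficient (Blelloch-style) exclusive prefix sum, computed in place in shared memory:
// an upsweep builds partial sums as a binary tree, the last element is cleared, and a downsweep
// distributes the sums back down. Example: [3, 1, 0, 2] becomes [0, 3, 4, 4].
// The downsweep starts at length/2, so length is expected to be a power of two
// (presumably why the caller passes a rounded bin count).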
const int threadidx = threadIdx.x;
// upsweep
for (int step = 1; step < length; step *= 2) {{
// based on step, compute the position of the two active elements
int idxA = (step - 1) + (2 * step * threadidx);
int idxB = idxA + step;
if(idxB < length) {{
array[idxB] += array[idxA];
}}
__syncthreads();
}}
if(threadidx == 0){{ array[length - 1] = 0; }}
__syncthreads();
// downsweep
for (int step = length/2; step >= 1; step /= 2) {{
// based on step, compute the position of the two active elements
int idxA = (step - 1) + (2 * step * threadidx);
int idxB = idxA + step;
if(idxB < length) {{
int hold = array[idxB];
array[idxB] += array[idxA];
array[idxA] = hold;
}}
__syncthreads();
}}
}}
__global__ void tensor(float *all_states, int *all_poss, int *all_goals){{
// MAX SHARED MEMORY: 49152 BYTES
__shared__ int bincounters[{nbins_rounded}]; // index i is number of agents in bin i
__shared__ int binpfx[{nbins_rounded}]; // prefix sums of the above (starts from 0)
__shared__ unsigned short bins[{nagents}][2]; // where the agent locations will actually be stored
const int threadidx = threadIdx.x;
const int nthreads = blockDim.x;
const int blockidx = blockIdx.x;
const int view_range = ({view_size} - 1);
const int displacement = {view_size} * {view_size};
const int statesize = displacement + 4;
float *states = &(all_states[blockidx * {nagents} * statesize]);
int *poss = &(all_poss[blockidx * {nagents} * 2]);
int *goals = &(all_goals[blockidx * {nagents} * 2]);
int start = threadidx;
int step = nthreads;
for(int i = start; i < {nbins_rounded}; i += step){{
bincounters[i] = 0;
binpfx[i] = 0;
}}
__syncthreads();
// For positions this thread is responsible for,
// determine its bin index
// increment that bin counter
// remember its place in that bin and its bin assignment
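// The field is covered by a grid of binwidth-by-binwidth cells (binr rows by binc columns),
// so an agent at row r, column c lands in bin (r / binwidth) * binc + (c / binwidth),
// matching the bin_idx computation below.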
int localctr = 0; // counts loop iterations to index into myposs
int myposs[50][5]; // positions I am responsible for [0, 1] = r, c; [2, 3] = bin_idx, bin_pos; [4] = index in poss
int goal[2]; // goal value holder
for(int i = start; i < {nagents}; i += step){{
myposs[localctr][0] = poss[i * 2];
myposs[localctr][1] = poss[(i * 2) + 1];
goal[0] = goals[i * 2];
goal[1] = goals[(i * 2) + 1];
states[i * statesize + displacement] = (2 * ((float)myposs[localctr][0]) / {rows}) - 1;
states[i * statesize + displacement + 1] = (2 * ((float)myposs[localctr][1]) / {cols}) - 1;
states[i * statesize + displacement + 2] = (2 * ((float)goal[0]) / {rows}) - 1;
states[i * statesize + displacement + 3] = (2 * ((float)goal[1]) / {cols}) - 1;
int bin_idx = (myposs[localctr][0] / {binwidth}) * {binc} + (myposs[localctr][1] / {binwidth});
int bin_pos = atomicAdd(&(bincounters[bin_idx]), 1);
myposs[localctr][2] = bin_idx;
myposs[localctr][3] = bin_pos;
myposs[localctr][4] = i;
localctr++;
}}
__syncthreads();
// Perform a parallel exclusive scan to accumulate bin sizes so we can index into bins
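// After the scan, binpfx[i] is the starting offset of bin i inside the shared bins array,
// and binpfx[i] + bincounters[i] is one past its last element (used as the [left, right) range below).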
for(int i = start; i < {nbins_rounded}; i += step){{
binpfx[i] = bincounters[i];
}}
__syncthreads();
exclusiveScan({nbins_rounded}, binpfx);
// Populate bins with the agent positions I am responsible for
for(int i = 0; i < localctr; i++){{
int bin_idx = myposs[i][2];
int bin_pos = myposs[i][3];
int binsidx = binpfx[bin_idx] + bin_pos;
bins[binsidx][0] = (unsigned short) myposs[i][0];
bins[binsidx][1] = (unsigned short) myposs[i][1];
}}
__syncthreads();
// Render rest of state for agent positions I am responsible for
for(int i = 0; i < localctr; i++){{
// load my position in field and position in state vector
int myr = myposs[i][0];
int myc = myposs[i][1];
int myidx = myposs[i][4];
// Render -1 for walls and 0 for ground.
for(int viewr = 0; viewr < {view_size}; viewr++){{
for(int viewc = 0; viewc < {view_size}; viewc++){{
int fieldr = myr + viewr - view_range;
int fieldc = myc + viewc - view_range;
float fillval = 0.0;
if(fieldr < 0 || fieldc < 0 || fieldr >= {rows} || fieldc >= {cols})
fillval = -1.0;
int stateidx = myidx * statesize + viewr * {view_size} + viewc;
states[stateidx] = fillval;
}}
}}
// Determine receptive field overlap with bins
int TLR = max(0, (myr - view_range) / {binwidth});
int TLC = max(0, (myc - view_range) / {binwidth});
int BRR = min({binr} - 1, (myr + view_range) / {binwidth});
int BRC = min({binc} - 1, (myc + view_range) / {binwidth});
// Iterate over each agent in all possible bins
for(int binr = TLR; binr <= BRR; binr++){{
for(int binc = TLC; binc <= BRC; binc++){{
int bin_idx = binr * {binc} + binc;
int left = binpfx[bin_idx];
int right = binpfx[bin_idx] + bincounters[bin_idx];
for(int binsidx = left; binsidx < right; binsidx++){{
int otherr = (int)(bins[binsidx][0]); // location in field
int otherc = (int)(bins[binsidx][1]);
int viewr = otherr - myr + view_range;
int viewc = otherc - myc + view_range;
// If this check passes, the other agent is in view range.
if(0 <= viewr && viewr < {view_size} && 0 <= viewc && viewc < {view_size}){{
int stateidx = myidx * statesize + viewr * {view_size} + viewc;
states[stateidx] = 1.0;
}}
}}
}}
}}
}}
}} |
21065a133613519db643b88e8d3fb5d3da13e20f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
}
Layer::~Layer() {
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
// For now, gradReducer doesn't have a destructor
// delete _gradReducer;
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
hipStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
syncStream(); // Make sure I've finished computing before broadcasting
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
// printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
// This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0 ? 1 : 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
if (_type != "pass") {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
syncStream();
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
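// Selects which input replica (if any) this layer consumes on the given forward pass:
// input replicas take turns, a new one becoming active every getActivePassPeriod() passes,
// and -1 means this layer does no forward work on this pass.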
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
// This will cause it to forget momentum when shown 0 training cases
// and _useGrad = false but it's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
// weight update period must be multiple of activation period
// TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
float scaleGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
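// Numerically stable softmax: subtract the per-row max before exponentiating so exp() cannot overflow.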
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
// Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (inpIdx == 1) { // inpIdx 0 does nothing; here the second input is maxed with the first
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
 * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
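// Note: when dropout is disabled or the pass is not a training pass, this layer copies its input
// through unchanged; Dropout2Layer below instead rescales the activations by _keep in that case.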
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
 * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) {
}
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,hipStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(hipStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
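// DataLayer double-buffers its outputs: getActs() picks between _memSrcActs and
// _memSrcActs2 based on the _useBuffer flag and the 'other' argument, so the DataCopyThread
// can fill the inactive buffer while the rest of the net reads the active one. fprop()
// flips _useBuffer once the outstanding copy has finished, making the freshly copied
// microbatch the active buffer.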
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
hipStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(hipStreamCreateWithFlags(&_copyStreams[deviceID], hipStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : Thread(true, cpus), _parent(&parent), _sleepUsec(0) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
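// run() services DataCopyMessages: it works out which microbatch slice of the minibatch
// belongs to this replica (from the pass index and replica ID), stages that slice in
// _hostMemFwd (a host-side staging buffer, presumably page-locked), copies it to every
// next layer's device on a dedicated non-blocking stream, syncs those streams, and then
// signals the DataLayer through its copy-finish queue.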
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a microbatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
_hostMemFwd.copyFromHost(replicaDataMatrix, true);
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
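// convLocalMaxUndo routes each pooled gradient back to the input position(s) whose value
// equals the corresponding max output, accumulating into the existing gradient when
// scaleTargets is nonzero.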
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
// For each scale, record the cumulative fraction of the squares contributed by scales up
// to and including it. fpropActs samples a scale by comparing a uniform draw against these
// cumulative probabilities.
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
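// Worked out: for scale index s the image is rescaled to (_tgtSize + s) pixels (see
// fpropActs), giving (s + 1)^2 possible _tgtSize-sized crops. Summing over
// s = 0..numScales-1 gives numCrops = 1^2 + 2^2 + ... + numScales^2
// = numScales*(numScales+1)*(2*numScales+1)/6, the value computed above.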
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // For now, multiview test just passes the input through unchanged
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Backpropagation through bilinear resizing is not supported
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Backpropagation through the RGB-to-YUV conversion is not supported
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Backpropagation through the RGB-to-LAB conversion is not supported
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
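// _meanDiffs now holds each unit's local average; the add(images, -1, 1) call below
// (assuming the scaleThis/scaleOther overload of add) replaces it with images minus that
// local mean, which is what convContrastNorm then normalizes.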
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
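// When the softmax below absorbs this cost, the standard softmax/cross-entropy identity
// applies: the gradient w.r.t. the softmax input is proportional to (probs - labels), so
// no division by near-zero probabilities is needed; only the generic path below has to go
// through computeCrossEntGrad.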
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
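// During multi-view testing the per-view probabilities are accumulated in _probsAccum
// (keyed by passIdx) and the cost is only evaluated once the last view has arrived, after
// averaging by _numAccumed. On ordinary passes the cost is computed directly on this
// microbatch's probabilities.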
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax"
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
}
Layer::~Layer() {
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
cudaStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
syncStream(); // Make sure I've finished computing before broadcasting
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive at the ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
// printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
// This does sync, but only if it has grad consumers below, so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0 ? 1 : 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
if (_type != "pass") {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
syncStream();
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
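// A layer with R replicas in a net whose smallest layer has R_min replicas is only active
// every R / R_min passes. The two helpers below return which input replica feeds this pass
// (the forward check fires on the first pass of each period, the backward check on the
// last), or -1 when the layer has nothing to do for this passIdx.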
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
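// For logistic units y = 1/(1 + exp(-z)) under (binomial) cross-entropy, the gradient
// w.r.t. z collapses to a multiple of (labels - y), so the division by y*(1 - y) that the
// generic path would perform never happens. The posWeight == 1 branch below computes
// exactly gradCoeff * (labels - getActs()) via the (-gradCoeff, gradCoeff) weighted add;
// the posWeight != 1 branch delegates to CrossEntLogisticGradientOperator.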
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
// This will cause it to forget momentum when shown 0 training cases
// and _useGrad = false but it's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
// weight update period must be multiple of activation period
// TODO: simply accumulate the # of cases seen between weight updates; that would be simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
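// Illustrative example (numbers not from the original code): with getMinibatchSize() == 128,
// getNumPasses() == 2 and _weightUpdatePassPeriod == 2, numCases = 2 * (128 / 2) = 128,
// i.e. the number of training cases seen between consecutive weight updates.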
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
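// Scale applied to the existing increment/gradient matrix before new gradients are accumulated
// into it: with useGrad the stored grad is overwritten on the first update of a period and
// accumulated into afterwards; without useGrad the stored increment is decayed by the momentum
// coefficient on the first (non-gradient-check) microbatch of a period and then accumulated as-is.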
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
float scaleGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
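// When _sumWidth < _modulesX, convWeightActs produces partial gradient sums (one per
// _sumWidth x _sumWidth block of modules) into _weightGradTmp; the addSum reduction below
// then collapses those partial sums into the actual weight gradient.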
NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
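// Subtract each case's maximum before exponentiating: softmax is invariant to this shift,
// and it prevents overflow in Exp().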
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
// TODO: rethink replica IDs/indices here... the current scheme doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
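// _copyOffsets->at(i) is the row offset of input i inside the concatenated output; the sentinel
// pushed here (== _numOutputs) lets bpropActs slice rows [at(i), at(i + 1)) for the last input too.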
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (inpIdx == 1) { // Second input: combine inputs 0 and 1 (nothing happens for inpIdx == 0)
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
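// Note: at test time this layer just copies its input (no rescaling by _keep), which suggests
// DropoutSmallerThanOperator folds the 1/_keep rescaling into the train-time mask (inverted
// dropout). Dropout2Layer below instead uses a plain 0/1 mask and rescales by _keep at test time.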
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) {
}
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,cudaStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(cudaStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
cudaStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(cudaStreamCreateWithFlags(&_copyStreams[deviceID], cudaStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
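// Double buffering: fprop() toggles _useBuffer once a copy has finished, so passing other == true
// selects the buffer being filled for the next minibatch rather than the one currently consumed.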
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a minibatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
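// Illustrative example: with 1000 cases and 8 replicas, microbatchSize = DIVUP(1000, 8) = 125,
// so microIdx 7 copies cases [875, 1000). With only 7 cases and 8 replicas, microbatchSize = 1
// and microIdx 7 gets the empty range [7, 7), which the microStart < microEnd check below skips.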
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
_hostMemFwd.copyFromHost(replicaDataMatrix, true);
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
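// numCrops = 1^2 + 2^2 + ... + numScales^2 = n(n+1)(2n+1)/6: a rescaled image that is s pixels
// larger than the target size admits (s + 1) * (s + 1) distinct _tgtSize x _tgtSize crops.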
// For each scale, record the cumulative fraction of the squares that it contributes.
// This cumulative distribution is used below to sample a scale in proportion to its share of crops.
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax"
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
|
109a512c4649666fde95134abbea725a7a6df5eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include "gtest/gtest.h"
#include <utilities/type_dispatcher.hpp>
#include <thrust/device_vector.h>
#include <cudf.h>
#include "tests/utilities/cudf_test_fixtures.h"
/**
* @file dispatcher_test.cu
* @brief Tests the type_dispatcher
*/
namespace{
struct test_functor {
template <typename T>
__host__ __device__
bool operator()(gdf_dtype type_id) {
switch (type_id) {
case GDF_INT8:
return (std::is_same<T, int8_t>::value);
case GDF_INT16:
return (std::is_same<T, int16_t>::value);
case GDF_INT32:
return (std::is_same<T, int32_t>::value);
case GDF_INT64:
return (std::is_same<T, int64_t>::value);
case GDF_FLOAT32:
return (std::is_same<T, float>::value);
case GDF_FLOAT64:
return (std::is_same<T, double>::value);
case GDF_CATEGORY:
return (std::is_same<T, cudf::category>::value);
case GDF_TIMESTAMP:
return (std::is_same<T, cudf::timestamp>::value);
case GDF_DATE32:
return (std::is_same<T, cudf::date32>::value);
case GDF_DATE64:
return (std::is_same<T, cudf::date64>::value);
default:
return (false);
}
}
};
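// Illustrative example of the mapping under test: cudf::type_dispatcher(GDF_FLOAT32, test_functor{}, GDF_FLOAT32)
// instantiates test_functor::operator()<float>(GDF_FLOAT32), which returns true because float is
// the static type corresponding to GDF_FLOAT32.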
__global__
void dispatch_test_kernel(gdf_dtype type, bool * d_result)
{
if(0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(type, test_functor{}, type);
}
} // anonymous namespace
struct DispatcherTest : public GdfTest
{
std::vector<gdf_dtype> dtype_enums{
GDF_INT8, GDF_INT16, GDF_INT32, GDF_INT64, GDF_FLOAT32,
GDF_FLOAT64, GDF_DATE32, GDF_DATE64, GDF_TIMESTAMP, GDF_CATEGORY};
};
TEST_F(DispatcherTest, HostDispatchFunctor)
{
for (auto const &t : this->dtype_enums) {
bool result = cudf::type_dispatcher(t, test_functor{}, t);
EXPECT_TRUE(result);
}
}
TEST_F(DispatcherTest, DeviceDispatchFunctor)
{
thrust::device_vector<bool> result(1);
for (auto const& t : this->dtype_enums) {
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1),dim3(1), 0, 0, t, result.data().get());
hipDeviceSynchronize();
EXPECT_EQ(true, result[0]);
}
}
// These tests exercise the `assert(false)` on unsupported dtypes in the type_dispatcher
// The assert is only present if the NDEBUG macro isn't defined
#ifndef NDEBUG
// Unsupported gdf_dtypes should cause the program to exit
TEST(DispatcherDeathTest, UnsuportedTypesTest)
{
testing::FLAGS_gtest_death_test_style="threadsafe";
std::vector<gdf_dtype> unsupported_types{ GDF_invalid, GDF_STRING, N_GDF_TYPES};
for (auto const &t : unsupported_types) {
EXPECT_DEATH(cudf::type_dispatcher(t, test_functor{}, t), "");
}
}
// Unsupported gdf_dtypes in device code should set the appropriate error code
// and invalidate the device context
TEST(DispatcherDeathTest, DeviceDispatchFunctor)
{
testing::FLAGS_gtest_death_test_style="threadsafe";
std::vector<gdf_dtype> unsupported_types{ GDF_invalid, GDF_STRING, N_GDF_TYPES};
thrust::device_vector<bool> result(1);
auto call_kernel = [&result](gdf_dtype t) {
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, t, result.data().get());
auto error_code = hipDeviceSynchronize();
// Kernel should fail with `hipErrorAssert` on an unsupported gdf_dtype
// This error invalidates the current device context, so we need to kill
// the current process. Running with EXPECT_DEATH spawns a new process for
// each attempted kernel launch
EXPECT_EQ(hipErrorAssert, error_code);
exit(-1);
};
for (auto const& t : unsupported_types) {
EXPECT_DEATH(call_kernel(t), "");
}
}
#endif
| 109a512c4649666fde95134abbea725a7a6df5eb.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include "gtest/gtest.h"
#include <utilities/type_dispatcher.hpp>
#include <thrust/device_vector.h>
#include <cudf.h>
#include "tests/utilities/cudf_test_fixtures.h"
/**
* @file dispatcher_test.cu
* @brief Tests the type_dispatcher
*/
namespace{
struct test_functor {
template <typename T>
__host__ __device__
bool operator()(gdf_dtype type_id) {
switch (type_id) {
case GDF_INT8:
return (std::is_same<T, int8_t>::value);
case GDF_INT16:
return (std::is_same<T, int16_t>::value);
case GDF_INT32:
return (std::is_same<T, int32_t>::value);
case GDF_INT64:
return (std::is_same<T, int64_t>::value);
case GDF_FLOAT32:
return (std::is_same<T, float>::value);
case GDF_FLOAT64:
return (std::is_same<T, double>::value);
case GDF_CATEGORY:
return (std::is_same<T, cudf::category>::value);
case GDF_TIMESTAMP:
return (std::is_same<T, cudf::timestamp>::value);
case GDF_DATE32:
return (std::is_same<T, cudf::date32>::value);
case GDF_DATE64:
return (std::is_same<T, cudf::date64>::value);
default:
return (false);
}
}
};
__global__
void dispatch_test_kernel(gdf_dtype type, bool * d_result)
{
if(0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(type, test_functor{}, type);
}
} // anonymous namespace
struct DispatcherTest : public GdfTest
{
std::vector<gdf_dtype> dtype_enums{
GDF_INT8, GDF_INT16, GDF_INT32, GDF_INT64, GDF_FLOAT32,
GDF_FLOAT64, GDF_DATE32, GDF_DATE64, GDF_TIMESTAMP, GDF_CATEGORY};
};
TEST_F(DispatcherTest, HostDispatchFunctor)
{
for (auto const &t : this->dtype_enums) {
bool result = cudf::type_dispatcher(t, test_functor{}, t);
EXPECT_TRUE(result);
}
}
TEST_F(DispatcherTest, DeviceDispatchFunctor)
{
thrust::device_vector<bool> result(1);
for (auto const& t : this->dtype_enums) {
dispatch_test_kernel<<<1,1>>>(t, result.data().get());
cudaDeviceSynchronize();
EXPECT_EQ(true, result[0]);
}
}
// These tests exercise the `assert(false)` on unsupported dtypes in the type_dispatcher
// The assert is only present if the NDEBUG macro isn't defined
#ifndef NDEBUG
// Unsupported gdf_dtypes should cause the program to exit
TEST(DispatcherDeathTest, UnsuportedTypesTest)
{
testing::FLAGS_gtest_death_test_style="threadsafe";
std::vector<gdf_dtype> unsupported_types{ GDF_invalid, GDF_STRING, N_GDF_TYPES};
for (auto const &t : unsupported_types) {
EXPECT_DEATH(cudf::type_dispatcher(t, test_functor{}, t), "");
}
}
// Unsupported gdf_dtypes in device code should set the appropriate error code
// and invalidate the device context
TEST(DispatcherDeathTest, DeviceDispatchFunctor)
{
testing::FLAGS_gtest_death_test_style="threadsafe";
std::vector<gdf_dtype> unsupported_types{ GDF_invalid, GDF_STRING, N_GDF_TYPES};
thrust::device_vector<bool> result(1);
auto call_kernel = [&result](gdf_dtype t) {
dispatch_test_kernel<<<1, 1>>>(t, result.data().get());
auto error_code = cudaDeviceSynchronize();
// Kernel should fail with `cudaErrorAssert` on an unsupported gdf_dtype
// This error invalidates the current device context, so we need to kill
// the current process. Running with EXPECT_DEATH spawns a new process for
// each attempted kernel launch
EXPECT_EQ(cudaErrorAssert, error_code);
exit(-1);
};
for (auto const& t : unsupported_types) {
EXPECT_DEATH(call_kernel(t), "");
}
}
#endif
|
436c26b194777e77eae5859a2a40e85b8a0f2cee.hip | // !!! This is a file automatically generated by hipify!!!
/*
CUDAapsp.cu
* This is a parallel implementation of the Floyd-Warshall algorithm, for
* finding shortest paths in a weighted graph with positive or negative
* edge weights (without negative cycles). A single execution of the
* algorithm can find the lengths (summed weights) of shortest paths
* between all pairs of vertices.
*
* A random graph is generated, modeled by a NxN array of weights
* between the graph vertices. The missing edges between vertices are
* implemented as weights above the weight limit w.
*
* The implementation uses CUDA.
* For every block, BLOCK_SIZExBLOCK_SIZE CUDA threads are used for a
 * total of NxN CUDA threads.
* cell of the Floyd-Warshall distance array.
* The number of blocks is variable but the number of threads is constant.
*
 * The CUDA code contains the host function:
* oneOneNo()
*
* and the kernel function:
* floydWarshall_p1(float *dev_dist,size_t pitch,int n)
*
* The CUDA part uses only global memory and not shared memory!
Command line arguments:
	n = the number of vertices in the graph
	w = the max weight between vertices
	p = the probability of generating an edge
*/
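/*
 Usage sketch (editor's note; the binary name below is only illustrative):
   ./CUDAapsp 10 100 0.5
 runs with n = 2^10 vertices (main reads argv[1] as the base-2 exponent of n),
 a maximum edge weight of 100 and an edge-generation probability of 0.5.
*/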
/*
------- ----------------------
Brouzos Rafael [email protected] www.github.com/bronzeRaf
-----------------------------
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define BLOCK_SIZE 16
//global variables
struct timeval startwtime, endwtime;
double seq_time;
int n,w;
float **a,**dist,**tesd,p;
//declare functions
void makeAdjacency();
void hostAlloc();
void init();
void floydWarshall_serial();
void oneOneNo();
void tester();
void initTest();
__global__ void floydWarshall_p1(float *dev_dist,size_t pitch,int en);
__global__ void floydWarshall_p2(float *dev_dist,size_t pitch,int en);
int main(int argc, char **argv){
//check arguments
if (argc != 4) {
printf("non acceptable input error\n");
exit(3); //error code 3 if arqs less or more
}
n= 1<<atoi(argv[1]);
w= atoi(argv[2]);
p= atof(argv[3]);
printf("n w p serial OneOneNo\n");
printf("%d %d %f ",n,w,p);
hostAlloc();
makeAdjacency();
gettimeofday (&startwtime, NULL);
floydWarshall_serial();
gettimeofday (&endwtime, NULL);
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("%f ", seq_time);
initTest();
gettimeofday (&startwtime, NULL);
oneOneNo();
gettimeofday (&endwtime, NULL);
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("%f\n", seq_time);
free(tesd);
free(dist);
free(a);
}
/**
  Makes adjacency matrix a(1:n,1:n) where an edge is generated with
  probability p and random edge weights (0:w).
  Instead of infinity (if vertices are unconnected) we put a value over w
**/
void makeAdjacency(){
int i,j;
float ran;
srand(time(NULL)); //initializing rand()
for(i=0;i<n;i++){
for(j=0;j<n;j++){
ran=((float)rand())/(float)(RAND_MAX); //random float [0,1]
			//check if vertices i--->j are connected
if(ran>p){
//if not connected weight is out of the limit
a[i][j]=w+100;
}
else{
ran=((float)rand())/(float)(RAND_MAX); //random float [0,1]
a[i][j]=ran*w; //random float [0,w]
}
}
//i-->i weight 0
a[i][i]=0;
}
}
/**
  Applies the Floyd-Warshall algorithm to the graph
**/
void floydWarshall_serial(){
int i, j, k;
float temp;
//init dist
init();
//main algorithm
for(k=0;k<n;k++){
for(i=0;i<n;i++){
for(j=0;j<n;j++){
temp=dist[i][k]+dist[k][j];
if(dist[i][j]>temp){
dist[i][j]=temp;
}
}
}
}
}
/**
Allocates memory for weight and distance arrays
**/
void hostAlloc(){
int i;
a=(float **) malloc(n*sizeof(float*));
dist=(float **) malloc(n*sizeof(float*));
tesd=(float **) malloc(n*sizeof(float*));
for(i=0;i<n;i++){
a[i]=(float *) malloc(n*sizeof(float));
dist[i]=(float *) malloc(n*sizeof(float));
tesd[i]=(float *) malloc(n*sizeof(float));
}
}
/**
initializing distance array with weight values
**/
void init(){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++){
dist[i][j]=a[i][j];//simple weight initialization of dist array (over the w limit is infinity)
}
dist[i][i]=0;//vertex from itself distance(weight) is 0
}
}
/**
Host Function for executing 1 cell per thread without shared memory (host function)
**/
void oneOneNo(){
//init dist
init();
float *dev_dist; //device memory dist
size_t pitch;
//memory allocation in device memory
hipMallocPitch(&dev_dist, &pitch, n * sizeof(float), n);
//copy dist array to global memory at device
hipMemcpy2D(dev_dist,pitch,dist,n*sizeof(float),n*sizeof(float),n,hipMemcpyHostToDevice);
//call kernel
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE);//threads per block = BLOCK_SIZE^2
dim3 numBlocks(n / threadsPerBlock.x, n / threadsPerBlock.y);//blocks per grid
hipLaunchKernelGGL(( floydWarshall_p1), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_dist, pitch, n);//call kernel
hipDeviceSynchronize();
//get results from device to host memory
hipMemcpy2D(dist,n*sizeof(float),dev_dist,pitch,n*sizeof(float),n,hipMemcpyDeviceToHost);
	//we have results (minimum weight path) in dist array
hipFree(dev_dist);
}
/**
  Kernel function for executing 1 cell per thread without shared memory
**/
__global__ void floydWarshall_p1(float *dev_dist,size_t pitch,int en){
float temp, d1, d2, *row;
int k;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i<en && j<en){
for(k=0;k<en;k++){
row = (float*)((char*)dev_dist + i*pitch);
d1=row[k]; //=dist[i][k]
row = (float*)((char*)dev_dist + k*pitch);
d2=row[j]; //=dist[k][j]
row = (float*)((char*)dev_dist + i*pitch);
temp=d1+d2;
if(row[j]>temp){
row[j]=temp; //=dist[i][j]
}
}
}
__syncthreads();
}
/**
Initializes test array with distance values. It makes a clone of the
serial distance array for testing and validation
**/
void initTest(){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++){
tesd[i][j]=dist[i][j];
}
}
}
/**
It tests every cell of the parallel distance array
with the serial one to test and validate results
**/
void tester(){
int i,j,flag=0;
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(dist[i][j] != tesd[i][j]){
flag=1;
break;
}
}
if(flag==1){
printf("ALERT'''''''''''''different than serial'''''''''''''\n");
break;
}
}
if(flag==0)
printf("everything ok in test\n");
}
| 436c26b194777e77eae5859a2a40e85b8a0f2cee.cu | /*
CUDAapsp.cu
* This is a parallel implementation of the Floyd-Warshall algorithm, for
* finding shortest paths in a weighted graph with positive or negative
* edge weights (without negative cycles). A single execution of the
* algorithm can find the lengths (summed weights) of shortest paths
* between all pairs of vertices.
*
* A random graph is generated, modeled by a NxN array of weights
* between the graph vertices. The missing edges between vertices are
* implemented as weights above the weight limit w.
*
* The implementation uses CUDA.
* For every block, BLOCK_SIZExBLOCK_SIZE CUDA threads are used for a
 * total of NxN CUDA threads.
* cell of the Floyd-Warshall distance array.
* The number of blocks is variable but the number of threads is constant.
*
 * The CUDA code contains the host function:
* oneOneNo()
*
* and the kernel function:
* floydWarshall_p1(float *dev_dist,size_t pitch,int n)
*
* The CUDA part uses only global memory and not shared memory!
Command line arguments:
	n = the number of vertices in the graph
	w = the max weight between vertices
	p = the probability of generating an edge
*/
/*
------- ----------------------
Brouzos Rafael [email protected] www.github.com/bronzeRaf
-----------------------------
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define BLOCK_SIZE 16
//global variables
struct timeval startwtime, endwtime;
double seq_time;
int n,w;
float **a,**dist,**tesd,p;
//declare functions
void makeAdjacency();
void hostAlloc();
void init();
void floydWarshall_serial();
void oneOneNo();
void tester();
void initTest();
__global__ void floydWarshall_p1(float *dev_dist,size_t pitch,int en);
__global__ void floydWarshall_p2(float *dev_dist,size_t pitch,int en);
int main(int argc, char **argv){
//check arguments
if (argc != 4) {
printf("non acceptable input error\n");
exit(3); //error code 3 if arqs less or more
}
n= 1<<atoi(argv[1]);
w= atoi(argv[2]);
p= atof(argv[3]);
printf("n w p serial OneOneNo\n");
printf("%d %d %f ",n,w,p);
hostAlloc();
makeAdjacency();
gettimeofday (&startwtime, NULL);
floydWarshall_serial();
gettimeofday (&endwtime, NULL);
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("%f ", seq_time);
initTest();
gettimeofday (&startwtime, NULL);
oneOneNo();
gettimeofday (&endwtime, NULL);
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("%f\n", seq_time);
free(tesd);
free(dist);
free(a);
}
/**
  Makes adjacency matrix a(1:n,1:n) where an edge is generated with
  probability p and random edge weights (0:w).
  Instead of infinity (if vertices are unconnected) we put a value over w
**/
void makeAdjacency(){
int i,j;
float ran;
srand(time(NULL)); //initializing rand()
for(i=0;i<n;i++){
for(j=0;j<n;j++){
ran=((float)rand())/(float)(RAND_MAX); //random float [0,1]
			//check if vertices i--->j are connected
if(ran>p){
//if not connected weight is out of the limit
a[i][j]=w+100;
}
else{
ran=((float)rand())/(float)(RAND_MAX); //random float [0,1]
a[i][j]=ran*w; //random float [0,w]
}
}
//i-->i weight 0
a[i][i]=0;
}
}
/**
  Applies the Floyd-Warshall algorithm to the graph
**/
void floydWarshall_serial(){
int i, j, k;
float temp;
//init dist
init();
//main algorithm
for(k=0;k<n;k++){
for(i=0;i<n;i++){
for(j=0;j<n;j++){
temp=dist[i][k]+dist[k][j];
if(dist[i][j]>temp){
dist[i][j]=temp;
}
}
}
}
}
/**
Allocates memory for weight and distance arrays
**/
void hostAlloc(){
int i;
a=(float **) malloc(n*sizeof(float*));
dist=(float **) malloc(n*sizeof(float*));
tesd=(float **) malloc(n*sizeof(float*));
for(i=0;i<n;i++){
a[i]=(float *) malloc(n*sizeof(float));
dist[i]=(float *) malloc(n*sizeof(float));
tesd[i]=(float *) malloc(n*sizeof(float));
}
}
/**
initializing distance array with weight values
**/
void init(){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++){
dist[i][j]=a[i][j];//simple weight initialization of dist array (over the w limit is infinity)
}
dist[i][i]=0;//vertex from itself distance(weight) is 0
}
}
/**
Host Function for executing 1 cell per thread without shared memory (host function)
**/
void oneOneNo(){
//init dist
init();
float *dev_dist; //device memory dist
size_t pitch;
//memory allocation in device memory
cudaMallocPitch(&dev_dist, &pitch, n * sizeof(float), n);
//copy dist array to global memory at device
cudaMemcpy2D(dev_dist,pitch,dist,n*sizeof(float),n*sizeof(float),n,cudaMemcpyHostToDevice);
//call kernel
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE);//threads per block = BLOCK_SIZE^2
dim3 numBlocks(n / threadsPerBlock.x, n / threadsPerBlock.y);//blocks per grid
floydWarshall_p1<<<numBlocks, threadsPerBlock>>>(dev_dist, pitch, n);//call kernel
cudaDeviceSynchronize();
//get results from device to host memory
cudaMemcpy2D(dist,n*sizeof(float),dev_dist,pitch,n*sizeof(float),n,cudaMemcpyDeviceToHost);
	//we have results (minimum weight path) in dist array
cudaFree(dev_dist);
}
/**
  Kernel function for executing 1 cell per thread without shared memory
**/
__global__ void floydWarshall_p1(float *dev_dist,size_t pitch,int en){
float temp, d1, d2, *row;
int k;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i<en && j<en){
for(k=0;k<en;k++){
row = (float*)((char*)dev_dist + i*pitch);
d1=row[k]; //=dist[i][k]
row = (float*)((char*)dev_dist + k*pitch);
d2=row[j]; //=dist[k][j]
row = (float*)((char*)dev_dist + i*pitch);
temp=d1+d2;
if(row[j]>temp){
row[j]=temp; //=dist[i][j]
}
}
}
__syncthreads();
}
/**
Initializes test array with distance values. It makes a clone of the
serial distance array for testing and validation
**/
void initTest(){
int i,j;
for(i=0;i<n;i++){
for(j=0;j<n;j++){
tesd[i][j]=dist[i][j];
}
}
}
/**
It tests every cell of the parallel distance array
with the serial one to test and validate results
**/
void tester(){
int i,j,flag=0;
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(dist[i][j] != tesd[i][j]){
flag=1;
break;
}
}
if(flag==1){
printf("ALERT'''''''''''''different than serial'''''''''''''\n");
break;
}
}
if(flag==0)
printf("everything ok in test\n");
}
|
7527c24827844ae8868a96e26c5e21647848603a.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include <stdio.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
/*******************/
/* iDivUp FUNCTION */
/*******************/
extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpu_assert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(hipError_t ans) { gpu_assert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
static const char *_cudaGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if(CUSOLVER_STATUS_SUCCESS != err) {
        fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                __FILE__, __LINE__, err, _cudaGetErrorEnum(err));
        hipDeviceReset(); assert(0);
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
| 7527c24827844ae8868a96e26c5e21647848603a.cu | /*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include <cuda.h>
#include <cusolverDn.h>
/*******************/
/* iDivUp FUNCTION */
/*******************/
extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpu_assert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(cudaError_t ans) { gpu_assert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
static const char *_cudaGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if(CUSOLVER_STATUS_SUCCESS != err) {
        fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                __FILE__, __LINE__, err, _cudaGetErrorEnum(err));
        cudaDeviceReset(); assert(0);
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
|
bbb31ac5d5d42b83221bea5998fd9319453a2258.hip | // !!! This is a file automatically generated by hipify!!!
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <rocblas.h>
#include "matmult_transfer_gpu.h"
extern "C" {
// MULTI_GPU split A version - hiding overlap.
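// Editor's summary of the scheme implemented below: rows of A and C are partitioned
// across the devices, B is replicated on each device, and every device's share is cut
// into numSplits row blocks handled by separate OpenMP threads, so the host<->device
// copies of one block can overlap with the DGEMM of another via per-thread streams.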
void matmult_gpu3(int m, int n, int k, double *A_in, double *B, double *C_in)
{
omp_set_nested(1);
long size_A = m * k * sizeof(double);
long size_B = k * n * sizeof(double);
long size_C = m * n * sizeof(double);
int numDevices = 1;
//hipGetDeviceCount(&numDevices);
m /= numDevices;
size_A /= numDevices;
size_C /= numDevices;
#pragma omp parallel for firstprivate(m, size_A, size_C)
for (int device = 0; device < numDevices; device++) {
double *A = A_in + m * k * device;
double *C = C_in + m * n * device;
// Allocate on device
double *d_A, *d_B, *d_C;
hipSetDevice(device);
allocate_on_gpu(m, n, k, &d_A, &d_B, &d_C);
// Transfer B to device
checkCudaErrors(hipMemcpy(d_B, B, size_B, hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(d_C, 0, size_C));
int numSplits = 8;
m /= numSplits;
size_A /= numSplits;
size_C /= numSplits;
#pragma omp parallel for
for (int split = 0; split < numSplits; split++) {
hipSetDevice(device);
hipblasHandle_t handle;
hipblasCreate(&handle);
checkCudaErrors(hipMemcpyAsync(d_A + m * k * split, A + m * k * split, size_A, hipMemcpyHostToDevice));
double time0 = omp_get_wtime();
const double alpha = 1.0;
const double beta = 0.0;
hipblasSetStream(handle, cudaStreamPerThread);
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, d_B, n, d_A + m * k * split, k, &beta, d_C + m * n * split, n); // Row major.
checkCudaErrors(hipMemcpyAsync(C + m * n * split, d_C + m * n * split, size_C, hipMemcpyDeviceToHost, cudaStreamPerThread));
checkCudaErrors(hipStreamSynchronize(cudaStreamPerThread));
time0 = omp_get_wtime() - time0; printf("Computing C = A * B | %5.4f s %5.4f Gflops\n", time0, 2.0 * m * n * k * 1e-9 / time0);
hipblasDestroy(handle);
}
free_on_gpu(d_A, d_B, d_C);
}
}
}
| bbb31ac5d5d42b83221bea5998fd9319453a2258.cu | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cublas_v2.h>
#include "matmult_transfer_gpu.h"
extern "C" {
// MULTI_GPU split A version - hiding overlap.
void matmult_gpu3(int m, int n, int k, double *A_in, double *B, double *C_in)
{
omp_set_nested(1);
long size_A = m * k * sizeof(double);
long size_B = k * n * sizeof(double);
long size_C = m * n * sizeof(double);
int numDevices = 1;
//cudaGetDeviceCount(&numDevices);
m /= numDevices;
size_A /= numDevices;
size_C /= numDevices;
#pragma omp parallel for firstprivate(m, size_A, size_C)
for (int device = 0; device < numDevices; device++) {
double *A = A_in + m * k * device;
double *C = C_in + m * n * device;
// Allocate on device
double *d_A, *d_B, *d_C;
cudaSetDevice(device);
allocate_on_gpu(m, n, k, &d_A, &d_B, &d_C);
// Transfer B to device
checkCudaErrors(cudaMemcpy(d_B, B, size_B, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(d_C, 0, size_C));
int numSplits = 8;
m /= numSplits;
size_A /= numSplits;
size_C /= numSplits;
#pragma omp parallel for
for (int split = 0; split < numSplits; split++) {
cudaSetDevice(device);
cublasHandle_t handle;
cublasCreate(&handle);
checkCudaErrors(cudaMemcpyAsync(d_A + m * k * split, A + m * k * split, size_A, cudaMemcpyHostToDevice));
double time0 = omp_get_wtime();
const double alpha = 1.0;
const double beta = 0.0;
cublasSetStream(handle, cudaStreamPerThread);
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, d_B, n, d_A + m * k * split, k, &beta, d_C + m * n * split, n); // Row major.
checkCudaErrors(cudaMemcpyAsync(C + m * n * split, d_C + m * n * split, size_C, cudaMemcpyDeviceToHost, cudaStreamPerThread));
checkCudaErrors(cudaStreamSynchronize(cudaStreamPerThread));
time0 = omp_get_wtime() - time0; printf("Computing C = A * B | %5.4f s %5.4f Gflops\n", time0, 2.0 * m * n * k * 1e-9 / time0);
cublasDestroy(handle);
}
free_on_gpu(d_A, d_B, d_C);
}
}
}
|
8aba0cfd1fb7e5453cbb2a71e3c10435ebdc4bc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <THH/THHApply.cuh>
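// Editor's note: the functors below implement the ELU map
//   f(x) = negcoef * (exp(negiptcoef * x) - 1) for x <= 0, and f(x) = poscoef * x otherwise;
// the backward functor rewrites the negative-branch derivative in terms of the saved
// output, since output + negcoef equals negcoef * exp(negiptcoef * x) there.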
template <typename T>
struct ELUupdateOutput_functor
{
const T negcoef_;
const T poscoef_;
const T negiptcoef_;
ELUupdateOutput_functor(T negcoef, T poscoef, T negiptcoef)
: negcoef_(negcoef)
, poscoef_(poscoef)
, negiptcoef_(negiptcoef)
{}
__device__ void operator()(T *output, const T *input) const
{
*output = *input <= 0 ? (exp(*input * negiptcoef_) - 1) * negcoef_ : *input * poscoef_;
}
};
// in-place variant
template <typename T>
struct ELUupdateOutputIP_functor
{
const T negcoef_;
const T poscoef_;
const T negiptcoef_;
ELUupdateOutputIP_functor(T negcoef, T poscoef, T negiptcoef)
: negcoef_(negcoef)
, poscoef_(poscoef)
, negiptcoef_(negiptcoef)
{}
__device__ void operator()(T *x) const
{
*x = *x <= 0 ? (exp(*x * negiptcoef_) - 1) * negcoef_ : *x * poscoef_;
}
};
template <typename T>
struct ELUupdateGradInput_functor
{
const T negcoef_;
const T poscoef_;
const T negiptcoef_;
ELUupdateGradInput_functor(T negcoef, T poscoef, T negiptcoef)
: negcoef_(negcoef)
, poscoef_(poscoef)
, negiptcoef_(negiptcoef)
{}
__device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
{
*gradInput = (*output) <= 0 ? (*gradOutput * negiptcoef_ * (*output + negcoef_)) : (*gradOutput * poscoef_);
}
};
#include "generic/ELU.cu"
#include "THHGenerateFloatTypes.h"
| 8aba0cfd1fb7e5453cbb2a71e3c10435ebdc4bc0.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include <THC/THCApply.cuh>
template <typename T>
struct ELUupdateOutput_functor
{
const T negcoef_;
const T poscoef_;
const T negiptcoef_;
ELUupdateOutput_functor(T negcoef, T poscoef, T negiptcoef)
: negcoef_(negcoef)
, poscoef_(poscoef)
, negiptcoef_(negiptcoef)
{}
__device__ void operator()(T *output, const T *input) const
{
*output = *input <= 0 ? (exp(*input * negiptcoef_) - 1) * negcoef_ : *input * poscoef_;
}
};
// in-place variant
template <typename T>
struct ELUupdateOutputIP_functor
{
const T negcoef_;
const T poscoef_;
const T negiptcoef_;
ELUupdateOutputIP_functor(T negcoef, T poscoef, T negiptcoef)
: negcoef_(negcoef)
, poscoef_(poscoef)
, negiptcoef_(negiptcoef)
{}
__device__ void operator()(T *x) const
{
*x = *x <= 0 ? (exp(*x * negiptcoef_) - 1) * negcoef_ : *x * poscoef_;
}
};
template <typename T>
struct ELUupdateGradInput_functor
{
const T negcoef_;
const T poscoef_;
const T negiptcoef_;
ELUupdateGradInput_functor(T negcoef, T poscoef, T negiptcoef)
: negcoef_(negcoef)
, poscoef_(poscoef)
, negiptcoef_(negiptcoef)
{}
__device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
{
*gradInput = (*output) <= 0 ? (*gradOutput * negiptcoef_ * (*output + negcoef_)) : (*gradOutput * poscoef_);
}
};
#include "generic/ELU.cu"
#include "THCGenerateFloatTypes.h"
|
765c697bd8c53b783f62791e1b9e26563af13199.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmdotc.cu, normal z -> s, Tue Aug 30 09:38:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define REAL
// dot product for multiple vectors
__global__ void
magma_smdotc1_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// block reduction for 1 vectors
__global__ void
magma_smdotc1_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] +=
( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
/**
Purpose
-------
Computes the scalar product of a set of 1 vectors such that
skp[0] = [ <v_0,w_0> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
                vector[1] of scalar products [<v_0, w_0>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
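// Editor's note on the reduction scheme shared by the magma_smdotc* routines in this file:
// kernel_1 writes one partial sum per thread block into the workspace d1; the host loop
// then applies kernel_2 repeatedly, shrinking the number of partial sums by roughly a
// block-size factor per pass while ping-ponging between d1 and d2, and the final value(s)
// are copied to the host with magma_sgetvector.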
extern "C" magma_int_t
magma_smdotc1(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (local_block_size) * sizeof( float ); // 1 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_smdotc1_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_smdotc1_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 1 , aux1, 1, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 2 dot products //
// initialize arrays with zero
__global__ void
magma_smdotc2_gpumemzero(
float * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<2; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_smdotc2_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * v1,
float * w1,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 2 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 2 vectors
__global__ void
magma_smdotc2_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 2 vectors such that
    skp[0,1] = [ <v_0,w_0>, <v_1,w_1> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
v1 magmaFloat_ptr
input vector
@param[in]
w1 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
                vector[2] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc2(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr v1,
magmaFloat_ptr w1,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
    int Ms = 2 * (local_block_size) * sizeof( float ); // 2 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_smdotc2_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, v1, w1, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_smdotc2_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 2 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 3 dot products //
// initialize arrays with zero
__global__ void
magma_smdotc3_gpumemzero(
float * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<3; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_smdotc3_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * v1,
float * w1,
float * v2,
float * w2,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 3 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_S_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 3 vectors
__global__ void
magma_smdotc3_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<3; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
    Computes the scalar product of a set of 3 vectors such that
    skp[0,1,2] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
v1 magmaFloat_ptr
input vector
@param[in]
w1 magmaFloat_ptr
input vector
@param[in]
v2 magmaFloat_ptr
input vector
@param[in]
w2 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[3] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc3(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr v1,
magmaFloat_ptr w1,
magmaFloat_ptr v2,
magmaFloat_ptr w2,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
    int Ms = 3 * (local_block_size) * sizeof( float ); // 3 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
// magma_smdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n );
hipLaunchKernelGGL(( magma_smdotc3_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, v1, w1, v2, w2, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_smdotc3_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 3 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 4 dot products //
// initialize arrays with zero
__global__ void
magma_smdotc4_gpumemzero(
float * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<4; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_smdotc4_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * v1,
float * w1,
float * v2,
float * w2,
float * v3,
float * w3,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 4 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_S_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_S_ZERO;
temp[ Idx + 3*blockDim.x ] = ( i < n ) ?
v3[ i ] * w3[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 4 vectors
__global__ void
magma_smdotc4_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<4; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 4 vectors such that
    skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v_3,w_3> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
v1 magmaFloat_ptr
input vector
@param[in]
w1 magmaFloat_ptr
input vector
@param[in]
v2 magmaFloat_ptr
input vector
@param[in]
w2 magmaFloat_ptr
input vector
@param[in]
v3 magmaFloat_ptr
input vector
@param[in]
w3 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[4] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc4(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr v1,
magmaFloat_ptr w1,
magmaFloat_ptr v2,
magmaFloat_ptr w2,
magmaFloat_ptr v3,
magmaFloat_ptr w3,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4 * (local_block_size) * sizeof( float ); // 4 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_smdotc4_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_smdotc4_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 4 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
| 765c697bd8c53b783f62791e1b9e26563af13199.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmdotc.cu, normal z -> s, Tue Aug 30 09:38:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define REAL
// dot product for multiple vectors
__global__ void
magma_smdotc1_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// block reduction for 1 vectors
__global__ void
magma_smdotc1_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] +=
( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
/**
Purpose
-------
Computes the scalar product of a set of 1 vectors such that
skp[0] = [ <v_0,w_0> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
                vector[1] of scalar products [<v_0, w_0>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc1(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (local_block_size) * sizeof( float ); // 1 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_smdotc1_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_smdotc1_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 1 , aux1, 1, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 2 dot products //
// initialize arrays with zero
__global__ void
magma_smdotc2_gpumemzero(
float * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<2; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_smdotc2_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * v1,
float * w1,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 2 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 2 vectors
__global__ void
magma_smdotc2_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 2 vectors such that
skp[0,1] = [ <v_0,w_0>, <v_1,w_1> ]
Returns the vector skp.
In case fewer dot products are required, an easy workaround is
given by doubling the input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
v1 magmaFloat_ptr
input vector
@param[in]
w1 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[2] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc2(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr v1,
magmaFloat_ptr w1,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2 * (local_block_size) * sizeof( float ); // 2 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_smdotc2_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, v1, w1, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_smdotc2_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 2 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 3 dot products //
// initialize arrays with zero
__global__ void
magma_smdotc3_gpumemzero(
float * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<3; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_smdotc3_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * v1,
float * w1,
float * v2,
float * w2,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 3 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_S_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 3 vectors
__global__ void
magma_smdotc3_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<3; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 3 vectors such that
skp[0,1,2] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2> ]
Returns the vector skp.
In case fewer dot products are required, an easy workaround is
given by doubling the input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
v1 magmaFloat_ptr
input vector
@param[in]
w1 magmaFloat_ptr
input vector
@param[in]
v2 magmaFloat_ptr
input vector
@param[in]
w2 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[3] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc3(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr v1,
magmaFloat_ptr w1,
magmaFloat_ptr v2,
magmaFloat_ptr w2,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 3 * (local_block_size) * sizeof( float ); // 3 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
// magma_smdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n );
magma_smdotc3_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, v1, w1, v2, w2, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_smdotc3_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 3 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 4 dot products //
// initialize arrays with zero
__global__ void
magma_smdotc4_gpumemzero(
float * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<4; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_smdotc4_kernel_1(
int Gs,
int n,
float * v0,
float * w0,
float * v1,
float * w1,
float * v2,
float * w2,
float * v3,
float * w3,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 4 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_S_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_S_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_S_ZERO;
temp[ Idx + 3*blockDim.x ] = ( i < n ) ?
v3[ i ] * w3[ i ] : MAGMA_S_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 4 vectors
__global__ void
magma_smdotc4_kernel_2(
int Gs,
int n,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<4; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 4 vectors such that
skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v_3,w_3> ]
Returns the vector skp.
In case fewer dot products are required, an easy workaround is
given by doubling the input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloat_ptr
input vector
@param[in]
w0 magmaFloat_ptr
input vector
@param[in]
v1 magmaFloat_ptr
input vector
@param[in]
w1 magmaFloat_ptr
input vector
@param[in]
v2 magmaFloat_ptr
input vector
@param[in]
w2 magmaFloat_ptr
input vector
@param[in]
v3 magmaFloat_ptr
input vector
@param[in]
w3 magmaFloat_ptr
input vector
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[4] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc4(
magma_int_t n,
magmaFloat_ptr v0,
magmaFloat_ptr w0,
magmaFloat_ptr v1,
magmaFloat_ptr w1,
magmaFloat_ptr v2,
magmaFloat_ptr w2,
magmaFloat_ptr v3,
magmaFloat_ptr w3,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4 * (local_block_size) * sizeof( float ); // 4 skp
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_smdotc4_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_smdotc4_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_sgetvector( 4 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
|
28a4562f46c3e325533df72aaf45efd8ac416984.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "caffe/util/binary_kernels.hpp"
namespace caffe
{
#define BLOCK_SIZE 16
// CUDA tutorial: http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
template <typename Dtype>
__global__ void gemm(Dtype *A, Dtype *alpha, Dtype *B, Dtype *C, int m, int n, int k)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread block computes one sub-matrix Csub of C
Dtype *Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol];
// Shared memory used to store Asub and Bsub respectively
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
// Each thread computes one element of Csub
// by accumulating results into Cvalue
// block_size = 16 -> 256 threads, one per Csub element
Dtype Cvalue = 0.0;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int i = 0; i < (n / BLOCK_SIZE); ++i)
{
// Get sub-matrix Asub of A
Dtype *Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i];
// Get sub-matrix Bsub of B
Dtype *Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[row * n + col];
Bs[row][col] = Bsub[row * k + col];
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int j = 0; j < BLOCK_SIZE; ++j)
Cvalue += As[row][j] * Bs[j][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
if (col + blockCol * BLOCK_SIZE < k && row + blockRow * BLOCK_SIZE < m)
Csub[row * k + col] = alpha[row + blockRow * BLOCK_SIZE]*Cvalue;
}
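// Illustrative launch sketch (an assumption, mirroring the double-precision
// specialization at the bottom of this file): one BLOCK_SIZE x BLOCK_SIZE
// thread block computes one tile of C.
//
//     dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);
//     dim3 gridDim(k / BLOCK_SIZE + 1, m / BLOCK_SIZE + 1);
//     gemm<float><<<gridDim, blockDim>>>(A, alpha, B, C, m, n, k);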
// 32 single float array -> 32 bits unsigned int
__device__ unsigned int concatenate(float *array)
{
unsigned int rvalue = 0;
unsigned int sign;
for (int i = 0; i < 32; i++)
{
sign = (array[i] >= 0);
rvalue = rvalue | (sign << i);
}
return rvalue;
}
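// Worked example of the packing above (illustrative): for
// array = { -0.5f, 1.2f, 3.0f, -0.1f, 0.0f, ... }, the test (array[i] >= 0)
// yields bits 0,1,1,0,1,..., i.e. bit i of the result is set exactly when
// array[i] is non-negative, so one unsigned int stores the signs of 32
// consecutive floats.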
__global__ void concatenate_rows_kernel(float *a, unsigned int *b, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
b[i] = concatenate(&a[i * 32]);
}
__global__ void concatenate_cols_kernel(float *a, unsigned int *b, int m, int n)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < n)
{
__shared__ float array[32];
for (int i = 0; i < m; i += 32)
{
for (int k = 0; k < 32; k++)
array[k] = a[j + n * (i + k)];
b[j + n * i / 32] = concatenate(array);
}
}
}
// 32 bits unsigned int -> 32 single float array
// TODO: the array allocation should not be done here
__device__ float *deconcatenate(unsigned int x)
{
float *array = new float[32];
for (int i = 0; i < 32; i++)
{
array[i] = (x & (1 << i)) >> i;
}
return array;
}
__global__ void deconcatenate_rows_kernel(unsigned int *a, float *b, int size)
{
float *array;
for (int i = 0; i < size; i += 32)
{
array = deconcatenate(a[i / 32]);
for (int k = 0; k < 32; k++)
b[i + k] = array[k];
delete[] array;
}
}
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
__global__ void xnor_gemm(unsigned int *A, float *fA, unsigned int *B, float *C, int m, int n, int k)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread block computes one sub-matrix Csub of C
float *Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol];
// Shared memory used to store Asub and Bsub respectively
__shared__ unsigned int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned int Bs[BLOCK_SIZE][BLOCK_SIZE];
// Each thread computes one element of Csub
// by accumulating results into Cvalue
// block_size = 16 -> 256 threads, one per Csub element
unsigned int Cvalue = 0;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int i = 0; i < (n / BLOCK_SIZE); ++i)
{
// Get sub-matrix Asub of A
unsigned int *Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i];
// Get sub-matrix Bsub of B
unsigned int *Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[row * n + col];
Bs[row][col] = Bsub[row * k + col];
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
// THIS IS THE MOST INTERESTING PART
for (int j = 0; j < BLOCK_SIZE; ++j)
Cvalue += __popc(As[row][j] ^ Bs[j][col]);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
if (col + blockCol * BLOCK_SIZE < k && row + blockRow * BLOCK_SIZE < m)
Csub[row * k + col] = fA[row + blockRow * BLOCK_SIZE] * (-(2 * (float)Cvalue - 32 * n));
}
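// Why the line above is a dot product (worked equation): each packed word
// encodes 32 binarized (+1/-1) values, so a row/column pair holds 32*n of
// them. __popc(a ^ b) counts sign disagreements, hence
//     dot = (#agreements) - (#disagreements)
//         = (32*n - Cvalue) - Cvalue
//         = 32*n - 2*Cvalue = -(2*Cvalue - 32*n),
// which is then scaled by the per-row factor fA[...].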
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
__global__ void xnor_gemm2(const int nThreads, unsigned int *A, float *fA, unsigned int *B, float *C, int m, int n, int k)
{
CUDA_KERNEL_LOOP(index, nThreads) {
const int row = index / k;
const int col = index % k;
unsigned int Cvalue = 0;
for (int t = 0; t < n; t++)
{
Cvalue += __popc(A[row*n+t] ^ B[t*k+col]);
}
C[index] = fA[row] * (-(2 * (float)Cvalue - 32 * n));
}
}
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
template <>
void xnor_gemm(const float *fw, const float* fA, const float *fB, float *fC,
unsigned int *uiA, unsigned int *uiB, int m, int n, int k)
{
//LOG(INFO) << "m=" << m << ", n=" << n << ", k=" << k;
CHECK_EQ(n % 32, 0) << "n must be div by 32";
int block = 64, grid = m * n / (block * 32) + 1;
concatenate_rows_kernel << <grid, block >> >(const_cast<float*>(fw), uiA, m * n / 32);
grid = k / block + 1;
concatenate_cols_kernel << <grid, block >> >(const_cast<float*>(fB), uiB, n, k);
/*dim3 blockDim(16, 16);
dim3 gridDim(k / 16 + 1, m / 16 + 1);
xnor_gemm << <gridDim, blockDim >> >(uiA, const_cast<float*>(fA), uiB, fC, m, n / 32, k);*/
xnor_gemm2 << <CAFFE_GET_BLOCKS(m*k), CAFFE_CUDA_NUM_THREADS >> >(m*k, uiA, const_cast<float*>(fA), uiB, fC, m, n / 32, k);
}
template <>
void xnor_gemm(const double *fw, const double *fA, const double *fB, double *fC,
unsigned int *uiA, unsigned int *uiB, int m, int n, int k)
{
dim3 blockDim(16, 16);
dim3 gridDim(k / 16 + 1, m / 16 + 1);
gemm<double> << <gridDim, blockDim >> >(const_cast<double*>(fw), const_cast<double*>(fA), const_cast<double*>(fB), fC, m, n, k);
}
} // namespace caffe | 28a4562f46c3e325533df72aaf45efd8ac416984.cu | #include <stdio.h>
#include "caffe/util/binary_kernels.hpp"
namespace caffe
{
#define BLOCK_SIZE 16
// CUDA tutorial: http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
template <typename Dtype>
__global__ void gemm(Dtype *A, Dtype *alpha, Dtype *B, Dtype *C, int m, int n, int k)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread block computes one sub-matrix Csub of C
Dtype *Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol];
// Shared memory used to store Asub and Bsub respectively
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
// Each thread computes one element of Csub
// by accumulating results into Cvalue
// block_size = 16 -> 256 threads, one per Csub element
Dtype Cvalue = 0.0;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int i = 0; i < (n / BLOCK_SIZE); ++i)
{
// Get sub-matrix Asub of A
Dtype *Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i];
// Get sub-matrix Bsub of B
Dtype *Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[row * n + col];
Bs[row][col] = Bsub[row * k + col];
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int j = 0; j < BLOCK_SIZE; ++j)
Cvalue += As[row][j] * Bs[j][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
if (col + blockCol * BLOCK_SIZE < k && row + blockRow * BLOCK_SIZE < m)
Csub[row * k + col] = alpha[row + blockRow * BLOCK_SIZE]*Cvalue;
}
// 32 single float array -> 32 bits unsigned int
__device__ unsigned int concatenate(float *array)
{
unsigned int rvalue = 0;
unsigned int sign;
for (int i = 0; i < 32; i++)
{
sign = (array[i] >= 0);
rvalue = rvalue | (sign << i);
}
return rvalue;
}
__global__ void concatenate_rows_kernel(float *a, unsigned int *b, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
b[i] = concatenate(&a[i * 32]);
}
__global__ void concatenate_cols_kernel(float *a, unsigned int *b, int m, int n)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < n)
{
__shared__ float array[32];
for (int i = 0; i < m; i += 32)
{
for (int k = 0; k < 32; k++)
array[k] = a[j + n * (i + k)];
b[j + n * i / 32] = concatenate(array);
}
}
}
// 32 bits unsigned int -> 32 single float array
// TODO: the array allocation should not be done here
__device__ float *deconcatenate(unsigned int x)
{
float *array = new float[32];
for (int i = 0; i < 32; i++)
{
array[i] = (x & (1 << i)) >> i;
}
return array;
}
__global__ void deconcatenate_rows_kernel(unsigned int *a, float *b, int size)
{
float *array;
for (int i = 0; i < size; i += 32)
{
array = deconcatenate(a[i / 32]);
for (int k = 0; k < 32; k++)
b[i + k] = array[k];
delete[] array;
}
}
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
__global__ void xnor_gemm(unsigned int *A, float *fA, unsigned int *B, float *C, int m, int n, int k)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread block computes one sub-matrix Csub of C
float *Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol];
// Shared memory used to store Asub and Bsub respectively
__shared__ unsigned int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned int Bs[BLOCK_SIZE][BLOCK_SIZE];
// Each thread computes one element of Csub
// by accumulating results into Cvalue
// block_size = 16 -> 256 threads, one per Csub element
unsigned int Cvalue = 0;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int i = 0; i < (n / BLOCK_SIZE); ++i)
{
// Get sub-matrix Asub of A
unsigned int *Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i];
// Get sub-matrix Bsub of B
unsigned int *Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[row * n + col];
Bs[row][col] = Bsub[row * k + col];
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
// THIS IS THE MOST INTERESTING PART
for (int j = 0; j < BLOCK_SIZE; ++j)
Cvalue += __popc(As[row][j] ^ Bs[j][col]);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
if (col + blockCol * BLOCK_SIZE < k && row + blockRow * BLOCK_SIZE < m)
Csub[row * k + col] = fA[row + blockRow * BLOCK_SIZE] * (-(2 * (float)Cvalue - 32 * n));
}
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
__global__ void xnor_gemm2(const int nThreads, unsigned int *A, float *fA, unsigned int *B, float *C, int m, int n, int k)
{
CUDA_KERNEL_LOOP(index, nThreads) {
const int row = index / k;
const int col = index % k;
unsigned int Cvalue = 0;
for (int t = 0; t < n; t++)
{
Cvalue += __popc(A[row*n+t] ^ B[t*k+col]);
}
C[index] = fA[row] * (-(2 * (float)Cvalue - 32 * n));
}
}
// A is shape (m,n), B is shape (n,k) and C is shape (m,k)
template <>
void xnor_gemm(const float *fw, const float* fA, const float *fB, float *fC,
unsigned int *uiA, unsigned int *uiB, int m, int n, int k)
{
//LOG(INFO) << "m=" << m << ", n=" << n << ", k=" << k;
CHECK_EQ(n % 32, 0) << "n must be div by 32";
int block = 64, grid = m * n / (block * 32) + 1;
concatenate_rows_kernel << <grid, block >> >(const_cast<float*>(fw), uiA, m * n / 32);
grid = k / block + 1;
concatenate_cols_kernel << <grid, block >> >(const_cast<float*>(fB), uiB, n, k);
/*dim3 blockDim(16, 16);
dim3 gridDim(k / 16 + 1, m / 16 + 1);
xnor_gemm << <gridDim, blockDim >> >(uiA, const_cast<float*>(fA), uiB, fC, m, n / 32, k);*/
xnor_gemm2 << <CAFFE_GET_BLOCKS(m*k), CAFFE_CUDA_NUM_THREADS >> >(m*k, uiA, const_cast<float*>(fA), uiB, fC, m, n / 32, k);
}
template <>
void xnor_gemm(const double *fw, const double *fA, const double *fB, double *fC,
unsigned int *uiA, unsigned int *uiB, int m, int n, int k)
{
dim3 blockDim(16, 16);
dim3 gridDim(k / 16 + 1, m / 16 + 1);
gemm<double> << <gridDim, blockDim >> >(const_cast<double*>(fw), const_cast<double*>(fA), const_cast<double*>(fB), fC, m, n, k);
}
} // namespace caffe |
af741a7266ac9f36f635fc7ac5b5b07c31ac0729.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include "prelu.h"
namespace nvinfer1
{
PReluPlugin::PReluPlugin(const std::vector<float>& gamma) : gamma_(gamma)
{
}
PReluPlugin::~PReluPlugin()
{
}
// create the plugin at runtime from a byte stream
PReluPlugin::PReluPlugin(const void* data, size_t length)
{
char *p = (char*)data;
input_size_ = reinterpret_cast<const int*>(p)[0];
p += sizeof(int);
gamma_.assign((float*)p, (float*)p + (length - sizeof(int)) / sizeof(float));
}
void PReluPlugin::serialize(void* buffer) const TRT_NOEXCEPT
{
*reinterpret_cast<int*>(buffer) = input_size_;
char *p = reinterpret_cast<char*>(buffer);
p += sizeof(int);
memcpy(p, gamma_.data(), gamma_.size() * sizeof(float));
}
size_t PReluPlugin::getSerializationSize() const TRT_NOEXCEPT
{
return sizeof(input_size_) + gamma_.size() * sizeof(float);
}
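// Serialized layout implied by the two methods above:
//     [ int input_size_ | float gamma_[0] ... gamma_[C-1] ]
// so the buffer size is sizeof(int) + gamma_.size() * sizeof(float), matching
// getSerializationSize() and the deserializing constructor.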
int PReluPlugin::initialize() TRT_NOEXCEPT
{
return 0;
}
Dims PReluPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT
{
assert(nbInputDims == 1);
assert(index == 0);
input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2];
// Output dimensions
return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
// Set plugin namespace
void PReluPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT
{
mPluginNamespace = pluginNamespace;
}
const char* PReluPlugin::getPluginNamespace() const TRT_NOEXCEPT
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType PReluPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool PReluPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool PReluPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT
{
return false;
}
void PReluPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void PReluPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT
{
}
// Detach the plugin object from its execution context.
void PReluPlugin::detachFromContext() TRT_NOEXCEPT {}
const char* PReluPlugin::getPluginType() const TRT_NOEXCEPT
{
return "PRelu_TRT";
}
const char* PReluPlugin::getPluginVersion() const TRT_NOEXCEPT
{
return "1";
}
void PReluPlugin::destroy() TRT_NOEXCEPT
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* PReluPlugin::clone() const TRT_NOEXCEPT
{
PReluPlugin *p = new PReluPlugin(gamma_);
p->input_size_ = input_size_;
p->setPluginNamespace(mPluginNamespace);
return p;
}
__global__ void prelu_kernel(const float *input, float *output, int num_elem, int input_size, int fm_size, const float* gamma) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
if (input[idx] >= 0.0f) {
output[idx] = input[idx];
return;
}
int c = (idx % input_size) / fm_size;
output[idx] = input[idx] * gamma[c];
}
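// PReLU recap (illustrative): with an NCHW layout, fm_size = H*W and
// input_size = C*H*W, so c above is the channel index and
//     f(x) = x              if x >= 0
//     f(x) = gamma[c] * x   otherwise
// e.g. gamma[c] = 0.25 maps an input of -2.0 to -0.5.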
void PReluPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
int block_size = thread_count_;
int grid_size = (input_size_ * batchSize + block_size - 1) / block_size;
void *dev_gamma;
assert(hipMalloc(&dev_gamma, sizeof(float) * gamma_.size()) == hipSuccess);
assert(hipMemcpy(dev_gamma, gamma_.data(), sizeof(float) * gamma_.size(), hipMemcpyHostToDevice) == hipSuccess);
hipLaunchKernelGGL(( prelu_kernel), dim3(grid_size), dim3(block_size), 0, 0, inputs[0], output, input_size_ * batchSize, input_size_, input_size_ / gamma_.size(), (const float*)dev_gamma);
assert(hipFree(dev_gamma) == hipSuccess);
}
int PReluPlugin::enqueue(int batchSize, const void*const * inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection PReluPluginCreator::mFC{};
std::vector<PluginField> PReluPluginCreator::mPluginAttributes;
PReluPluginCreator::PReluPluginCreator()
{
mPluginAttributes.emplace_back(PluginField("gamma", nullptr, PluginFieldType::kFLOAT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* PReluPluginCreator::getPluginName() const TRT_NOEXCEPT
{
return "PRelu_TRT";
}
const char* PReluPluginCreator::getPluginVersion() const TRT_NOEXCEPT
{
return "1";
}
const PluginFieldCollection* PReluPluginCreator::getFieldNames() TRT_NOEXCEPT
{
return &mFC;
}
IPluginV2IOExt* PReluPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT
{
std::vector<float> gamma;
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; ++i) {
const char* attrName = fields[i].name;
if (!strcmp(attrName, "gamma")) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
gamma.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
gamma.push_back(*w);
w++;
}
}
}
PReluPlugin* obj = new PReluPlugin(gamma);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* PReluPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT
{
// This object will be deleted when the network is destroyed, which will
// call PReluPlugin::destroy()
PReluPlugin* obj = new PReluPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| af741a7266ac9f36f635fc7ac5b5b07c31ac0729.cu | #include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include "prelu.h"
namespace nvinfer1
{
PReluPlugin::PReluPlugin(const std::vector<float>& gamma) : gamma_(gamma)
{
}
PReluPlugin::~PReluPlugin()
{
}
// create the plugin at runtime from a byte stream
PReluPlugin::PReluPlugin(const void* data, size_t length)
{
char *p = (char*)data;
input_size_ = reinterpret_cast<const int*>(p)[0];
p += sizeof(int);
gamma_.assign((float*)p, (float*)p + (length - sizeof(int)) / sizeof(float));
}
void PReluPlugin::serialize(void* buffer) const TRT_NOEXCEPT
{
*reinterpret_cast<int*>(buffer) = input_size_;
char *p = reinterpret_cast<char*>(buffer);
p += sizeof(int);
memcpy(p, gamma_.data(), gamma_.size() * sizeof(float));
}
size_t PReluPlugin::getSerializationSize() const TRT_NOEXCEPT
{
return sizeof(input_size_) + gamma_.size() * sizeof(float);
}
int PReluPlugin::initialize() TRT_NOEXCEPT
{
return 0;
}
Dims PReluPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT
{
assert(nbInputDims == 1);
assert(index == 0);
input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2];
// Output dimensions
return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
// Set plugin namespace
void PReluPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT
{
mPluginNamespace = pluginNamespace;
}
const char* PReluPlugin::getPluginNamespace() const TRT_NOEXCEPT
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType PReluPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool PReluPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool PReluPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT
{
return false;
}
void PReluPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void PReluPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT
{
}
// Detach the plugin object from its execution context.
void PReluPlugin::detachFromContext() TRT_NOEXCEPT {}
const char* PReluPlugin::getPluginType() const TRT_NOEXCEPT
{
return "PRelu_TRT";
}
const char* PReluPlugin::getPluginVersion() const TRT_NOEXCEPT
{
return "1";
}
void PReluPlugin::destroy() TRT_NOEXCEPT
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* PReluPlugin::clone() const TRT_NOEXCEPT
{
PReluPlugin *p = new PReluPlugin(gamma_);
p->input_size_ = input_size_;
p->setPluginNamespace(mPluginNamespace);
return p;
}
__global__ void prelu_kernel(const float *input, float *output, int num_elem, int input_size, int fm_size, const float* gamma) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
if (input[idx] >= 0.0f) {
output[idx] = input[idx];
return;
}
int c = (idx % input_size) / fm_size;
output[idx] = input[idx] * gamma[c];
}
void PReluPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
int block_size = thread_count_;
int grid_size = (input_size_ * batchSize + block_size - 1) / block_size;
void *dev_gamma;
assert(cudaMalloc(&dev_gamma, sizeof(float) * gamma_.size()) == cudaSuccess);
assert(cudaMemcpy(dev_gamma, gamma_.data(), sizeof(float) * gamma_.size(), cudaMemcpyHostToDevice) == cudaSuccess);
prelu_kernel<<<grid_size, block_size>>>(inputs[0], output, input_size_ * batchSize, input_size_, input_size_ / gamma_.size(), (const float*)dev_gamma);
assert(cudaFree(dev_gamma) == cudaSuccess);
}
int PReluPlugin::enqueue(int batchSize, const void*const * inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection PReluPluginCreator::mFC{};
std::vector<PluginField> PReluPluginCreator::mPluginAttributes;
PReluPluginCreator::PReluPluginCreator()
{
mPluginAttributes.emplace_back(PluginField("gamma", nullptr, PluginFieldType::kFLOAT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* PReluPluginCreator::getPluginName() const TRT_NOEXCEPT
{
return "PRelu_TRT";
}
const char* PReluPluginCreator::getPluginVersion() const TRT_NOEXCEPT
{
return "1";
}
const PluginFieldCollection* PReluPluginCreator::getFieldNames() TRT_NOEXCEPT
{
return &mFC;
}
IPluginV2IOExt* PReluPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT
{
std::vector<float> gamma;
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; ++i) {
const char* attrName = fields[i].name;
if (!strcmp(attrName, "gamma")) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
gamma.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
gamma.push_back(*w);
w++;
}
}
}
PReluPlugin* obj = new PReluPlugin(gamma);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* PReluPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT
{
// This object will be deleted when the network is destroyed, which will
// call PReluPlugin::destroy()
PReluPlugin* obj = new PReluPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
eff90c31b874738cfd78072734367e34b88ba241.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolve_gpu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *weights = NULL;
hipMalloc(&weights, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int in_w = 1;
int in_h = 1;
int in_c = 1;
int n = XSIZE*YSIZE;
int size = XSIZE*YSIZE;
int pad = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
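// Equivalent closed form for the padding loops above (illustrative):
//     dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX,
//                    (YSIZE + BLOCKY - 1) / BLOCKY);
// i.e. a ceiling division per dimension.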
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
convolve_gpu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,weights,output,in_w,in_h,in_c,n,size,pad);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
convolve_gpu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,weights,output,in_w,in_h,in_c,n,size,pad);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
convolve_gpu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,weights,output,in_w,in_h,in_c,n,size,pad);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | eff90c31b874738cfd78072734367e34b88ba241.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolve_gpu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *weights = NULL;
cudaMalloc(&weights, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int in_w = 1;
int in_h = 1;
int in_c = 1;
int n = XSIZE*YSIZE;
int size = XSIZE*YSIZE;
int pad = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convolve_gpu_kernel<<<gridBlock,threadBlock>>>(input,weights,output,in_w,in_h,in_c,n,size,pad);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolve_gpu_kernel<<<gridBlock,threadBlock>>>(input,weights,output,in_w,in_h,in_c,n,size,pad);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolve_gpu_kernel<<<gridBlock,threadBlock>>>(input,weights,output,in_w,in_h,in_c,n,size,pad);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
da5c70d8638eebef13f3604d512a68979830e8ec.hip | // !!! This is a file automatically generated by hipify!!!
/*
author: fredy m
uaem
[email protected] for further comments
*/
#include <hip/device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_vector_types.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define N 32 // a single block of N*N threads is launched below, so N must not exceed 32 (1024-thread limit)
/*
computes the transpose of a matrix
*/
// definition of CUDA constant memory
__constant__ float dev_A[N][N];
//GLOBAL: function called from the host and executed on the device (kernel)
__global__ void transpuesta(float *dev_B)
{
int columna = threadIdx.x;
int fila = threadIdx.y;
int pos = columna + N * fila;
// each thread writes one element of the output matrix
dev_B[pos] = dev_A[columna][fila];
}
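// Worked example of the indexing above (illustrative): thread (columna, fila)
// writes dev_B[columna + N*fila], which is the row-major slot B[fila][columna],
// and assigns it dev_A[columna][fila]; hence B = A^T.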
int main(int argc, char** argv)
{
float *hst_A, *hst_B;
float *dev_B;
int size = N * N * sizeof(float);
//allocate memory on the host
hst_A = (float*)malloc(size);
hst_B = (float*)malloc(size);
//allocate memory on the device
hipMalloc((void**)&dev_B, size);
//fill the matrix
for (int i = 0; i < N*N; i++)
{
hst_A[i] = float(i) + 1;
}
//copy the data to the device (constant memory)
hipError_t error = hipMemcpyToSymbol(dev_A, hst_A, size);
if (error != hipSuccess) {
printf("Error copying to constant memory\n");
}
//dimensions of the kernel launch
dim3 bloques(1);
dim3 hilos(N, N);
//kernel launch
hipLaunchKernelGGL(( transpuesta) , dim3(bloques), dim3(hilos) , 0, 0, dev_B);
//copy the results back to the host
hipMemcpy(hst_B, dev_B, size, hipMemcpyDeviceToHost);
//print the data
printf("Original matrix:\n");
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++)
{
printf("%.2f\t", hst_A[j + i * N]);
}
printf("\n");
}
printf("Transposed matrix:\n");
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
printf("%.2f\t", hst_B[j + i * N]);
}
printf("\n");
}
//
printf("\n press ENTER to exit:\n");
fflush(stdin);
char tecla = getchar();
return 0;
}
| da5c70d8638eebef13f3604d512a68979830e8ec.cu | /*
author: fredy m
uaem
[email protected] for further comments
*/
#include <device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector_types.h>
#include <cuda.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 32 // a single block of N*N threads is launched below, so N must not exceed 32 (1024-thread limit)
/*
computes the transpose of a matrix
*/
// definition of CUDA constant memory
__constant__ float dev_A[N][N];
//GLOBAL: function called from the host and executed on the device (kernel)
__global__ void transpuesta(float *dev_B)
{
int columna = threadIdx.x;
int fila = threadIdx.y;
int pos = columna + N * fila;
// each thread writes one element of the output matrix
dev_B[pos] = dev_A[columna][fila];
}
int main(int argc, char** argv)
{
float *hst_A, *hst_B;
float *dev_B;
int size = N * N * sizeof(float);
//allocate memory on the host
hst_A = (float*)malloc(size);
hst_B = (float*)malloc(size);
//allocate memory on the device
cudaMalloc((void**)&dev_B, size);
//fill the matrix
for (int i = 0; i < N*N; i++)
{
hst_A[i] = float(i) + 1;
}
//copy the data to the device (constant memory)
cudaError_t error = cudaMemcpyToSymbol(dev_A, hst_A, size);
if (error != cudaSuccess) {
printf("Error copying to constant memory\n");
}
//dimensions of the kernel launch
dim3 bloques(1);
dim3 hilos(N, N);
//kernel launch
transpuesta <<<bloques, hilos >>> (dev_B);
//copy the results back to the host
cudaMemcpy(hst_B, dev_B, size, cudaMemcpyDeviceToHost);
//print the data
printf("Original matrix:\n");
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++)
{
printf("%.2f\t", hst_A[j + i * N]);
}
printf("\n");
}
printf("Transposed matrix:\n");
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
printf("%.2f\t", hst_B[j + i * N]);
}
printf("\n");
}
//
printf("\n press ENTER to exit:\n");
fflush(stdin);
char tecla = getchar();
return 0;
}
|
d56abe6f5e32266827d5dc2fc76adc43582d5bb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void checkPrime(int * d_in)
{
//get thread id
int id = blockIdx.x * blockDim.x + threadIdx.x;
//num is now in local memory, much quicker
int num = d_in[id];
//couple of corner cases
if(num == 0 || num == 1)
{
d_in[id] = 0;
return;
}
//assume prime until proven otherwise
d_in[id] = 1;
//quicker to check 2 on its own
//then we can count up in 2s (only need to check odd numbers) starting from 3
if(num % 2 == 0)
{
d_in[id] = 0;
}
else
{
//only need to check upto ceil of sqrt(num)
//better to start from 3 and count up rather than down
//do sqrt here not in loop to stop it being evaluated each time round
int sqrtNum = (int)sqrt((float)num);
for(int i = 3; i < sqrtNum + 1; i += 2)
{
if(num % i == 0)
{
d_in[id] = 0;
return;
}
}
}
}
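// Worked example (illustrative): for num = 97, sqrtNum = 9, so the loop only
// tries i = 3, 5, 7, 9; none divides 97, so it stays marked prime. Any
// composite number has a divisor <= its square root, which is why stopping at
// sqrtNum is sufficient (even factors were already ruled out above).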
int main(int argc, char ** argv)
{
//anything much over 1000000 crashes it: h_in is a stack array, so large
//ARRAY_SIZE values most likely overflow the host stack (not a VRAM limit)
const int ARRAY_SIZE = 100000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
//generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = i;
}
//declare GPU memory pointers
int * d_in;
//allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
//transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//launch the kernel
//not sure what the best ratio of blocks to threads is
hipLaunchKernelGGL(( checkPrime), dim3(ARRAY_SIZE/100), dim3(100), 0, 0, d_in);
//copy back the result array to the CPU
hipMemcpy(h_in, d_in, ARRAY_BYTES, hipMemcpyDeviceToHost);
//print out the resulting array of primes
for (int i = 0; i < ARRAY_SIZE; i++)
{
if(h_in[i])
printf("%d\n", i);
}
hipFree(d_in);
return 0;
} | d56abe6f5e32266827d5dc2fc76adc43582d5bb9.cu | #include <stdio.h>
__global__ void checkPrime(int * d_in)
{
//get thread id
int id = blockIdx.x * blockDim.x + threadIdx.x;
//num is now in local memory, much quicker
int num = d_in[id];
//couple of corner cases
if(num == 0 || num == 1)
{
d_in[id] = 0;
return;
}
//assume prime until proven otherwise
d_in[id] = 1;
//quicker to check 2 on its own
//then we can count up in 2s (only need to check odd numbers) starting from 3
if(num % 2 == 0)
{
d_in[id] = 0;
}
else
{
//only need to check upto ceil of sqrt(num)
//better to start from 3 and count up rather than down
//do sqrt here not in loop to stop it being evaluated each time round
int sqrtNum = (int)sqrt((float)num);
for(int i = 3; i < sqrtNum + 1; i += 2)
{
if(num % i == 0)
{
d_in[id] = 0;
return;
}
}
}
}
int main(int argc, char ** argv)
{
//anything much over 1000000 crashes it: h_in is a stack array, so large
//ARRAY_SIZE values most likely overflow the host stack (not a VRAM limit)
const int ARRAY_SIZE = 100000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
//generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = i;
}
//declare GPU memory pointers
int * d_in;
//allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
//transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//launch the kernel
//not sure what the best ratio of blocks to threads is
checkPrime<<<ARRAY_SIZE/100, 100>>>(d_in);
//copy back the result array to the CPU
cudaMemcpy(h_in, d_in, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//print out the resulting array of primes
for (int i = 0; i < ARRAY_SIZE; i++)
{
if(h_in[i])
printf("%d\n", i);
}
cudaFree(d_in);
return 0;
} |
705108017f67a80dbbc4ca586999401bbc5352db.hip | // !!! This is a file automatically generated by hipify!!!
#include <random>
#include <vector>
#include <tuple>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <algorithm>
#include "SyncedMemory.h"
#include "Timer.h"
#include "counting.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
template <typename Engine>
tuple<vector<char>, vector<int>, vector<int>> GenerateTestCase(Engine &eng, const int N) {
poisson_distribution<int> pd(14.0);
bernoulli_distribution bd(0.1);
uniform_int_distribution<int> id1(1, 20);
uniform_int_distribution<int> id2(1, 5);
uniform_int_distribution<int> id3('a', 'z');
tuple<vector<char>, vector<int>, vector<int>> ret;
auto &text = get<0>(ret);
auto &pos = get<1>(ret);
auto &head = get<2>(ret);
auto gen_rand_word_len = [&] () -> int {
return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0)));
};
auto gen_rand_space_len = [&] () -> int {
return id2(eng);
};
auto gen_rand_char = [&] () {
return id3(eng);
};
auto AddWord = [&] () {
head.push_back(text.size());
int n = gen_rand_word_len();
for (int i = 0; i < n; ++i) {
text.push_back(gen_rand_char());
pos.push_back(i+1);
}
};
auto AddSpace = [&] () {
int n = gen_rand_space_len();
for (int i = 0; i < n; ++i) {
text.push_back('\n');
pos.push_back(0);
}
};
AddWord();
while (text.size() < N) {
AddSpace();
AddWord();
}
return ret;
}
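// Descriptive note on the generated test case above: `text` is a stream of
// lowercase words separated by runs of '\n'; `pos[i]` is the 1-based position
// of character i inside its word (0 for separators); `head` lists the index in
// `text` where each word starts. These serve as the golden reference for
// Parts I and II below.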
int main(int argc, char **argv)
{
// Initialize random text
default_random_engine engine(12345);
auto text_pos_head = GenerateTestCase(engine, 400000); // ~0.4 MB of text
vector<char> &text = get<0>(text_pos_head);
vector<int> &pos = get<1>(text_pos_head);
vector<int> &head = get<2>(text_pos_head);
// Prepare buffers
int n = text.size();
char *text_gpu;
hipMalloc(&text_gpu, sizeof(char)*n);
SyncedMemory<char> text_sync(text.data(), text_gpu, n);
text_sync.get_cpu_wo(); // touch the cpu data
MemoryBuffer<int> pos_yours(n), head_yours(n);
auto pos_yours_sync = pos_yours.CreateSync(n);
auto head_yours_sync = head_yours.CreateSync(n);
// Create timers
Timer timer_count_position;
// Part I
timer_count_position.Start();
int *pos_yours_gpu = pos_yours_sync.get_gpu_wo();
hipMemset(pos_yours_gpu, 0, sizeof(int)*n);
CountPosition(text_sync.get_gpu_ro(), pos_yours_gpu, n);
CHECK;
timer_count_position.Pause();
printf_timer(timer_count_position);
// Part I check
const int *golden = pos.data();
const int *yours = pos_yours_sync.get_cpu_ro();
int n_match1 = mismatch(golden, golden+n, yours).first - golden;
if (n_match1 != n) {
puts("Part I WA!");
copy_n(golden, n, pos_yours_sync.get_cpu_wo());
}
// Part II
int *head_yours_gpu = head_yours_sync.get_gpu_wo();
hipMemset(head_yours_gpu, 0, sizeof(int)*n);
int n_head = ExtractHead(pos_yours_sync.get_gpu_ro(), head_yours_gpu, n);
CHECK;
// Part II check
do {
if (n_head != head.size()) {
n_head = head.size();
puts("Part II WA (wrong number of heads)!");
} else {
int n_match2 = mismatch(head.begin(), head.end(), head_yours_sync.get_cpu_ro()).first - head.begin();
if (n_match2 != n_head) {
puts("Part II WA (wrong heads)!");
} else {
break;
}
}
copy_n(head.begin(), n_head, head_yours_sync.get_cpu_wo());
} while(false);
// Part III
	// Do whatever you want
Part3(text_gpu, pos_yours_sync.get_gpu_rw(), head_yours_sync.get_gpu_rw(), n, n_head);
CHECK;
hipFree(text_gpu);
return 0;
}
| 705108017f67a80dbbc4ca586999401bbc5352db.cu | #include <random>
#include <vector>
#include <tuple>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <algorithm>
#include "SyncedMemory.h"
#include "Timer.h"
#include "counting.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
template <typename Engine>
tuple<vector<char>, vector<int>, vector<int>> GenerateTestCase(Engine &eng, const int N) {
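	// Builds a random text of lowercase words separated by runs of '\n'.
	// pos[i] is the 1-based position of character i inside its word (0 for '\n'),
	// and head[j] is the index in text at which word j starts.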
poisson_distribution<int> pd(14.0);
bernoulli_distribution bd(0.1);
uniform_int_distribution<int> id1(1, 20);
uniform_int_distribution<int> id2(1, 5);
uniform_int_distribution<int> id3('a', 'z');
tuple<vector<char>, vector<int>, vector<int>> ret;
auto &text = get<0>(ret);
auto &pos = get<1>(ret);
auto &head = get<2>(ret);
auto gen_rand_word_len = [&] () -> int {
return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0)));
};
auto gen_rand_space_len = [&] () -> int {
return id2(eng);
};
auto gen_rand_char = [&] () {
return id3(eng);
};
auto AddWord = [&] () {
head.push_back(text.size());
int n = gen_rand_word_len();
for (int i = 0; i < n; ++i) {
text.push_back(gen_rand_char());
pos.push_back(i+1);
}
};
auto AddSpace = [&] () {
int n = gen_rand_space_len();
for (int i = 0; i < n; ++i) {
text.push_back('\n');
pos.push_back(0);
}
};
AddWord();
while (text.size() < N) {
AddSpace();
AddWord();
}
return ret;
}
int main(int argc, char **argv)
{
// Initialize random text
default_random_engine engine(12345);
	auto text_pos_head = GenerateTestCase(engine, 400000); // ~0.4 MB of text (plus pos/head index arrays)
vector<char> &text = get<0>(text_pos_head);
vector<int> &pos = get<1>(text_pos_head);
vector<int> &head = get<2>(text_pos_head);
// Prepare buffers
int n = text.size();
char *text_gpu;
cudaMalloc(&text_gpu, sizeof(char)*n);
SyncedMemory<char> text_sync(text.data(), text_gpu, n);
text_sync.get_cpu_wo(); // touch the cpu data
MemoryBuffer<int> pos_yours(n), head_yours(n);
auto pos_yours_sync = pos_yours.CreateSync(n);
auto head_yours_sync = head_yours.CreateSync(n);
// Create timers
Timer timer_count_position;
// Part I
timer_count_position.Start();
int *pos_yours_gpu = pos_yours_sync.get_gpu_wo();
cudaMemset(pos_yours_gpu, 0, sizeof(int)*n);
CountPosition(text_sync.get_gpu_ro(), pos_yours_gpu, n);
CHECK;
timer_count_position.Pause();
printf_timer(timer_count_position);
// Part I check
const int *golden = pos.data();
const int *yours = pos_yours_sync.get_cpu_ro();
int n_match1 = mismatch(golden, golden+n, yours).first - golden;
if (n_match1 != n) {
puts("Part I WA!");
copy_n(golden, n, pos_yours_sync.get_cpu_wo());
}
// Part II
int *head_yours_gpu = head_yours_sync.get_gpu_wo();
cudaMemset(head_yours_gpu, 0, sizeof(int)*n);
int n_head = ExtractHead(pos_yours_sync.get_gpu_ro(), head_yours_gpu, n);
CHECK;
// Part II check
do {
if (n_head != head.size()) {
n_head = head.size();
puts("Part II WA (wrong number of heads)!");
} else {
int n_match2 = mismatch(head.begin(), head.end(), head_yours_sync.get_cpu_ro()).first - head.begin();
if (n_match2 != n_head) {
puts("Part II WA (wrong heads)!");
} else {
break;
}
}
copy_n(head.begin(), n_head, head_yours_sync.get_cpu_wo());
} while(false);
// Part III
	// Do whatever you want
Part3(text_gpu, pos_yours_sync.get_gpu_rw(), head_yours_sync.get_gpu_rw(), n, n_head);
CHECK;
cudaFree(text_gpu);
return 0;
}
|
320f1318e123504e8b7e1ce7eb839b3d6d81787c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "finishCentroids_64.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *centroidMass = NULL;
hipMalloc(&centroidMass, XSIZE*YSIZE);
unsigned int *centroidCount = NULL;
hipMalloc(&centroidCount, XSIZE*YSIZE);
float *centroids = NULL;
hipMalloc(&centroids, XSIZE*YSIZE);
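// NOTE: the sizes above are XSIZE*YSIZE bytes, not elements; if the kernel
// touches XSIZE*YSIZE ints/floats the buffers need a sizeof(int)/sizeof(float) factor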
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(finishCentroids_64, dim3(gridBlock), dim3(threadBlock), 0, 0, centroidMass, centroidCount, centroids);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(finishCentroids_64, dim3(gridBlock), dim3(threadBlock), 0, 0, centroidMass, centroidCount, centroids);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(finishCentroids_64, dim3(gridBlock), dim3(threadBlock), 0, 0, centroidMass, centroidCount, centroids);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 320f1318e123504e8b7e1ce7eb839b3d6d81787c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "finishCentroids_64.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *centroidMass = NULL;
cudaMalloc(&centroidMass, XSIZE*YSIZE);
unsigned int *centroidCount = NULL;
cudaMalloc(&centroidCount, XSIZE*YSIZE);
float *centroids = NULL;
cudaMalloc(&centroids, XSIZE*YSIZE);
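// NOTE: the sizes above are XSIZE*YSIZE bytes, not elements; if the kernel
// touches XSIZE*YSIZE ints/floats the buffers need a sizeof(int)/sizeof(float) factor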
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
finishCentroids_64<<<gridBlock,threadBlock>>>(centroidMass,centroidCount,centroids);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
finishCentroids_64<<<gridBlock,threadBlock>>>(centroidMass,centroidCount,centroids);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
finishCentroids_64<<<gridBlock,threadBlock>>>(centroidMass,centroidCount,centroids);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
806d6915d9813abcd3b311720f038766ff52320a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
cout << "Error at line " << line << " : " << hipGetErrorString(ret) << endl;
exit(-1);
}
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
LONG row = threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; n++)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
struct timeval t1,t2, tnp, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K;
cin >> K;
N = K*BLOCK_SIZE;
CUDA_SAFE_CALL(hipSetDevice(1));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
srand(time(NULL));
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = drand48();
hB[j*N+i] = drand48();
}
}
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
dim3 threadBlock(BLOCK_SIZE,K);
dim3 grid(K);
double *dA,*dB,*dC,*dAT,*dCT,*dTemp;
/* With prefetching begins */
CUDA_SAFE_CALL(hipHostMalloc(&dB,size));
CUDA_SAFE_CALL(hipHostMalloc(&dA,(K*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dC,(K*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dAT,(K*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dCT,(K*size/N)));
hipStream_t s1,s2,s3;
CUDA_SAFE_CALL(hipStreamCreate(&s1));
CUDA_SAFE_CALL(hipStreamCreate(&s2));
CUDA_SAFE_CALL(hipStreamCreate(&s3));
gettimeofday(&t1,0);
// Copy matrices from the host to device
CUDA_SAFE_CALL(hipMemcpyAsync(dB,hB,size,hipMemcpyHostToDevice,s1));
CUDA_SAFE_CALL(hipMemcpyAsync(dA,hA,K*(size/N),hipMemcpyHostToDevice,s1));
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,s1, dA,dB,dC,N);
for(LONG i=1; i< (N/K); i++){
// Prefetch the next set of rows
CUDA_SAFE_CALL(hipMemcpyAsync(dAT,hA+i*N*K,(K*size/N),hipMemcpyHostToDevice,s2));
CUDA_SAFE_CALL(hipDeviceSynchronize());
//Swap pointers
dTemp = dAT;
dAT = dA;
dA = dTemp;
dTemp = dCT;
dCT = dC;
dC = dTemp;
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,s1, dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpyAsync(C+(i-1)*N*K,dCT,(K*size/N),hipMemcpyDeviceToHost,s3));
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipMemcpyAsync(C+((N/K)-1)*N*K,dC,(K*size/N),hipMemcpyDeviceToHost,s3));
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(hipStreamDestroy(s1));
CUDA_SAFE_CALL(hipStreamDestroy(s2));
CUDA_SAFE_CALL(hipStreamDestroy(s3));
CUDA_SAFE_CALL(hipHostFree(dB));
CUDA_SAFE_CALL(hipHostFree(dA));
CUDA_SAFE_CALL(hipHostFree(dC));
CUDA_SAFE_CALL(hipHostFree(dAT));
CUDA_SAFE_CALL(hipHostFree(dCT));
/* Without prefetching begins */
CUDA_SAFE_CALL(hipMalloc(&dB,size));
CUDA_SAFE_CALL(hipMalloc(&dA,(K*size/N)));
CUDA_SAFE_CALL(hipMalloc(&dC,(K*size/N)));
gettimeofday(&t1,0);
CUDA_SAFE_CALL(hipMemcpy(dB,hB,size,hipMemcpyHostToDevice));
for(LONG i=0; i< (N/K); i++){
//cout << "Iteration " << i << endl;
CUDA_SAFE_CALL(hipMemcpy(dA,hA+i*N*K,(K*size/N),hipMemcpyHostToDevice));
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock), 0, 0, dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpy(C+i*N*K,dC,(K*size/N),hipMemcpyDeviceToHost));
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tnp);
tt = (double) tnp.tv_sec + ((double) tnp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Without Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(hipFree(dB));
CUDA_SAFE_CALL(hipFree(dA));
CUDA_SAFE_CALL(hipFree(dC));
delete [] hA;
delete [] hB;
delete [] hC;
delete [] C;
cout << "Finished." << endl;
return 0;
}
| 806d6915d9813abcd3b311720f038766ff52320a.cu | #include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
cout << "Error at line " << line << " : " << cudaGetErrorString(ret) << endl;
exit(-1);
}
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
LONG row = threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; n++)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
struct timeval t1,t2, tnp, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K;
cin >> K;
N = K*BLOCK_SIZE;
CUDA_SAFE_CALL(cudaSetDevice(1));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
srand(time(NULL));
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = drand48();
hB[j*N+i] = drand48();
}
}
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
dim3 threadBlock(BLOCK_SIZE,K);
dim3 grid(K);
double *dA,*dB,*dC,*dAT,*dCT,*dTemp;
/* With prefetching begins */
CUDA_SAFE_CALL(cudaMallocHost(&dB,size));
CUDA_SAFE_CALL(cudaMallocHost(&dA,(K*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dC,(K*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dAT,(K*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dCT,(K*size/N)));
cudaStream_t s1,s2,s3;
CUDA_SAFE_CALL(cudaStreamCreate(&s1));
CUDA_SAFE_CALL(cudaStreamCreate(&s2));
CUDA_SAFE_CALL(cudaStreamCreate(&s3));
gettimeofday(&t1,0);
// Copy matrices from the host to device
CUDA_SAFE_CALL(cudaMemcpyAsync(dB,hB,size,cudaMemcpyHostToDevice,s1));
CUDA_SAFE_CALL(cudaMemcpyAsync(dA,hA,K*(size/N),cudaMemcpyHostToDevice,s1));
gpuMM<<<grid,threadBlock,0,s1>>>(dA,dB,dC,N);
for(LONG i=1; i< (N/K); i++){
// Prefetch the next set of rows
CUDA_SAFE_CALL(cudaMemcpyAsync(dAT,hA+i*N*K,(K*size/N),cudaMemcpyHostToDevice,s2));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
//Swap pointers
dTemp = dAT;
dAT = dA;
dA = dTemp;
dTemp = dCT;
dCT = dC;
dC = dTemp;
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock,0,s1>>>(dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpyAsync(C+(i-1)*N*K,dCT,(K*size/N),cudaMemcpyDeviceToHost,s3));
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaMemcpyAsync(C+((N/K)-1)*N*K,dC,(K*size/N),cudaMemcpyDeviceToHost,s3));
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(cudaStreamDestroy(s1));
CUDA_SAFE_CALL(cudaStreamDestroy(s2));
CUDA_SAFE_CALL(cudaStreamDestroy(s3));
CUDA_SAFE_CALL(cudaFreeHost(dB));
CUDA_SAFE_CALL(cudaFreeHost(dA));
CUDA_SAFE_CALL(cudaFreeHost(dC));
CUDA_SAFE_CALL(cudaFreeHost(dAT));
CUDA_SAFE_CALL(cudaFreeHost(dCT));
/* Without prefetching begins */
CUDA_SAFE_CALL(cudaMalloc(&dB,size));
CUDA_SAFE_CALL(cudaMalloc(&dA,(K*size/N)));
CUDA_SAFE_CALL(cudaMalloc(&dC,(K*size/N)));
gettimeofday(&t1,0);
CUDA_SAFE_CALL(cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice));
for(LONG i=0; i< (N/K); i++){
//cout << "Iteration " << i << endl;
CUDA_SAFE_CALL(cudaMemcpy(dA,hA+i*N*K,(K*size/N),cudaMemcpyHostToDevice));
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpy(C+i*N*K,dC,(K*size/N),cudaMemcpyDeviceToHost));
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tnp);
tt = (double) tnp.tv_sec + ((double) tnp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Without Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(cudaFree(dB));
CUDA_SAFE_CALL(cudaFree(dA));
CUDA_SAFE_CALL(cudaFree(dC));
delete [] hA;
delete [] hB;
delete [] hC;
delete [] C;
cout << "Finished." << endl;
return 0;
}
|
a75563dc78022c8cee5cf196260fe75881c4999d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "CRC32.cu"
#define rep(idx, max) for(int idx = 0, idx##Max = max; idx < idx##Max; idx ++)
#define erep(idx, min, max) for(int idx = min, idx##Max = max; idx <= idx##Max; idx ++)
#define MAX_BRUTE_LEN 54
#define MAX_CHARSET 256
__constant__ char gd_charset[MAX_CHARSET];
__constant__ int gd_charsetLen;
__constant__ u8 gd_cryptHeader[CRYPTHEADER_SIZE];
__device__ char gd_word[MAX_BRUTE_LEN];
__device__ u32 GetCRC32(u32 n1, u8 n2)
{ return(gd_crc32Tbl[(n1 ^ n2) & 0xFF] ^ (n1 >> 8)); }
__device__ void UpdateKeys(ZIPKEY &key, u8 n)
{
key.x = ::GetCRC32(key.x, n);
key.y = (key.y + (key.x & 0xFF)) * 0x08088405 + 1;
key.z = ::GetCRC32(key.z, key.y >> 24);
}
__device__ u8 Dec(ZIPKEY &key, u8 n)
{
u16 t = ((key.z & 0xFFFF) | 2);
t = ((t * (t ^ 1)) >> 8) & 0xFF;
::UpdateKeys(key, n ^= t);
return n;
}
__device__ void InitDecrypt(ZIPKEY &key, char *lpszPassword)
{
key.x = 0x12345678;
key.y = 0x23456789;
key.z = 0x34567890;
for(char *p = lpszPassword; *p; ::UpdateKeys(key, *(p ++)));
for(int i = 0; i < CRYPTHEADER_SIZE; ::Dec(key, gd_cryptHeader[i ++]));
}
__device__ u32 GetCRC32(ZIPKEY &key, u8 *lpBuf, u32 len, u32 initVal)
{
u32 ret = initVal;
for (u32 i = 0; i < len; i ++) { ret = (ret >> 8) ^ gd_crc32Tbl[ ::Dec(key, lpBuf[i]) ^ (ret & 0xFF)]; }
return ~ret;
}
// 3 -> 000 100 200 010 110 210 020 120 220 ...
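// Increment treats indices[] as a little-endian counter in base charsetLen and
// advances it by incBy; the return value reports overflow past the last combination.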
__device__ __host__ bool Increment(u8 *indices, int wordLen, int charsetLen, int incBy)
{
for(int i = 0; i < wordLen && incBy > 0; i ++)
{
int add = incBy + indices[i];
indices[i] = add % charsetLen;
incBy = add / charsetLen;
}
return incBy != 0;
}
__global__ void KerCrack(u8 *lpData, int size, int wordLen, int charsetLen, u32 crc32)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
u8 indices[MAX_BRUTE_LEN] = {};
char szPassword[MAX_BRUTE_LEN];
ZIPKEY key;
::Increment(indices, wordLen, charsetLen, idx);
for(int i = 0; i < wordLen; i ++)
{
szPassword[i] = gd_charset[indices[i]];
}
szPassword[wordLen] = '\0';
::InitDecrypt(key, szPassword);
if(crc32 == ::GetCRC32(key, lpData, size, 0xFFFFFFFF))
{
for(char *d = gd_word, *s = szPassword; *(d ++) = *(s ++); );
}
}
| a75563dc78022c8cee5cf196260fe75881c4999d.cu | #include <cuda_runtime.h>
#include "CRC32.cu"
#define rep(idx, max) for(int idx = 0, idx##Max = max; idx < idx##Max; idx ++)
#define erep(idx, min, max) for(int idx = min, idx##Max = max; idx <= idx##Max; idx ++)
#define MAX_BRUTE_LEN 54
#define MAX_CHARSET 256
__constant__ char gd_charset[MAX_CHARSET];
__constant__ int gd_charsetLen;
__constant__ u8 gd_cryptHeader[CRYPTHEADER_SIZE];
__device__ char gd_word[MAX_BRUTE_LEN];
__device__ u32 GetCRC32(u32 n1, u8 n2)
{ return(gd_crc32Tbl[(n1 ^ n2) & 0xFF] ^ (n1 >> 8)); }
__device__ void UpdateKeys(ZIPKEY &key, u8 n)
{
key.x = ::GetCRC32(key.x, n);
key.y = (key.y + (key.x & 0xFF)) * 0x08088405 + 1;
key.z = ::GetCRC32(key.z, key.y >> 24);
}
__device__ u8 Dec(ZIPKEY &key, u8 n)
{
u16 t = ((key.z & 0xFFFF) | 2);
t = ((t * (t ^ 1)) >> 8) & 0xFF;
::UpdateKeys(key, n ^= t);
return n;
}
__device__ void InitDecrypt(ZIPKEY &key, char *lpszPassword)
{
key.x = 0x12345678;
key.y = 0x23456789;
key.z = 0x34567890;
for(char *p = lpszPassword; *p; ::UpdateKeys(key, *(p ++)));
for(int i = 0; i < CRYPTHEADER_SIZE; ::Dec(key, gd_cryptHeader[i ++]));
}
__device__ u32 GetCRC32(ZIPKEY &key, u8 *lpBuf, u32 len, u32 initVal)
{
u32 ret = initVal;
for (u32 i = 0; i < len; i ++) { ret = (ret >> 8) ^ gd_crc32Tbl[ ::Dec(key, lpBuf[i]) ^ (ret & 0xFF)]; }
return ~ret;
}
// 3 -> 000 100 200 010 110 210 020 120 220 ...
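// Increment treats indices[] as a little-endian counter in base charsetLen and
// advances it by incBy; the return value reports overflow past the last combination.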
__device__ __host__ bool Increment(u8 *indices, int wordLen, int charsetLen, int incBy)
{
for(int i = 0; i < wordLen && incBy > 0; i ++)
{
int add = incBy + indices[i];
indices[i] = add % charsetLen;
incBy = add / charsetLen;
}
return incBy != 0;
}
__global__ void KerCrack(u8 *lpData, int size, int wordLen, int charsetLen, u32 crc32)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
u8 indices[MAX_BRUTE_LEN] = {};
char szPassword[MAX_BRUTE_LEN];
ZIPKEY key;
::Increment(indices, wordLen, charsetLen, idx);
for(int i = 0; i < wordLen; i ++)
{
szPassword[i] = gd_charset[indices[i]];
}
szPassword[wordLen] = '\0';
::InitDecrypt(key, szPassword);
if(crc32 == ::GetCRC32(key, lpData, size, 0xFFFFFFFF))
{
for(char *d = gd_word, *s = szPassword; *(d ++) = *(s ++); );
}
}
|
16bdbc4dd72505b176b6df2636fd45d9b628dd04.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <float.h>
__global__ void relu_kernel(float *output, float *input, int batch, int channel, int height, int width, int total_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
if (input[tid] > 0.0f) {
output[tid] = input[tid];
}
else {
output[tid] = 0.0f;
}
}
void relu(float *output, float *input, int batch, int channel, int height, int width)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C * H * W;
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
relu_kernel << < NUMBER_OF_BLOCKS, THREADS_PER_BLOCK >> > (output, input, N, C, H, W, TOTAL_SIZE);
} | 16bdbc4dd72505b176b6df2636fd45d9b628dd04.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <float.h>
__global__ void relu_kernel(float *output, float *input, int batch, int channel, int height, int width, int total_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
if (input[tid] > 0.0f) {
output[tid] = input[tid];
}
else {
output[tid] = 0.0f;
}
}
void relu(float *output, float *input, int batch, int channel, int height, int width)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C * H * W;
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
relu_kernel << < NUMBER_OF_BLOCKS, THREADS_PER_BLOCK >> > (output, input, N, C, H, W, TOTAL_SIZE);
} |
9de0ef6a2097979fc84a615ac5f53997ca9f8117.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2016 Rory mitchell
*/
#include <hipcub/hipcub.hpp>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <random>
#include <vector>
#include "../../../src/common/random.h"
#include "device_helpers_hip.cuh"
#include "find_split.cuh"
#include "gpu_builder.cuh"
#include "types_functions.cuh"
namespace xgboost {
namespace tree {
struct GPUData {
GPUData() : allocated(false), n_features(0), n_instances(0) {}
bool allocated;
int n_features;
int n_instances;
dh::bulk_allocator ba;
GPUTrainingParam param;
dh::dvec<float> fvalues;
dh::dvec<float> fvalues_temp;
dh::dvec<float> fvalues_cached;
dh::dvec<int> foffsets;
dh::dvec<bst_uint> instance_id;
dh::dvec<bst_uint> instance_id_temp;
dh::dvec<bst_uint> instance_id_cached;
dh::dvec<int> feature_id;
dh::dvec<NodeIdT> node_id;
dh::dvec<NodeIdT> node_id_temp;
dh::dvec<NodeIdT> node_id_instance;
dh::dvec<gpu_gpair> gpair;
dh::dvec<Node> nodes;
dh::dvec<Split> split_candidates;
dh::dvec<gpu_gpair> node_sums;
dh::dvec<int> node_offsets;
dh::dvec<int> sort_index_in;
dh::dvec<int> sort_index_out;
dh::dvec<char> cub_mem;
ItemIter items_iter;
void Init(const std::vector<float> &in_fvalues,
const std::vector<int> &in_foffsets,
const std::vector<bst_uint> &in_instance_id,
const std::vector<int> &in_feature_id,
const std::vector<bst_gpair> &in_gpair, bst_uint n_instances_in,
bst_uint n_features_in, int max_depth, const TrainParam &param_in) {
n_features = n_features_in;
n_instances = n_instances_in;
uint32_t max_nodes = (1 << (max_depth + 1)) - 1;
uint32_t max_nodes_level = 1 << max_depth;
// Calculate memory for sort
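// (The SortPairs call below is the standard CUB size-query step: with the
// temp-storage buffer still unallocated it only writes the required byte count
// into cub_mem_size and performs no sorting.)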
size_t cub_mem_size = 0;
hipcub::DoubleBuffer<NodeIdT> db_key;
hipcub::DoubleBuffer<int> db_value;
hipcub::DeviceSegmentedRadixSort::SortPairs(
cub_mem.data(), cub_mem_size, db_key,
db_value, in_fvalues.size(), n_features,
foffsets.data(), foffsets.data() + 1);
// Allocate memory
size_t free_memory = dh::available_memory();
ba.allocate(&fvalues, in_fvalues.size(), &fvalues_temp, in_fvalues.size(),
&fvalues_cached, in_fvalues.size(), &foffsets,
in_foffsets.size(), &instance_id, in_instance_id.size(),
&instance_id_temp, in_instance_id.size(), &instance_id_cached,
in_instance_id.size(), &feature_id, in_feature_id.size(),
&node_id, in_fvalues.size(), &node_id_temp, in_fvalues.size(),
&node_id_instance, n_instances, &gpair, n_instances, &nodes,
max_nodes, &split_candidates, max_nodes_level * n_features,
&node_sums, max_nodes_level * n_features, &node_offsets,
max_nodes_level * n_features, &sort_index_in, in_fvalues.size(),
&sort_index_out, in_fvalues.size(), &cub_mem, cub_mem_size);
if (!param_in.silent) {
const int mb_size = 1048576;
LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << "/"
<< free_memory / mb_size << " MB on " << dh::device_name();
}
node_id.fill(0);
node_id_instance.fill(0);
fvalues = in_fvalues;
fvalues_cached = fvalues;
foffsets = in_foffsets;
instance_id = in_instance_id;
instance_id_cached = instance_id;
feature_id = in_feature_id;
param = GPUTrainingParam(param_in.min_child_weight, param_in.reg_lambda,
param_in.reg_alpha, param_in.max_delta_step);
gpair = in_gpair;
nodes.fill(Node());
items_iter = thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_permutation_iterator(gpair.tbegin(), instance_id.tbegin()),
fvalues.tbegin(), node_id.tbegin()));
allocated = true;
dh::safe_cuda(hipGetLastError());
}
~GPUData() {}
// Reset memory for new boosting iteration
void Reset(const std::vector<bst_gpair> &in_gpair) {
CHECK(allocated);
gpair = in_gpair;
instance_id = instance_id_cached;
fvalues = fvalues_cached;
nodes.fill(Node());
node_id_instance.fill(0);
node_id.fill(0);
}
bool IsAllocated() { return allocated; }
// Gather from node_id_instance into node_id according to instance_id
void GatherNodeId() {
// Update node_id for each item
auto d_node_id = node_id.data();
auto d_node_id_instance = node_id_instance.data();
auto d_instance_id = instance_id.data();
dh::launch_n(fvalues.size(), [=] __device__(bst_uint i) {
// Item item = d_items[i];
d_node_id[i] = d_node_id_instance[d_instance_id[i]];
});
}
};
GPUBuilder::GPUBuilder() { gpu_data = new GPUData(); }
void GPUBuilder::Init(const TrainParam &param_in) {
param = param_in;
CHECK(param.max_depth < 16) << "Tree depth too large.";
}
GPUBuilder::~GPUBuilder() { delete gpu_data; }
void GPUBuilder::UpdateNodeId(int level) {
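// Nodes are laid out as a dense binary heap: node n has children 2n+1 (left) and
// 2n+2 (right); a node id of -1 marks instances that have already reached a leaf.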
auto *d_node_id_instance = gpu_data->node_id_instance.data();
Node *d_nodes = gpu_data->nodes.data();
dh::launch_n(gpu_data->node_id_instance.size(), [=] __device__(int i) {
NodeIdT item_node_id = d_node_id_instance[i];
if (item_node_id < 0) {
return;
}
Node node = d_nodes[item_node_id];
if (node.IsLeaf()) {
d_node_id_instance[i] = -1;
} else if (node.split.missing_left) {
d_node_id_instance[i] = item_node_id * 2 + 1;
} else {
d_node_id_instance[i] = item_node_id * 2 + 2;
}
});
dh::safe_cuda(hipDeviceSynchronize());
auto *d_fvalues = gpu_data->fvalues.data();
auto *d_instance_id = gpu_data->instance_id.data();
auto *d_node_id = gpu_data->node_id.data();
auto *d_feature_id = gpu_data->feature_id.data();
// Update node based on fvalue where exists
dh::launch_n(gpu_data->fvalues.size(), [=] __device__(int i) {
NodeIdT item_node_id = d_node_id[i];
if (item_node_id < 0) {
return;
}
Node node = d_nodes[item_node_id];
if (node.IsLeaf()) {
return;
}
int feature_id = d_feature_id[i];
if (feature_id == node.split.findex) {
float fvalue = d_fvalues[i];
bst_uint instance_id = d_instance_id[i];
if (fvalue < node.split.fvalue) {
d_node_id_instance[instance_id] = item_node_id * 2 + 1;
} else {
d_node_id_instance[instance_id] = item_node_id * 2 + 2;
}
}
});
dh::safe_cuda(hipDeviceSynchronize());
gpu_data->GatherNodeId();
}
void GPUBuilder::Sort(int level) {
thrust::sequence(gpu_data->sort_index_in.tbegin(),
gpu_data->sort_index_in.tend());
hipcub::DoubleBuffer<NodeIdT> d_keys(gpu_data->node_id.data(),
gpu_data->node_id_temp.data());
hipcub::DoubleBuffer<int> d_values(gpu_data->sort_index_in.data(),
gpu_data->sort_index_out.data());
size_t temp_size = gpu_data->cub_mem.size();
hipcub::DeviceSegmentedRadixSort::SortPairs(
gpu_data->cub_mem.data(), temp_size, d_keys, d_values,
gpu_data->fvalues.size(), gpu_data->n_features, gpu_data->foffsets.data(),
gpu_data->foffsets.data() + 1);
auto zip = thrust::make_zip_iterator(thrust::make_tuple(
gpu_data->fvalues.tbegin(), gpu_data->instance_id.tbegin()));
auto zip_temp = thrust::make_zip_iterator(thrust::make_tuple(
gpu_data->fvalues_temp.tbegin(), gpu_data->instance_id_temp.tbegin()));
thrust::gather(thrust::device_pointer_cast(d_values.Current()),
thrust::device_pointer_cast(d_values.Current()) +
gpu_data->sort_index_out.size(),
zip, zip_temp);
thrust::copy(zip_temp, zip_temp + gpu_data->fvalues.size(), zip);
if (d_keys.Current() == gpu_data->node_id_temp.data()) {
thrust::copy(gpu_data->node_id_temp.tbegin(), gpu_data->node_id_temp.tend(),
gpu_data->node_id.tbegin());
}
}
void GPUBuilder::Update(const std::vector<bst_gpair> &gpair, DMatrix *p_fmat,
RegTree *p_tree) {
hipProfilerStart();
try {
dh::Timer update;
dh::Timer t;
this->InitData(gpair, *p_fmat, *p_tree);
t.printElapsed("init data");
this->InitFirstNode();
for (int level = 0; level < param.max_depth; level++) {
bool use_multiscan_algorithm = level < multiscan_levels;
t.reset();
if (level > 0) {
dh::Timer update_node;
this->UpdateNodeId(level);
update_node.printElapsed("node");
}
if (level > 0 && !use_multiscan_algorithm) {
dh::Timer s;
this->Sort(level);
s.printElapsed("sort");
}
dh::Timer split;
find_split(gpu_data->items_iter, gpu_data->split_candidates.data(),
gpu_data->nodes.data(), (bst_uint)gpu_data->fvalues.size(),
gpu_data->n_features, gpu_data->foffsets.data(),
gpu_data->node_sums.data(), gpu_data->node_offsets.data(),
gpu_data->param, level, use_multiscan_algorithm);
split.printElapsed("split");
t.printElapsed("level");
}
this->CopyTree(*p_tree);
update.printElapsed("update");
} catch (thrust::system_error &e) {
std::cerr << "CUDA error: " << e.what() << std::endl;
exit(-1);
} catch (const std::exception &e) {
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
} catch (...) {
std::cerr << "Unknown exception." << std::endl;
exit(-1);
}
hipProfilerStop();
}
float GPUBuilder::GetSubsamplingRate(MetaInfo info) {
float subsample = 1.0;
uint32_t max_nodes = (1 << (param.max_depth + 1)) - 1;
uint32_t max_nodes_level = 1 << param.max_depth;
size_t required = 10 * info.num_row + 40 * info.num_nonzero
+ 64 * max_nodes + 76 * max_nodes_level * info.num_col;
size_t available = dh::available_memory();
while (available < required) {
subsample -= 0.05;
required = 10 * info.num_row + subsample * (44 * info.num_nonzero);
}
return subsample;
}
void GPUBuilder::InitData(const std::vector<bst_gpair> &gpair, DMatrix &fmat,
const RegTree &tree) {
CHECK(fmat.SingleColBlock()) << "GPUMaker: must have single column block";
if (gpu_data->IsAllocated()) {
gpu_data->Reset(gpair);
return;
}
dh::Timer t;
MetaInfo info = fmat.info();
// Work out if dataset will fit on GPU
float subsample = this->GetSubsamplingRate(info);
CHECK(subsample > 0.0);
if (!param.silent && subsample < param.subsample) {
LOG(CONSOLE) << "Not enough device memory for entire dataset.";
}
// Override subsample parameter if user-specified parameter is lower
subsample = std::min(param.subsample, subsample);
std::vector<bool> row_flags;
if (subsample < 1.0) {
if (!param.silent && subsample < 1.0) {
LOG(CONSOLE) << "Subsampling " << subsample * 100 << "% of rows.";
}
const RowSet &rowset = fmat.buffered_rowset();
row_flags.resize(info.num_row);
std::bernoulli_distribution coin_flip(subsample);
auto &rnd = common::GlobalRandom();
for (size_t i = 0; i < rowset.size(); ++i) {
const bst_uint ridx = rowset[i];
if (gpair[ridx].hess < 0.0f)
continue;
row_flags[ridx] = coin_flip(rnd);
}
}
std::vector<int> foffsets;
foffsets.push_back(0);
std::vector<int> feature_id;
std::vector<float> fvalues;
std::vector<bst_uint> instance_id;
fvalues.reserve(info.num_col * info.num_row);
instance_id.reserve(info.num_col * info.num_row);
feature_id.reserve(info.num_col * info.num_row);
dmlc::DataIter<ColBatch> *iter = fmat.ColIterator();
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (int i = 0; i < batch.size; i++) {
const ColBatch::Inst &col = batch[i];
for (const ColBatch::Entry *it = col.data; it != col.data + col.length;
it++) {
bst_uint inst_id = it->index;
if (subsample < 1.0) {
if (row_flags[inst_id]) {
fvalues.push_back(it->fvalue);
instance_id.push_back(inst_id);
feature_id.push_back(i);
}
} else {
fvalues.push_back(it->fvalue);
instance_id.push_back(inst_id);
feature_id.push_back(i);
}
}
foffsets.push_back(fvalues.size());
}
}
t.printElapsed("dmatrix");
t.reset();
gpu_data->Init(fvalues, foffsets, instance_id, feature_id, gpair,
info.num_row, info.num_col, param.max_depth, param);
t.printElapsed("gpu init");
}
void GPUBuilder::InitFirstNode() {
// Build the root node on the CPU and copy to device
gpu_gpair sum_gradients =
thrust::reduce(gpu_data->gpair.tbegin(), gpu_data->gpair.tend(),
gpu_gpair(0, 0), hipcub::Sum());
Node tmp = Node(
sum_gradients,
CalcGain(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()),
CalcWeight(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()));
thrust::copy_n(&tmp, 1, gpu_data->nodes.tbegin());
}
enum NodeType {
NODE = 0,
LEAF = 1,
UNUSED = 2,
};
// Recursively label node types
void flag_nodes(const thrust::host_vector<Node> &nodes,
std::vector<NodeType> *node_flags, int nid, NodeType type) {
if (nid >= nodes.size() || type == UNUSED) {
return;
}
const Node &n = nodes[nid];
// Current node and all children are valid
if (n.split.loss_chg > rt_eps) {
(*node_flags)[nid] = NODE;
flag_nodes(nodes, node_flags, nid * 2 + 1, NODE);
flag_nodes(nodes, node_flags, nid * 2 + 2, NODE);
} else {
// Current node is leaf, therefore is valid but all children are invalid
(*node_flags)[nid] = LEAF;
flag_nodes(nodes, node_flags, nid * 2 + 1, UNUSED);
flag_nodes(nodes, node_flags, nid * 2 + 2, UNUSED);
}
}
// Copy gpu dense representation of tree to xgboost sparse representation
void GPUBuilder::CopyTree(RegTree &tree) {
std::vector<Node> h_nodes = gpu_data->nodes.as_vector();
std::vector<NodeType> node_flags(h_nodes.size(), UNUSED);
flag_nodes(h_nodes, &node_flags, 0, NODE);
int nid = 0;
for (int gpu_nid = 0; gpu_nid < h_nodes.size(); gpu_nid++) {
NodeType flag = node_flags[gpu_nid];
const Node &n = h_nodes[gpu_nid];
if (flag == NODE) {
tree.AddChilds(nid);
tree[nid].set_split(n.split.findex, n.split.fvalue, n.split.missing_left);
tree.stat(nid).loss_chg = n.split.loss_chg;
tree.stat(nid).base_weight = n.weight;
tree.stat(nid).sum_hess = n.sum_gradients.hess();
tree[tree[nid].cleft()].set_leaf(0);
tree[tree[nid].cright()].set_leaf(0);
nid++;
} else if (flag == LEAF) {
tree[nid].set_leaf(n.weight * param.learning_rate);
tree.stat(nid).sum_hess = n.sum_gradients.hess();
nid++;
}
}
}
} // namespace tree
} // namespace xgboost
| 9de0ef6a2097979fc84a615ac5f53997ca9f8117.cu | /*!
* Copyright 2016 Rory mitchell
*/
#include <cub/cub.cuh>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <random>
#include <vector>
#include "../../../src/common/random.h"
#include "device_helpers.cuh"
#include "find_split.cuh"
#include "gpu_builder.cuh"
#include "types_functions.cuh"
namespace xgboost {
namespace tree {
struct GPUData {
GPUData() : allocated(false), n_features(0), n_instances(0) {}
bool allocated;
int n_features;
int n_instances;
dh::bulk_allocator ba;
GPUTrainingParam param;
dh::dvec<float> fvalues;
dh::dvec<float> fvalues_temp;
dh::dvec<float> fvalues_cached;
dh::dvec<int> foffsets;
dh::dvec<bst_uint> instance_id;
dh::dvec<bst_uint> instance_id_temp;
dh::dvec<bst_uint> instance_id_cached;
dh::dvec<int> feature_id;
dh::dvec<NodeIdT> node_id;
dh::dvec<NodeIdT> node_id_temp;
dh::dvec<NodeIdT> node_id_instance;
dh::dvec<gpu_gpair> gpair;
dh::dvec<Node> nodes;
dh::dvec<Split> split_candidates;
dh::dvec<gpu_gpair> node_sums;
dh::dvec<int> node_offsets;
dh::dvec<int> sort_index_in;
dh::dvec<int> sort_index_out;
dh::dvec<char> cub_mem;
ItemIter items_iter;
void Init(const std::vector<float> &in_fvalues,
const std::vector<int> &in_foffsets,
const std::vector<bst_uint> &in_instance_id,
const std::vector<int> &in_feature_id,
const std::vector<bst_gpair> &in_gpair, bst_uint n_instances_in,
bst_uint n_features_in, int max_depth, const TrainParam &param_in) {
n_features = n_features_in;
n_instances = n_instances_in;
uint32_t max_nodes = (1 << (max_depth + 1)) - 1;
uint32_t max_nodes_level = 1 << max_depth;
// Calculate memory for sort
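// (The SortPairs call below is the standard CUB size-query step: with the
// temp-storage buffer still unallocated it only writes the required byte count
// into cub_mem_size and performs no sorting.)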
size_t cub_mem_size = 0;
cub::DoubleBuffer<NodeIdT> db_key;
cub::DoubleBuffer<int> db_value;
cub::DeviceSegmentedRadixSort::SortPairs(
cub_mem.data(), cub_mem_size, db_key,
db_value, in_fvalues.size(), n_features,
foffsets.data(), foffsets.data() + 1);
// Allocate memory
size_t free_memory = dh::available_memory();
ba.allocate(&fvalues, in_fvalues.size(), &fvalues_temp, in_fvalues.size(),
&fvalues_cached, in_fvalues.size(), &foffsets,
in_foffsets.size(), &instance_id, in_instance_id.size(),
&instance_id_temp, in_instance_id.size(), &instance_id_cached,
in_instance_id.size(), &feature_id, in_feature_id.size(),
&node_id, in_fvalues.size(), &node_id_temp, in_fvalues.size(),
&node_id_instance, n_instances, &gpair, n_instances, &nodes,
max_nodes, &split_candidates, max_nodes_level * n_features,
&node_sums, max_nodes_level * n_features, &node_offsets,
max_nodes_level * n_features, &sort_index_in, in_fvalues.size(),
&sort_index_out, in_fvalues.size(), &cub_mem, cub_mem_size);
if (!param_in.silent) {
const int mb_size = 1048576;
LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << "/"
<< free_memory / mb_size << " MB on " << dh::device_name();
}
node_id.fill(0);
node_id_instance.fill(0);
fvalues = in_fvalues;
fvalues_cached = fvalues;
foffsets = in_foffsets;
instance_id = in_instance_id;
instance_id_cached = instance_id;
feature_id = in_feature_id;
param = GPUTrainingParam(param_in.min_child_weight, param_in.reg_lambda,
param_in.reg_alpha, param_in.max_delta_step);
gpair = in_gpair;
nodes.fill(Node());
items_iter = thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_permutation_iterator(gpair.tbegin(), instance_id.tbegin()),
fvalues.tbegin(), node_id.tbegin()));
allocated = true;
dh::safe_cuda(cudaGetLastError());
}
~GPUData() {}
// Reset memory for new boosting iteration
void Reset(const std::vector<bst_gpair> &in_gpair) {
CHECK(allocated);
gpair = in_gpair;
instance_id = instance_id_cached;
fvalues = fvalues_cached;
nodes.fill(Node());
node_id_instance.fill(0);
node_id.fill(0);
}
bool IsAllocated() { return allocated; }
// Gather from node_id_instance into node_id according to instance_id
void GatherNodeId() {
// Update node_id for each item
auto d_node_id = node_id.data();
auto d_node_id_instance = node_id_instance.data();
auto d_instance_id = instance_id.data();
dh::launch_n(fvalues.size(), [=] __device__(bst_uint i) {
// Item item = d_items[i];
d_node_id[i] = d_node_id_instance[d_instance_id[i]];
});
}
};
GPUBuilder::GPUBuilder() { gpu_data = new GPUData(); }
void GPUBuilder::Init(const TrainParam &param_in) {
param = param_in;
CHECK(param.max_depth < 16) << "Tree depth too large.";
}
GPUBuilder::~GPUBuilder() { delete gpu_data; }
void GPUBuilder::UpdateNodeId(int level) {
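// Nodes are laid out as a dense binary heap: node n has children 2n+1 (left) and
// 2n+2 (right); a node id of -1 marks instances that have already reached a leaf.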
auto *d_node_id_instance = gpu_data->node_id_instance.data();
Node *d_nodes = gpu_data->nodes.data();
dh::launch_n(gpu_data->node_id_instance.size(), [=] __device__(int i) {
NodeIdT item_node_id = d_node_id_instance[i];
if (item_node_id < 0) {
return;
}
Node node = d_nodes[item_node_id];
if (node.IsLeaf()) {
d_node_id_instance[i] = -1;
} else if (node.split.missing_left) {
d_node_id_instance[i] = item_node_id * 2 + 1;
} else {
d_node_id_instance[i] = item_node_id * 2 + 2;
}
});
dh::safe_cuda(cudaDeviceSynchronize());
auto *d_fvalues = gpu_data->fvalues.data();
auto *d_instance_id = gpu_data->instance_id.data();
auto *d_node_id = gpu_data->node_id.data();
auto *d_feature_id = gpu_data->feature_id.data();
// Update node based on fvalue where exists
dh::launch_n(gpu_data->fvalues.size(), [=] __device__(int i) {
NodeIdT item_node_id = d_node_id[i];
if (item_node_id < 0) {
return;
}
Node node = d_nodes[item_node_id];
if (node.IsLeaf()) {
return;
}
int feature_id = d_feature_id[i];
if (feature_id == node.split.findex) {
float fvalue = d_fvalues[i];
bst_uint instance_id = d_instance_id[i];
if (fvalue < node.split.fvalue) {
d_node_id_instance[instance_id] = item_node_id * 2 + 1;
} else {
d_node_id_instance[instance_id] = item_node_id * 2 + 2;
}
}
});
dh::safe_cuda(cudaDeviceSynchronize());
gpu_data->GatherNodeId();
}
void GPUBuilder::Sort(int level) {
thrust::sequence(gpu_data->sort_index_in.tbegin(),
gpu_data->sort_index_in.tend());
cub::DoubleBuffer<NodeIdT> d_keys(gpu_data->node_id.data(),
gpu_data->node_id_temp.data());
cub::DoubleBuffer<int> d_values(gpu_data->sort_index_in.data(),
gpu_data->sort_index_out.data());
size_t temp_size = gpu_data->cub_mem.size();
cub::DeviceSegmentedRadixSort::SortPairs(
gpu_data->cub_mem.data(), temp_size, d_keys, d_values,
gpu_data->fvalues.size(), gpu_data->n_features, gpu_data->foffsets.data(),
gpu_data->foffsets.data() + 1);
auto zip = thrust::make_zip_iterator(thrust::make_tuple(
gpu_data->fvalues.tbegin(), gpu_data->instance_id.tbegin()));
auto zip_temp = thrust::make_zip_iterator(thrust::make_tuple(
gpu_data->fvalues_temp.tbegin(), gpu_data->instance_id_temp.tbegin()));
thrust::gather(thrust::device_pointer_cast(d_values.Current()),
thrust::device_pointer_cast(d_values.Current()) +
gpu_data->sort_index_out.size(),
zip, zip_temp);
thrust::copy(zip_temp, zip_temp + gpu_data->fvalues.size(), zip);
if (d_keys.Current() == gpu_data->node_id_temp.data()) {
thrust::copy(gpu_data->node_id_temp.tbegin(), gpu_data->node_id_temp.tend(),
gpu_data->node_id.tbegin());
}
}
void GPUBuilder::Update(const std::vector<bst_gpair> &gpair, DMatrix *p_fmat,
RegTree *p_tree) {
cudaProfilerStart();
try {
dh::Timer update;
dh::Timer t;
this->InitData(gpair, *p_fmat, *p_tree);
t.printElapsed("init data");
this->InitFirstNode();
for (int level = 0; level < param.max_depth; level++) {
bool use_multiscan_algorithm = level < multiscan_levels;
t.reset();
if (level > 0) {
dh::Timer update_node;
this->UpdateNodeId(level);
update_node.printElapsed("node");
}
if (level > 0 && !use_multiscan_algorithm) {
dh::Timer s;
this->Sort(level);
s.printElapsed("sort");
}
dh::Timer split;
find_split(gpu_data->items_iter, gpu_data->split_candidates.data(),
gpu_data->nodes.data(), (bst_uint)gpu_data->fvalues.size(),
gpu_data->n_features, gpu_data->foffsets.data(),
gpu_data->node_sums.data(), gpu_data->node_offsets.data(),
gpu_data->param, level, use_multiscan_algorithm);
split.printElapsed("split");
t.printElapsed("level");
}
this->CopyTree(*p_tree);
update.printElapsed("update");
} catch (thrust::system_error &e) {
std::cerr << "CUDA error: " << e.what() << std::endl;
exit(-1);
} catch (const std::exception &e) {
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
} catch (...) {
std::cerr << "Unknown exception." << std::endl;
exit(-1);
}
cudaProfilerStop();
}
float GPUBuilder::GetSubsamplingRate(MetaInfo info) {
float subsample = 1.0;
uint32_t max_nodes = (1 << (param.max_depth + 1)) - 1;
uint32_t max_nodes_level = 1 << param.max_depth;
size_t required = 10 * info.num_row + 40 * info.num_nonzero
+ 64 * max_nodes + 76 * max_nodes_level * info.num_col;
size_t available = dh::available_memory();
while (available < required) {
subsample -= 0.05;
required = 10 * info.num_row + subsample * (44 * info.num_nonzero);
}
return subsample;
}
void GPUBuilder::InitData(const std::vector<bst_gpair> &gpair, DMatrix &fmat,
const RegTree &tree) {
CHECK(fmat.SingleColBlock()) << "GPUMaker: must have single column block";
if (gpu_data->IsAllocated()) {
gpu_data->Reset(gpair);
return;
}
dh::Timer t;
MetaInfo info = fmat.info();
// Work out if dataset will fit on GPU
float subsample = this->GetSubsamplingRate(info);
CHECK(subsample > 0.0);
if (!param.silent && subsample < param.subsample) {
LOG(CONSOLE) << "Not enough device memory for entire dataset.";
}
// Override subsample parameter if user-specified parameter is lower
subsample = std::min(param.subsample, subsample);
std::vector<bool> row_flags;
if (subsample < 1.0) {
if (!param.silent && subsample < 1.0) {
LOG(CONSOLE) << "Subsampling " << subsample * 100 << "% of rows.";
}
const RowSet &rowset = fmat.buffered_rowset();
row_flags.resize(info.num_row);
std::bernoulli_distribution coin_flip(subsample);
auto &rnd = common::GlobalRandom();
for (size_t i = 0; i < rowset.size(); ++i) {
const bst_uint ridx = rowset[i];
if (gpair[ridx].hess < 0.0f)
continue;
row_flags[ridx] = coin_flip(rnd);
}
}
std::vector<int> foffsets;
foffsets.push_back(0);
std::vector<int> feature_id;
std::vector<float> fvalues;
std::vector<bst_uint> instance_id;
fvalues.reserve(info.num_col * info.num_row);
instance_id.reserve(info.num_col * info.num_row);
feature_id.reserve(info.num_col * info.num_row);
dmlc::DataIter<ColBatch> *iter = fmat.ColIterator();
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (int i = 0; i < batch.size; i++) {
const ColBatch::Inst &col = batch[i];
for (const ColBatch::Entry *it = col.data; it != col.data + col.length;
it++) {
bst_uint inst_id = it->index;
if (subsample < 1.0) {
if (row_flags[inst_id]) {
fvalues.push_back(it->fvalue);
instance_id.push_back(inst_id);
feature_id.push_back(i);
}
} else {
fvalues.push_back(it->fvalue);
instance_id.push_back(inst_id);
feature_id.push_back(i);
}
}
foffsets.push_back(fvalues.size());
}
}
t.printElapsed("dmatrix");
t.reset();
gpu_data->Init(fvalues, foffsets, instance_id, feature_id, gpair,
info.num_row, info.num_col, param.max_depth, param);
t.printElapsed("gpu init");
}
void GPUBuilder::InitFirstNode() {
// Build the root node on the CPU and copy to device
gpu_gpair sum_gradients =
thrust::reduce(gpu_data->gpair.tbegin(), gpu_data->gpair.tend(),
gpu_gpair(0, 0), cub::Sum());
Node tmp = Node(
sum_gradients,
CalcGain(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()),
CalcWeight(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()));
thrust::copy_n(&tmp, 1, gpu_data->nodes.tbegin());
}
enum NodeType {
NODE = 0,
LEAF = 1,
UNUSED = 2,
};
// Recursively label node types
void flag_nodes(const thrust::host_vector<Node> &nodes,
std::vector<NodeType> *node_flags, int nid, NodeType type) {
if (nid >= nodes.size() || type == UNUSED) {
return;
}
const Node &n = nodes[nid];
// Current node and all children are valid
if (n.split.loss_chg > rt_eps) {
(*node_flags)[nid] = NODE;
flag_nodes(nodes, node_flags, nid * 2 + 1, NODE);
flag_nodes(nodes, node_flags, nid * 2 + 2, NODE);
} else {
// Current node is leaf, therefore is valid but all children are invalid
(*node_flags)[nid] = LEAF;
flag_nodes(nodes, node_flags, nid * 2 + 1, UNUSED);
flag_nodes(nodes, node_flags, nid * 2 + 2, UNUSED);
}
}
// Copy gpu dense representation of tree to xgboost sparse representation
void GPUBuilder::CopyTree(RegTree &tree) {
std::vector<Node> h_nodes = gpu_data->nodes.as_vector();
std::vector<NodeType> node_flags(h_nodes.size(), UNUSED);
flag_nodes(h_nodes, &node_flags, 0, NODE);
int nid = 0;
for (int gpu_nid = 0; gpu_nid < h_nodes.size(); gpu_nid++) {
NodeType flag = node_flags[gpu_nid];
const Node &n = h_nodes[gpu_nid];
if (flag == NODE) {
tree.AddChilds(nid);
tree[nid].set_split(n.split.findex, n.split.fvalue, n.split.missing_left);
tree.stat(nid).loss_chg = n.split.loss_chg;
tree.stat(nid).base_weight = n.weight;
tree.stat(nid).sum_hess = n.sum_gradients.hess();
tree[tree[nid].cleft()].set_leaf(0);
tree[tree[nid].cright()].set_leaf(0);
nid++;
} else if (flag == LEAF) {
tree[nid].set_leaf(n.weight * param.learning_rate);
tree.stat(nid).sum_hess = n.sum_gradients.hess();
nid++;
}
}
}
} // namespace tree
} // namespace xgboost
|
aec48663ac877053260e359e49a1aa71f69b989b.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "path_aggregation.hpp"
#include "vertical_path_aggregation.hpp"
#include "horizontal_path_aggregation.hpp"
#include "oblique_path_aggregation.hpp"
namespace sgm {
template <size_t MAX_DISPARITY>
PathAggregation<MAX_DISPARITY>::PathAggregation()
: m_cost_buffer()
{
for(unsigned int i = 0; i < MAX_NUM_PATHS; ++i){
hipStreamCreate(&m_streams[i]);
hipEventCreate(&m_events[i]);
}
}
template <size_t MAX_DISPARITY>
PathAggregation<MAX_DISPARITY>::~PathAggregation(){
for(unsigned int i = 0; i < MAX_NUM_PATHS; ++i){
hipStreamSynchronize(m_streams[i]);
hipStreamDestroy(m_streams[i]);
hipEventDestroy(m_events[i]);
}
}
template <size_t MAX_DISPARITY>
void PathAggregation<MAX_DISPARITY>::enqueue(
const feature_type *left,
const feature_type *right,
int width,
int height,
PathType path_type,
unsigned int p1,
unsigned int p2,
hipStream_t stream)
{
const unsigned int num_paths = path_type == PathType::SCAN_4PATH ? 4 : 8;
const size_t buffer_size = width * height * MAX_DISPARITY * num_paths;
if(m_cost_buffer.size() != buffer_size){
m_cost_buffer = DeviceBuffer<cost_type>(buffer_size);
}
const size_t buffer_step = width * height * MAX_DISPARITY;
hipStreamSynchronize(stream);
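	// Each directional aggregation below runs on its own internal stream; the
	// events recorded at the end make the caller's stream wait for all of them.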
path_aggregation::enqueue_aggregate_up2down_path<MAX_DISPARITY>(
m_cost_buffer.data() + 0 * buffer_step,
left, right, width, height, p1, p2, m_streams[0]);
path_aggregation::enqueue_aggregate_down2up_path<MAX_DISPARITY>(
m_cost_buffer.data() + 1 * buffer_step,
left, right, width, height, p1, p2, m_streams[1]);
path_aggregation::enqueue_aggregate_left2right_path<MAX_DISPARITY>(
m_cost_buffer.data() + 2 * buffer_step,
left, right, width, height, p1, p2, m_streams[2]);
path_aggregation::enqueue_aggregate_right2left_path<MAX_DISPARITY>(
m_cost_buffer.data() + 3 * buffer_step,
left, right, width, height, p1, p2, m_streams[3]);
if (path_type == PathType::SCAN_8PATH) {
path_aggregation::enqueue_aggregate_upleft2downright_path<MAX_DISPARITY>(
m_cost_buffer.data() + 4 * buffer_step,
left, right, width, height, p1, p2, m_streams[4]);
path_aggregation::enqueue_aggregate_upright2downleft_path<MAX_DISPARITY>(
m_cost_buffer.data() + 5 * buffer_step,
left, right, width, height, p1, p2, m_streams[5]);
path_aggregation::enqueue_aggregate_downright2upleft_path<MAX_DISPARITY>(
m_cost_buffer.data() + 6 * buffer_step,
left, right, width, height, p1, p2, m_streams[6]);
path_aggregation::enqueue_aggregate_downleft2upright_path<MAX_DISPARITY>(
m_cost_buffer.data() + 7 * buffer_step,
left, right, width, height, p1, p2, m_streams[7]);
}
for(unsigned int i = 0; i < MAX_NUM_PATHS; ++i){
hipEventRecord(m_events[i], m_streams[i]);
hipStreamWaitEvent(stream, m_events[i], 0);
}
}
template class PathAggregation< 64>;
template class PathAggregation<128>;
template class PathAggregation<256>;
}
| aec48663ac877053260e359e49a1aa71f69b989b.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "path_aggregation.hpp"
#include "vertical_path_aggregation.hpp"
#include "horizontal_path_aggregation.hpp"
#include "oblique_path_aggregation.hpp"
namespace sgm {
template <size_t MAX_DISPARITY>
PathAggregation<MAX_DISPARITY>::PathAggregation()
: m_cost_buffer()
{
for(unsigned int i = 0; i < MAX_NUM_PATHS; ++i){
cudaStreamCreate(&m_streams[i]);
cudaEventCreate(&m_events[i]);
}
}
template <size_t MAX_DISPARITY>
PathAggregation<MAX_DISPARITY>::~PathAggregation(){
for(unsigned int i = 0; i < MAX_NUM_PATHS; ++i){
cudaStreamSynchronize(m_streams[i]);
cudaStreamDestroy(m_streams[i]);
cudaEventDestroy(m_events[i]);
}
}
template <size_t MAX_DISPARITY>
void PathAggregation<MAX_DISPARITY>::enqueue(
const feature_type *left,
const feature_type *right,
int width,
int height,
PathType path_type,
unsigned int p1,
unsigned int p2,
cudaStream_t stream)
{
const unsigned int num_paths = path_type == PathType::SCAN_4PATH ? 4 : 8;
const size_t buffer_size = width * height * MAX_DISPARITY * num_paths;
if(m_cost_buffer.size() != buffer_size){
m_cost_buffer = DeviceBuffer<cost_type>(buffer_size);
}
const size_t buffer_step = width * height * MAX_DISPARITY;
cudaStreamSynchronize(stream);
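	// Each directional aggregation below runs on its own internal stream; the
	// events recorded at the end make the caller's stream wait for all of them.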
path_aggregation::enqueue_aggregate_up2down_path<MAX_DISPARITY>(
m_cost_buffer.data() + 0 * buffer_step,
left, right, width, height, p1, p2, m_streams[0]);
path_aggregation::enqueue_aggregate_down2up_path<MAX_DISPARITY>(
m_cost_buffer.data() + 1 * buffer_step,
left, right, width, height, p1, p2, m_streams[1]);
path_aggregation::enqueue_aggregate_left2right_path<MAX_DISPARITY>(
m_cost_buffer.data() + 2 * buffer_step,
left, right, width, height, p1, p2, m_streams[2]);
path_aggregation::enqueue_aggregate_right2left_path<MAX_DISPARITY>(
m_cost_buffer.data() + 3 * buffer_step,
left, right, width, height, p1, p2, m_streams[3]);
if (path_type == PathType::SCAN_8PATH) {
path_aggregation::enqueue_aggregate_upleft2downright_path<MAX_DISPARITY>(
m_cost_buffer.data() + 4 * buffer_step,
left, right, width, height, p1, p2, m_streams[4]);
path_aggregation::enqueue_aggregate_upright2downleft_path<MAX_DISPARITY>(
m_cost_buffer.data() + 5 * buffer_step,
left, right, width, height, p1, p2, m_streams[5]);
path_aggregation::enqueue_aggregate_downright2upleft_path<MAX_DISPARITY>(
m_cost_buffer.data() + 6 * buffer_step,
left, right, width, height, p1, p2, m_streams[6]);
path_aggregation::enqueue_aggregate_downleft2upright_path<MAX_DISPARITY>(
m_cost_buffer.data() + 7 * buffer_step,
left, right, width, height, p1, p2, m_streams[7]);
}
for(unsigned int i = 0; i < MAX_NUM_PATHS; ++i){
cudaEventRecord(m_events[i], m_streams[i]);
cudaStreamWaitEvent(stream, m_events[i], 0);
}
}
template class PathAggregation< 64>;
template class PathAggregation<128>;
template class PathAggregation<256>;
}
|
441e070638f9547584d551435b82bdcc0ba0e25d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include "eddl/hardware/gpu/nn/gpu_tensor_nn_kernels.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void repeat_nn_k(float *a, int batch, int depth, int a_rows, int a_cols, float *b, int b_rows, int b_cols, int *size){
long int ops=batch*depth*b_rows*b_cols;
long int thread_id_x = blockIdx.x*blockDim.x + threadIdx.x;
if (thread_id_x < ops){
// output pixel at batch=ob, coord=(or,oc) at map=oz
int b_rcd=depth*b_rows*b_cols; // out size of batch
int b_rc=b_rows*b_cols; // out size of slice
int batch_i=thread_id_x/b_rcd; // current batch (ib=ob)
int bm=thread_id_x%b_rcd; // index in batch
int depth_i=bm/b_rc; // current batch (ib=ob)
int row_b=(bm%b_rc)/b_cols; // out row
int col_b=(bm%b_rc)%b_cols; // out col
int a_rcd=depth*a_rows*a_cols; // out size of batch
int a_rc=a_rows*a_cols; // out size of slice
int arow_i = row_b/size[0];
int acol_i = col_b/size[1];
long int offset_a = (batch_i*a_rcd) + (depth_i*a_rc) + (arow_i*a_cols) + acol_i;
// printf("offset_a: %ld, batch: %d, depth: %d, arow_i: %d, acol_i: %d\n", offset_a, batch_i, depth_i, arow_i, acol_i );
b[thread_id_x] = a[offset_a];
}
}
__global__ void d_repeat_nn_k(float *d, int batch, int depth, int d_rows, int d_cols, float *a, int a_rows, int a_cols, int *size){
long int ops=batch*depth*d_rows*d_cols;
long int thread_id_x = blockIdx.x*blockDim.x + threadIdx.x;
if (thread_id_x < ops){
// output pixel at batch=ob, coord=(or,oc) at map=oz
int d_rcd=depth*d_rows*d_cols; // out size of batch
int d_rc=d_rows*d_cols; // out size of slice
int batch_i=thread_id_x/d_rcd; // current batch (ib=ob)
int bm=thread_id_x%d_rcd; // index in batch
int depth_i=bm/d_rc; // current batch (ib=ob)
int row_d=(bm%d_rc)/d_cols; // out row
int col_d=(bm%d_rc)%d_cols; // out col
int a_rcd=depth*a_rows*a_cols; // out size of batch
int a_rc=a_rows*a_cols; // out size of slice
int arow_i = row_d/size[0];
int acol_i = col_d/size[1];
long int offset_a = (batch_i*a_rcd) + (depth_i*a_rc) + (arow_i*a_cols) + acol_i;
// printf("offset_a: %ld, batch: %d, depth: %d, arow_i: %d, acol_i: %d\n", offset_a, batch_i, depth_i, arow_i, acol_i );
atomicAdd(&(a[offset_a]), d[thread_id_x]);
}
}
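// select / set_select helpers: gpu_select_nn gathers A[b, indices[i]] into the contiguous
// tensor B, gpu_set_select_nn scatters B back into A at those positions, and the *_back_nn
// kernels accumulate gradients through the same index map with +=.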
__global__ void gpu_select_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
long int b = thread_id_x / B_batch_str;
long int i = thread_id_x % B_batch_str;
B[thread_id_x] = A[b*A_batch_str + indices[i]];
}
}
__global__ void gpu_select_back_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
long int b = thread_id_x / A_batch_str;
long int i = thread_id_x % A_batch_str;
B[b*B_batch_str + indices[i]] += A[thread_id_x];
}
}
__global__ void gpu_set_select_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
int b = thread_id_x / B_batch_str;
int i = thread_id_x % B_batch_str;
A[b*A_batch_str + indices[i]] = B[thread_id_x];
}
}
__global__ void gpu_set_select_back_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
int b = thread_id_x / B_batch_str;
int i = thread_id_x % B_batch_str;
B[thread_id_x] += A[b*A_batch_str + indices[i]];
}
} | 441e070638f9547584d551435b82bdcc0ba0e25d.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include "eddl/hardware/gpu/nn/gpu_tensor_nn_kernels.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void repeat_nn_k(float *a, int batch, int depth, int a_rows, int a_cols, float *b, int b_rows, int b_cols, int *size){
long int ops=batch*depth*b_rows*b_cols;
long int thread_id_x = blockIdx.x*blockDim.x + threadIdx.x;
if (thread_id_x < ops){
// output pixel at batch=ob, coord=(or,oc) at map=oz
int b_rcd=depth*b_rows*b_cols; // out size of batch
int b_rc=b_rows*b_cols; // out size of slice
int batch_i=thread_id_x/b_rcd; // current batch (ib=ob)
int bm=thread_id_x%b_rcd; // index in batch
int depth_i=bm/b_rc; // current batch (ib=ob)
int row_b=(bm%b_rc)/b_cols; // out row
int col_b=(bm%b_rc)%b_cols; // out col
int a_rcd=depth*a_rows*a_cols; // out size of batch
int a_rc=a_rows*a_cols; // out size of slice
int arow_i = row_b/size[0];
int acol_i = col_b/size[1];
long int offset_a = (batch_i*a_rcd) + (depth_i*a_rc) + (arow_i*a_cols) + acol_i;
// printf("offset_a: %ld, batch: %d, depth: %d, arow_i: %d, acol_i: %d\n", offset_a, batch_i, depth_i, arow_i, acol_i );
b[thread_id_x] = a[offset_a];
}
}
__global__ void d_repeat_nn_k(float *d, int batch, int depth, int d_rows, int d_cols, float *a, int a_rows, int a_cols, int *size){
long int ops=batch*depth*d_rows*d_cols;
long int thread_id_x = blockIdx.x*blockDim.x + threadIdx.x;
if (thread_id_x < ops){
// output pixel at batch=ob, coord=(or,oc) at map=oz
int d_rcd=depth*d_rows*d_cols; // out size of batch
int d_rc=d_rows*d_cols; // out size of slice
int batch_i=thread_id_x/d_rcd; // current batch (ib=ob)
int bm=thread_id_x%d_rcd; // index in batch
int depth_i=bm/d_rc; // current batch (ib=ob)
int row_d=(bm%d_rc)/d_cols; // out row
int col_d=(bm%d_rc)%d_cols; // out col
int a_rcd=depth*a_rows*a_cols; // out size of batch
int a_rc=a_rows*a_cols; // out size of slice
int arow_i = row_d/size[0];
int acol_i = col_d/size[1];
long int offset_a = (batch_i*a_rcd) + (depth_i*a_rc) + (arow_i*a_cols) + acol_i;
// printf("offset_a: %ld, batch: %d, depth: %d, arow_i: %d, acol_i: %d\n", offset_a, batch_i, depth_i, arow_i, acol_i );
atomicAdd(&(a[offset_a]), d[thread_id_x]);
}
}
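// select / set_select helpers: gpu_select_nn gathers A[b, indices[i]] into the contiguous
// tensor B, gpu_set_select_nn scatters B back into A at those positions, and the *_back_nn
// kernels accumulate gradients through the same index map with +=.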
__global__ void gpu_select_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
long int b = thread_id_x / B_batch_str;
long int i = thread_id_x % B_batch_str;
B[thread_id_x] = A[b*A_batch_str + indices[i]];
}
}
__global__ void gpu_select_back_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
long int b = thread_id_x / A_batch_str;
long int i = thread_id_x % A_batch_str;
B[b*B_batch_str + indices[i]] += A[thread_id_x];
}
}
__global__ void gpu_set_select_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
int b = thread_id_x / B_batch_str;
int i = thread_id_x % B_batch_str;
A[b*A_batch_str + indices[i]] = B[thread_id_x];
}
}
__global__ void gpu_set_select_back_nn(float* A, float* B, long int size, int* indices, int A_batch_str, int B_batch_str){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
int b = thread_id_x / B_batch_str;
int i = thread_id_x % B_batch_str;
B[thread_id_x] += A[b*A_batch_str + indices[i]];
}
} |
82d9da3e26d421b77ac3e8f0f4c6fd1d0748c775.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
static __global__ void ConvertUInt8ToUInt16Kernel(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
int destStrideInPixels = nDestPitch / (sizeof(uint16_t));
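// Writing {0, sample} places the 8-bit value in the second byte of the 16-bit pixel
// (value << 8 on little-endian targets); ConvertUInt16ToUInt8Kernel reads it back from .y.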
*(uchar2 *)&dpUInt16[y * destStrideInPixels + x] = uchar2{ 0, dpUInt8[y * nSrcPitch + x] };
}
static __global__ void ConvertUInt16ToUInt8Kernel(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
int srcStrideInPixels = nSrcPitch / (sizeof(uint16_t));
dpUInt8[y * nDestPitch + x] = ((uchar2 *)&dpUInt16[y * srcStrideInPixels + x])->y;
}
void ConvertUInt8ToUInt16(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
dim3 blockSize(16, 16, 1);
dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1);
hipLaunchKernelGGL(( ConvertUInt8ToUInt16Kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, dpUInt8, dpUInt16, nSrcPitch, nDestPitch, nWidth, nHeight);
}
void ConvertUInt16ToUInt8(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
dim3 blockSize(16, 16, 1);
dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1);
hipLaunchKernelGGL(( ConvertUInt16ToUInt8Kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, dpUInt16, dpUInt8, nSrcPitch, nDestPitch, nWidth, nHeight);
}
| 82d9da3e26d421b77ac3e8f0f4c6fd1d0748c775.cu | /*
* Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
static __global__ void ConvertUInt8ToUInt16Kernel(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
int destStrideInPixels = nDestPitch / (sizeof(uint16_t));
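// Writing {0, sample} places the 8-bit value in the second byte of the 16-bit pixel
// (value << 8 on little-endian targets); ConvertUInt16ToUInt8Kernel reads it back from .y.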
*(uchar2 *)&dpUInt16[y * destStrideInPixels + x] = uchar2{ 0, dpUInt8[y * nSrcPitch + x] };
}
static __global__ void ConvertUInt16ToUInt8Kernel(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= nWidth || y >= nHeight)
{
return;
}
int srcStrideInPixels = nSrcPitch / (sizeof(uint16_t));
dpUInt8[y * nDestPitch + x] = ((uchar2 *)&dpUInt16[y * srcStrideInPixels + x])->y;
}
void ConvertUInt8ToUInt16(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
dim3 blockSize(16, 16, 1);
dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1);
ConvertUInt8ToUInt16Kernel <<< gridSize, blockSize >>>(dpUInt8, dpUInt16, nSrcPitch, nDestPitch, nWidth, nHeight);
}
void ConvertUInt16ToUInt8(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight)
{
dim3 blockSize(16, 16, 1);
dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1);
ConvertUInt16ToUInt8Kernel <<<gridSize, blockSize >>>(dpUInt16, dpUInt8, nSrcPitch, nDestPitch, nWidth, nHeight);
}
|
cfff75b4e0c2282a185caec6b74112182e5b7090.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <hip/hip_runtime.h>
#include <opencv2/imgcodecs.hpp>
#include <algorithm>
#include <chrono>
#include <memory>
#include <string>
#include "dali/kernels/alloc.h"
#include "dali/test/mat2tensor.h"
#include "dali/kernels/common/copy.h"
#include "dali/kernels/test/test_data.h"
#include "dali/core/tensor_view.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/kernels/imgproc/resample/resampling_impl.cuh"
#include "dali/test/tensor_test_utils.h"
namespace dali {
namespace kernels {
template <typename Dst, typename Src>
__global__ void ResampleHorzTestKernel(
Dst *out, int out_stride, int out_w,
const Src *in, int in_stride, int in_w, int in_h, int channels,
ResamplingFilter filter, int support) {
float scale = static_cast<float>(in_w) / out_w;
int x0 = blockIdx.x * out_w / gridDim.x;
int x1 = (blockIdx.x + 1) * out_w / gridDim.x;
int y0 = blockIdx.y * in_h / gridDim.y;
int y1 = (blockIdx.y + 1) * in_h / gridDim.y;
vec<1, ptrdiff_t> out_strides(out_stride);
vec<1, ptrdiff_t> in_strides(in_stride);
ResampleHorz(
ivec2(x0, y0), ivec2(x1, y1), 0, scale,
out, out_strides, in, in_strides, ivec2(in_w, in_h),
channels, filter, support);
}
template <typename Dst, typename Src>
__global__ void ResampleVertTestKernel(
Dst *out, int out_stride, int out_h,
const Src *in, int in_stride, int in_w, int in_h, int channels,
ResamplingFilter filter, int support) {
float scale = static_cast<float>(in_h) / out_h;
int x0 = blockIdx.x * in_w / gridDim.x;
int x1 = (blockIdx.x + 1) * in_w / gridDim.x;
int y0 = blockIdx.y * out_h / gridDim.y;
int y1 = (blockIdx.y + 1) * out_h / gridDim.y;
vec<1, ptrdiff_t> out_strides(out_stride);
vec<1, ptrdiff_t> in_strides(in_stride);
ResampleVert(
ivec2(x0, y0), ivec2(x1, y1), 0, scale,
out, out_strides, in, in_strides, ivec2(in_w, in_h),
channels, filter, support);
}
TEST(Resample, HorizontalGaussian) {
auto cv_img = testing::data::image("imgproc/checkerboard.png");
auto cv_ref = testing::data::image("imgproc/ref/resampling/resample_horz.png");
ASSERT_FALSE(cv_img.empty()) << "Couldn't load the image";
TensorView<StorageCPU, const uint8_t, 3> img;
ASSERT_NO_FATAL_FAILURE((img = view_as_tensor<uint8_t>(cv_img)));
int channels = img.shape[2];
ASSERT_EQ(channels, 3);
int H = img.shape[0];
int W = img.shape[1];
int outW = W / 2;
auto gpu_mem_in = memory::alloc_unique<uint8_t>(AllocType::GPU, img.num_elements());
auto gpu_mem_out = memory::alloc_unique<uint8_t>(AllocType::GPU, H * outW * channels);
TensorView<StorageGPU, uint8_t, 3> img_in, img_out;
img_in = { gpu_mem_in.get(), img.shape };
img_out = { gpu_mem_out.get(), { H, outW, channels } };
copy(img_in, img); // NOLINT
auto filters = GetResamplingFilters();
ResamplingFilter filter = (*filters)[1];
int radius = 40;
filter.rescale(2*radius+1);
for (int i = 0; i < 100; i++) {
hipLaunchKernelGGL(( ResampleHorzTestKernel), dim3(1), dim3(dim3(32, 24)), ResampleSharedMemSize, 0,
img_out.data, outW*channels, outW, img_in.data, W*channels, W, H, channels,
filter, filter.support());
CUDA_CALL(hipDeviceSynchronize());
}
cv::Mat out;
out.create(H, outW, CV_8UC3);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(out);
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(cv_ref);
copy(img_out_cpu, img_out); // NOLINT
CUDA_CALL(hipDeviceSynchronize());
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(1))) <<
[&]() {
cv::Mat diff;
cv::absdiff(out, cv_ref, diff);
cv::imwrite("resample_horz_dif.png", diff);
return "Test failed. Absolute difference image saved to resample_horz_dif.png";
}();
}
TEST(Resample, VerticalGaussian) {
auto cv_img = testing::data::image("imgproc/checkerboard.png");
auto cv_ref = testing::data::image("imgproc/ref/resampling/resample_vert.png");
ASSERT_FALSE(cv_img.empty()) << "Couldn't load the image";
TensorView<StorageCPU, const uint8_t, 3> img;
ASSERT_NO_FATAL_FAILURE((img = view_as_tensor<uint8_t>(cv_img)));
int channels = img.shape[2];
ASSERT_EQ(channels, 3);
int H = img.shape[0];
int W = img.shape[1];
int outH = H / 2;
auto gpu_mem_in = memory::alloc_unique<uint8_t>(AllocType::GPU, img.num_elements());
auto gpu_mem_out = memory::alloc_unique<uint8_t>(AllocType::GPU, outH * W * channels);
TensorView<StorageGPU, uint8_t, 3> img_in, img_out;
img_in = { gpu_mem_in.get(), img.shape };
img_out = { gpu_mem_out.get(), { outH, W, channels } };
copy(img_in, img); // NOLINT
auto filters = GetResamplingFilters();
ResamplingFilter filter = (*filters)[1];
int radius = 40;
filter.rescale(2*radius+1);
for (int i = 0; i < 100; i++) {
hipLaunchKernelGGL(( ResampleVertTestKernel), dim3(1), dim3(dim3(32, 24)), ResampleSharedMemSize, 0,
img_out.data, W*channels, outH, img_in.data, W*channels, W, H, channels,
filter, filter.support());
CUDA_CALL(hipDeviceSynchronize());
}
cv::Mat out;
out.create(outH, W, CV_8UC3);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(out);
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(cv_ref);
copy(img_out_cpu, img_out); // NOLINT
CUDA_CALL(hipDeviceSynchronize());
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(1))) <<
[&]() {
cv::Mat diff;
cv::absdiff(out, cv_ref, diff);
cv::imwrite("resample_vert_dif.png", diff);
return "Test failed. Absolute difference image saved to resample_hv_dif.png";
}();
}
class ResamplingTest : public ::testing::Test {
public:
enum BlockConfig {
BlockPerImage = 0,
BlockPerSpan = 1,
};
void SetOutputSize(int w, int h) {
out_w_ = w;
out_h_ = h;
scale_x_ = static_cast<double>(out_w_) / InputWidth();
scale_y_ = static_cast<double>(out_h_) / InputHeight();
fixed_size_ = true;
}
void SetScale(double sx, double sy) {
scale_x_ = sx;
scale_y_ = sy;
out_w_ = sx * InputWidth();
out_h_ = sy * InputHeight();
fixed_size_ = false;
}
void SetBlockConfig(BlockConfig config) {
block_config_ = config;
}
void SetSource(const char *input, const char *reference = nullptr) {
input_ = testing::data::image(input);
if (reference) {
reference_ = testing::data::image(reference);
} else {
reference_ = cv::Mat();
}
}
void SetProcessingOrder(bool vert_first = false) {
vert_first_ = vert_first;
}
void SetFilters(ResamplingFilter filter_x, ResamplingFilter filter_y) {
flt_x_ = filter_x;
flt_y_ = filter_y;
}
void CopyOutputToCPU(bool force = false) {
if (force || output_.empty()) {
auto type = CV_MAKETYPE(CV_8U, img_out_.shape[2]);
output_.create(img_out_.shape[0], img_out_.shape[1], type);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(output_);
copy(img_out_cpu, img_out_);
CUDA_CALL(hipDeviceSynchronize());
}
}
void SaveOutput(const char *file) {
CopyOutputToCPU();
cv::imwrite(file, output_);
}
void Verify(double epsilon, const char *diff_image = nullptr) {
ASSERT_FALSE(reference_.empty()) << "Cannot verify with empty reference";
CopyOutputToCPU();
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(reference_);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(output_);
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(epsilon))) <<
[&]()->std::string {
if (diff_image) {
if (img_out_cpu.shape == img_ref_cpu.shape) {
cv::Mat diff;
cv::absdiff(output_, reference_, diff);
cv::imwrite(diff_image, diff);
return "Test failed. Absolute difference image saved to " + std::string(diff_image);
} else {
return "Test failed. Ouput and reference have different size - no difference written.";
}
} else {
return "Test failed. Difference image not saved (no file name specified).";
}
}();
}
void VerifyWithMargin(int hmargin, int vmargin, double epsilon,
const char *diff_image = nullptr) {
ASSERT_FALSE(reference_.empty()) << "Cannot verify with empty reference";
CopyOutputToCPU();
cv::Mat tmp_out;
output_.copyTo(tmp_out);
{
int W = tmp_out.cols;
int H = tmp_out.rows;
if (hmargin) {
cv::Rect L(0, 0, hmargin, H);
cv::Rect R(W-hmargin, 0, hmargin, H);
reference_(L).copyTo(tmp_out(L));
reference_(R).copyTo(tmp_out(R));
}
if (vmargin) {
cv::Rect T(0, 0, W, vmargin);
cv::Rect B(0, H-vmargin, W, vmargin);
reference_(T).copyTo(tmp_out(T));
reference_(B).copyTo(tmp_out(B));
}
}
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(reference_);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(tmp_out);
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(epsilon))) <<
[&]()->std::string {
if (diff_image) {
if (img_out_cpu.shape == img_ref_cpu.shape) {
cv::Mat diff;
cv::absdiff(output_, reference_, diff);
cv::imwrite(diff_image, diff);
return "Test failed. Absolute difference image saved to " + std::string(diff_image);
} else {
return "Test failed. Ouput and reference have different size - no difference written.";
}
} else {
return "Test failed. Difference image not saved (no file name specified).";
}
}();
}
void Prepare() {
int W = InputWidth();
int H = InputHeight();
// update output size or scale
if (fixed_size_) {
scale_x_ = static_cast<double>(out_w_) / W;
scale_y_ = static_cast<double>(out_h_) / H;
} else {
out_w_ = W * scale_x_;
out_h_ = H * scale_y_;
}
PrepareTensors(W, H, out_w_, out_h_, input_.channels());
copy(img_in_, view_as_tensor<uint8_t, 3>(input_));
output_ = cv::Mat();
}
void PrepareTensors(int W, int H, int outW, int outH, int channels) {
int tmpW = vert_first_ ? W : outW;
int tmpH = vert_first_ ? outH : H;
gpu_mem_in_ = memory::alloc_unique<uint8_t>(AllocType::GPU, W * H * channels);
gpu_mem_tmp_ = memory::alloc_unique<float>(AllocType::GPU, tmpW * tmpH * channels);
gpu_mem_out_ = memory::alloc_unique<uint8_t>(AllocType::GPU, outW * outH * channels);
img_in_ = { gpu_mem_in_.get(), { H, W, channels } };
img_tmp_ = { gpu_mem_tmp_.get(), { tmpH, tmpW, channels } };
img_out_ = { gpu_mem_out_.get(), { outH, outW, channels } };
}
void Run() {
bool per_span = block_config_ == BlockPerSpan;
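// BlockPerSpan launches a grid that tiles the output, while BlockPerImage (grid = dim3(1))
// processes the whole image with a single block.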
int W = img_in_.shape[1];
int H = img_in_.shape[0];
int tmpW = img_tmp_.shape[1];
int tmpH = img_tmp_.shape[0];
int outW = img_out_.shape[1];
int outH = img_out_.shape[0];
int channels = img_in_.shape[2];
assert(img_out_.shape[2] == img_in_.shape[2]);
#if DALI_DEBUG
dim3 block(32, 8);
#else
dim3 block(32, 24);
#endif
if (vert_first_) {
assert(img_tmp_.shape == TensorShape<3>(outH, W, channels));
dim3 grid = per_span ? dim3(div_ceil(tmpW, block.x), div_ceil(tmpH, block.y)) : dim3(1);
hipLaunchKernelGGL(( ResampleVertTestKernel), dim3(grid), dim3(block), ResampleSharedMemSize, 0,
img_tmp_.data, tmpW*channels, tmpH, img_in_.data, W*channels, W, H, channels,
flt_y_, flt_y_.support());
grid = per_span ? dim3(div_ceil(outW, block.x), div_ceil(outH, block.y)) : dim3(1);
hipLaunchKernelGGL(( ResampleHorzTestKernel), dim3(grid), dim3(block), ResampleSharedMemSize, 0,
img_out_.data, outW*channels, outW, img_tmp_.data, tmpW*channels, tmpW, tmpH, channels,
flt_x_, flt_x_.support());
} else {
assert(img_tmp_.shape == TensorShape<3>(H, outW, channels));
dim3 grid = per_span ? dim3(div_ceil(tmpW, block.x), div_ceil(tmpH, block.y)) : dim3(1);
hipLaunchKernelGGL(( ResampleHorzTestKernel), dim3(grid), dim3(block), ResampleSharedMemSize, 0,
img_tmp_.data, tmpW*channels, tmpW, img_in_.data, W*channels, W, H, channels,
flt_x_, flt_x_.support());
grid = per_span ? dim3(div_ceil(outW, block.x), div_ceil(outH, block.y)) : dim3(1);
hipLaunchKernelGGL(( ResampleVertTestKernel), dim3(grid), dim3(block), ResampleSharedMemSize, 0,
img_out_.data, outW*channels, outH, img_tmp_.data, tmpW*channels, tmpW, tmpH, channels,
flt_y_, flt_y_.support());
}
}
int InputWidth() const {
return input_.cols;
}
int InputHeight() const {
return input_.rows;
}
int OutputWidth() const {
return fixed_size_ ? out_w_ : static_cast<int>(InputWidth() * scale_x_);
}
int OutputHeight() const {
return fixed_size_ ? out_h_ : static_cast<int>(InputHeight() * scale_y_);
}
double ScaleX() const {
return fixed_size_ ? static_cast<double>(out_w_) / InputWidth() : scale_x_;
}
double ScaleY() const {
return fixed_size_ ? static_cast<double>(out_h_) / InputHeight() : scale_y_;
}
cv::Mat input_, reference_, output_;
void SetUp() override {
input_ = cv::Mat();
reference_ = cv::Mat();
output_ = cv::Mat();
fixed_size_ = false;
scale_x_ = scale_y_ = 1;
gpu_mem_in_.reset();
gpu_mem_tmp_.reset();
gpu_mem_out_.reset();
img_in_ = {};
img_out_ = {};
img_tmp_ = {};
out_w_ = out_h_ = 0;
block_config_ = BlockPerImage;
vert_first_ = false;
flt_x_ = flt_y_ = {};
}
private:
int out_w_, out_h_;
double scale_x_ = 1, scale_y_ = 1;
bool fixed_size_ = false;
ResamplingFilter flt_x_, flt_y_;
TensorView<StorageGPU, uint8_t, 3> img_in_, img_out_;
TensorView<StorageGPU, float, 3> img_tmp_;
using deleter = std::function<void(void*)>;
std::unique_ptr<uint8_t, deleter> gpu_mem_in_, gpu_mem_out_;
std::unique_ptr<float, deleter> gpu_mem_tmp_;
BlockConfig block_config_ = BlockPerImage;
bool vert_first_ = false;
};
TEST_F(ResamplingTest, ResampleGauss) {
SetSource("imgproc/moire2.png", "imgproc/ref/resampling/resample_out.png");
SetOutputSize(InputWidth()-1, InputHeight()-3);
auto filters = GetResamplingFilters();
float sigmaX = (1/ScaleX() - 0.3f) / sqrt(2);
float sigmaY = (1/ScaleY() - 0.3f) / sqrt(2);
auto fx = filters->Gaussian(sigmaX);
auto fy = filters->Gaussian(sigmaY);
SetFilters(fx, fy);
Prepare();
Run();
// SaveOutput("resample_hv_out.png");
Verify(1, "resample_hv_dif.png");
}
TEST_F(ResamplingTest, ResampleVHGauss) {
SetSource("imgproc/moire2.png", "imgproc/ref/resampling/resample_out.png");
SetOutputSize(InputWidth()-1, InputHeight()-3);
auto filters = GetResamplingFilters();
float sigmaX = (1/ScaleX() - 0.3f) / sqrt(2);
float sigmaY = (1/ScaleY() - 0.3f) / sqrt(2);
auto fx = filters->Gaussian(sigmaX);
auto fy = filters->Gaussian(sigmaY);
SetProcessingOrder(true);
SetFilters(fx, fy);
Prepare();
Run();
// SaveOutput("resample_vh_out.png");
Verify(1, "resample_vh_dif.png");
}
TEST_F(ResamplingTest, SeparableTriangular) {
SetSource("imgproc/alley.png", "imgproc/ref/resampling/alley_tri_PIL.png");
SetOutputSize(300, 300);
auto filters = GetResamplingFilters();
auto fx = filters->Triangular(1 / ScaleX());
auto fy = filters->Triangular(1 / ScaleY());
Prepare();
SetFilters(fx, fy);
Run();
// SaveOutput("alley_tri.png");
VerifyWithMargin(1, 1, 1, "alley_tri_dif.png");
}
TEST_F(ResamplingTest, GaussianBlur) {
SetSource("imgproc/alley.png", "imgproc/ref/resampling/alley_blurred.png");
auto filters = GetResamplingFilters();
float sigmaX = 6.0f / sqrt(2);
float sigmaY = 6.0f / sqrt(2);
SetFilters(filters->Gaussian(sigmaX), filters->Gaussian(sigmaY));
Prepare();
Run();
Verify(2, "blur_dif.png");
}
// DISABLED: this 'test' is only for producing images for manual assessment
TEST_F(ResamplingTest, DISABLED_ProgressiveOutputs) {
SetSource("imgproc/alley.png", nullptr);
auto filters = GetResamplingFilters();
for (int i = 0; i < 10; i++) {
float sigmaX = powf(1.10f, i) * 0.5f;
float sigmaY = powf(1.10f, i) * 0.5f;
ResamplingFilter fx = filters->Gaussian(sigmaX);
ResamplingFilter fy = filters->Gaussian(sigmaY);
SetFilters(fx, fy);
Prepare();
Run();
char name[64] = {};
snprintf(name, sizeof(name), "blur_%i.png", i);
SaveOutput(name);
}
}
TEST_F(ResamplingTest, Lanczos3) {
SetSource("imgproc/score.png", "imgproc/ref/resampling/score_lanczos3.png");
SetScale(5, 5);
auto filters = GetResamplingFilters();
ResamplingFilter f = filters->Lanczos3();
SetFilters(f, f);
Prepare();
Run();
Verify(1, "score_lanczos_dif.png");
}
TEST_F(ResamplingTest, Cubic) {
SetSource("imgproc/score.png", "imgproc/ref/resampling/score_cubic.png");
SetOutputSize(200, 93);
auto filters = GetResamplingFilters();
ResamplingFilter f = filters->Cubic();
SetFilters(f, f);
Prepare();
Run();
Verify(1, "score_cubic_dif.png");
}
} // namespace kernels
} // namespace dali
| cfff75b4e0c2282a185caec6b74112182e5b7090.cu | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <cuda_runtime.h>
#include <opencv2/imgcodecs.hpp>
#include <algorithm>
#include <chrono>
#include <memory>
#include <string>
#include "dali/kernels/alloc.h"
#include "dali/test/mat2tensor.h"
#include "dali/kernels/common/copy.h"
#include "dali/kernels/test/test_data.h"
#include "dali/core/tensor_view.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/kernels/imgproc/resample/resampling_impl.cuh"
#include "dali/test/tensor_test_utils.h"
namespace dali {
namespace kernels {
template <typename Dst, typename Src>
__global__ void ResampleHorzTestKernel(
Dst *out, int out_stride, int out_w,
const Src *in, int in_stride, int in_w, int in_h, int channels,
ResamplingFilter filter, int support) {
float scale = static_cast<float>(in_w) / out_w;
int x0 = blockIdx.x * out_w / gridDim.x;
int x1 = (blockIdx.x + 1) * out_w / gridDim.x;
int y0 = blockIdx.y * in_h / gridDim.y;
int y1 = (blockIdx.y + 1) * in_h / gridDim.y;
vec<1, ptrdiff_t> out_strides(out_stride);
vec<1, ptrdiff_t> in_strides(in_stride);
ResampleHorz(
ivec2(x0, y0), ivec2(x1, y1), 0, scale,
out, out_strides, in, in_strides, ivec2(in_w, in_h),
channels, filter, support);
}
template <typename Dst, typename Src>
__global__ void ResampleVertTestKernel(
Dst *out, int out_stride, int out_h,
const Src *in, int in_stride, int in_w, int in_h, int channels,
ResamplingFilter filter, int support) {
float scale = static_cast<float>(in_h) / out_h;
int x0 = blockIdx.x * in_w / gridDim.x;
int x1 = (blockIdx.x + 1) * in_w / gridDim.x;
int y0 = blockIdx.y * out_h / gridDim.y;
int y1 = (blockIdx.y + 1) * out_h / gridDim.y;
vec<1, ptrdiff_t> out_strides(out_stride);
vec<1, ptrdiff_t> in_strides(in_stride);
ResampleVert(
ivec2(x0, y0), ivec2(x1, y1), 0, scale,
out, out_strides, in, in_strides, ivec2(in_w, in_h),
channels, filter, support);
}
TEST(Resample, HorizontalGaussian) {
auto cv_img = testing::data::image("imgproc/checkerboard.png");
auto cv_ref = testing::data::image("imgproc/ref/resampling/resample_horz.png");
ASSERT_FALSE(cv_img.empty()) << "Couldn't load the image";
TensorView<StorageCPU, const uint8_t, 3> img;
ASSERT_NO_FATAL_FAILURE((img = view_as_tensor<uint8_t>(cv_img)));
int channels = img.shape[2];
ASSERT_EQ(channels, 3);
int H = img.shape[0];
int W = img.shape[1];
int outW = W / 2;
auto gpu_mem_in = memory::alloc_unique<uint8_t>(AllocType::GPU, img.num_elements());
auto gpu_mem_out = memory::alloc_unique<uint8_t>(AllocType::GPU, H * outW * channels);
TensorView<StorageGPU, uint8_t, 3> img_in, img_out;
img_in = { gpu_mem_in.get(), img.shape };
img_out = { gpu_mem_out.get(), { H, outW, channels } };
copy(img_in, img); // NOLINT
auto filters = GetResamplingFilters();
ResamplingFilter filter = (*filters)[1];
int radius = 40;
filter.rescale(2*radius+1);
for (int i = 0; i < 100; i++) {
ResampleHorzTestKernel<<<1, dim3(32, 24), ResampleSharedMemSize>>>(
img_out.data, outW*channels, outW, img_in.data, W*channels, W, H, channels,
filter, filter.support());
CUDA_CALL(cudaDeviceSynchronize());
}
cv::Mat out;
out.create(H, outW, CV_8UC3);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(out);
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(cv_ref);
copy(img_out_cpu, img_out); // NOLINT
CUDA_CALL(cudaDeviceSynchronize());
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(1))) <<
[&]() {
cv::Mat diff;
cv::absdiff(out, cv_ref, diff);
cv::imwrite("resample_horz_dif.png", diff);
return "Test failed. Absolute difference image saved to resample_horz_dif.png";
}();
}
TEST(Resample, VerticalGaussian) {
auto cv_img = testing::data::image("imgproc/checkerboard.png");
auto cv_ref = testing::data::image("imgproc/ref/resampling/resample_vert.png");
ASSERT_FALSE(cv_img.empty()) << "Couldn't load the image";
TensorView<StorageCPU, const uint8_t, 3> img;
ASSERT_NO_FATAL_FAILURE((img = view_as_tensor<uint8_t>(cv_img)));
int channels = img.shape[2];
ASSERT_EQ(channels, 3);
int H = img.shape[0];
int W = img.shape[1];
int outH = H / 2;
auto gpu_mem_in = memory::alloc_unique<uint8_t>(AllocType::GPU, img.num_elements());
auto gpu_mem_out = memory::alloc_unique<uint8_t>(AllocType::GPU, outH * W * channels);
TensorView<StorageGPU, uint8_t, 3> img_in, img_out;
img_in = { gpu_mem_in.get(), img.shape };
img_out = { gpu_mem_out.get(), { outH, W, channels } };
copy(img_in, img); // NOLINT
auto filters = GetResamplingFilters();
ResamplingFilter filter = (*filters)[1];
int radius = 40;
filter.rescale(2*radius+1);
for (int i = 0; i < 100; i++) {
ResampleVertTestKernel<<<1, dim3(32, 24), ResampleSharedMemSize>>>(
img_out.data, W*channels, outH, img_in.data, W*channels, W, H, channels,
filter, filter.support());
CUDA_CALL(cudaDeviceSynchronize());
}
cv::Mat out;
out.create(outH, W, CV_8UC3);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(out);
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(cv_ref);
copy(img_out_cpu, img_out); // NOLINT
CUDA_CALL(cudaDeviceSynchronize());
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(1))) <<
[&]() {
cv::Mat diff;
cv::absdiff(out, cv_ref, diff);
cv::imwrite("resample_vert_dif.png", diff);
return "Test failed. Absolute difference image saved to resample_hv_dif.png";
}();
}
class ResamplingTest : public ::testing::Test {
public:
enum BlockConfig {
BlockPerImage = 0,
BlockPerSpan = 1,
};
void SetOutputSize(int w, int h) {
out_w_ = w;
out_h_ = h;
scale_x_ = static_cast<double>(out_w_) / InputWidth();
scale_y_ = static_cast<double>(out_h_) / InputHeight();
fixed_size_ = true;
}
void SetScale(double sx, double sy) {
scale_x_ = sx;
scale_y_ = sy;
out_w_ = sx * InputWidth();
out_h_ = sy * InputHeight();
fixed_size_ = false;
}
void SetBlockConfig(BlockConfig config) {
block_config_ = config;
}
void SetSource(const char *input, const char *reference = nullptr) {
input_ = testing::data::image(input);
if (reference) {
reference_ = testing::data::image(reference);
} else {
reference_ = cv::Mat();
}
}
void SetProcessingOrder(bool vert_first = false) {
vert_first_ = vert_first;
}
void SetFilters(ResamplingFilter filter_x, ResamplingFilter filter_y) {
flt_x_ = filter_x;
flt_y_ = filter_y;
}
void CopyOutputToCPU(bool force = false) {
if (force || output_.empty()) {
auto type = CV_MAKETYPE(CV_8U, img_out_.shape[2]);
output_.create(img_out_.shape[0], img_out_.shape[1], type);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(output_);
copy(img_out_cpu, img_out_);
CUDA_CALL(cudaDeviceSynchronize());
}
}
void SaveOutput(const char *file) {
CopyOutputToCPU();
cv::imwrite(file, output_);
}
void Verify(double epsilon, const char *diff_image = nullptr) {
ASSERT_FALSE(reference_.empty()) << "Cannot verify with empty reference";
CopyOutputToCPU();
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(reference_);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(output_);
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(epsilon))) <<
[&]()->std::string {
if (diff_image) {
if (img_out_cpu.shape == img_ref_cpu.shape) {
cv::Mat diff;
cv::absdiff(output_, reference_, diff);
cv::imwrite(diff_image, diff);
return "Test failed. Absolute difference image saved to " + std::string(diff_image);
} else {
return "Test failed. Ouput and reference have different size - no difference written.";
}
} else {
return "Test failed. Difference image not saved (no file name specified).";
}
}();
}
void VerifyWithMargin(int hmargin, int vmargin, double epsilon,
const char *diff_image = nullptr) {
ASSERT_FALSE(reference_.empty()) << "Cannot verify with empty reference";
CopyOutputToCPU();
cv::Mat tmp_out;
output_.copyTo(tmp_out);
{
int W = tmp_out.cols;
int H = tmp_out.rows;
if (hmargin) {
cv::Rect L(0, 0, hmargin, H);
cv::Rect R(W-hmargin, 0, hmargin, H);
reference_(L).copyTo(tmp_out(L));
reference_(R).copyTo(tmp_out(R));
}
if (vmargin) {
cv::Rect T(0, 0, W, vmargin);
cv::Rect B(0, H-vmargin, W, vmargin);
reference_(T).copyTo(tmp_out(T));
reference_(B).copyTo(tmp_out(B));
}
}
auto img_ref_cpu = view_as_tensor<uint8_t, 3>(reference_);
auto img_out_cpu = view_as_tensor<uint8_t, 3>(tmp_out);
EXPECT_NO_FATAL_FAILURE(Check(img_out_cpu, img_ref_cpu, EqualEps(epsilon))) <<
[&]()->std::string {
if (diff_image) {
if (img_out_cpu.shape == img_ref_cpu.shape) {
cv::Mat diff;
cv::absdiff(output_, reference_, diff);
cv::imwrite(diff_image, diff);
return "Test failed. Absolute difference image saved to " + std::string(diff_image);
} else {
return "Test failed. Ouput and reference have different size - no difference written.";
}
} else {
return "Test failed. Difference image not saved (no file name specified).";
}
}();
}
void Prepare() {
int W = InputWidth();
int H = InputHeight();
// update output size or scale
if (fixed_size_) {
scale_x_ = static_cast<double>(out_w_) / W;
scale_y_ = static_cast<double>(out_h_) / H;
} else {
out_w_ = W * scale_x_;
out_h_ = H * scale_y_;
}
PrepareTensors(W, H, out_w_, out_h_, input_.channels());
copy(img_in_, view_as_tensor<uint8_t, 3>(input_));
output_ = cv::Mat();
}
void PrepareTensors(int W, int H, int outW, int outH, int channels) {
int tmpW = vert_first_ ? W : outW;
int tmpH = vert_first_ ? outH : H;
gpu_mem_in_ = memory::alloc_unique<uint8_t>(AllocType::GPU, W * H * channels);
gpu_mem_tmp_ = memory::alloc_unique<float>(AllocType::GPU, tmpW * tmpH * channels);
gpu_mem_out_ = memory::alloc_unique<uint8_t>(AllocType::GPU, outW * outH * channels);
img_in_ = { gpu_mem_in_.get(), { H, W, channels } };
img_tmp_ = { gpu_mem_tmp_.get(), { tmpH, tmpW, channels } };
img_out_ = { gpu_mem_out_.get(), { outH, outW, channels } };
}
void Run() {
bool per_span = block_config_ == BlockPerSpan;
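// BlockPerSpan launches a grid that tiles the output, while BlockPerImage (grid = dim3(1))
// processes the whole image with a single block.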
int W = img_in_.shape[1];
int H = img_in_.shape[0];
int tmpW = img_tmp_.shape[1];
int tmpH = img_tmp_.shape[0];
int outW = img_out_.shape[1];
int outH = img_out_.shape[0];
int channels = img_in_.shape[2];
assert(img_out_.shape[2] == img_in_.shape[2]);
#if DALI_DEBUG
dim3 block(32, 8);
#else
dim3 block(32, 24);
#endif
if (vert_first_) {
assert(img_tmp_.shape == TensorShape<3>(outH, W, channels));
dim3 grid = per_span ? dim3(div_ceil(tmpW, block.x), div_ceil(tmpH, block.y)) : dim3(1);
ResampleVertTestKernel<<<grid, block, ResampleSharedMemSize>>>(
img_tmp_.data, tmpW*channels, tmpH, img_in_.data, W*channels, W, H, channels,
flt_y_, flt_y_.support());
grid = per_span ? dim3(div_ceil(outW, block.x), div_ceil(outH, block.y)) : dim3(1);
ResampleHorzTestKernel<<<grid, block, ResampleSharedMemSize>>>(
img_out_.data, outW*channels, outW, img_tmp_.data, tmpW*channels, tmpW, tmpH, channels,
flt_x_, flt_x_.support());
} else {
assert(img_tmp_.shape == TensorShape<3>(H, outW, channels));
dim3 grid = per_span ? dim3(div_ceil(tmpW, block.x), div_ceil(tmpH, block.y)) : dim3(1);
ResampleHorzTestKernel<<<grid, block, ResampleSharedMemSize>>>(
img_tmp_.data, tmpW*channels, tmpW, img_in_.data, W*channels, W, H, channels,
flt_x_, flt_x_.support());
grid = per_span ? dim3(div_ceil(outW, block.x), div_ceil(outH, block.y)) : dim3(1);
ResampleVertTestKernel<<<grid, block, ResampleSharedMemSize>>>(
img_out_.data, outW*channels, outH, img_tmp_.data, tmpW*channels, tmpW, tmpH, channels,
flt_y_, flt_y_.support());
}
}
int InputWidth() const {
return input_.cols;
}
int InputHeight() const {
return input_.rows;
}
int OutputWidth() const {
return fixed_size_ ? out_w_ : static_cast<int>(InputWidth() * scale_x_);
}
int OutputHeight() const {
return fixed_size_ ? out_h_ : static_cast<int>(InputHeight() * scale_y_);
}
double ScaleX() const {
return fixed_size_ ? static_cast<double>(out_w_) / InputWidth() : scale_x_;
}
double ScaleY() const {
return fixed_size_ ? static_cast<double>(out_h_) / InputHeight() : scale_y_;
}
cv::Mat input_, reference_, output_;
void SetUp() override {
input_ = cv::Mat();
reference_ = cv::Mat();
output_ = cv::Mat();
fixed_size_ = false;
scale_x_ = scale_y_ = 1;
gpu_mem_in_.reset();
gpu_mem_tmp_.reset();
gpu_mem_out_.reset();
img_in_ = {};
img_out_ = {};
img_tmp_ = {};
out_w_ = out_h_ = 0;
block_config_ = BlockPerImage;
vert_first_ = false;
flt_x_ = flt_y_ = {};
}
private:
int out_w_, out_h_;
double scale_x_ = 1, scale_y_ = 1;
bool fixed_size_ = false;
ResamplingFilter flt_x_, flt_y_;
TensorView<StorageGPU, uint8_t, 3> img_in_, img_out_;
TensorView<StorageGPU, float, 3> img_tmp_;
using deleter = std::function<void(void*)>;
std::unique_ptr<uint8_t, deleter> gpu_mem_in_, gpu_mem_out_;
std::unique_ptr<float, deleter> gpu_mem_tmp_;
BlockConfig block_config_ = BlockPerImage;
bool vert_first_ = false;
};
TEST_F(ResamplingTest, ResampleGauss) {
SetSource("imgproc/moire2.png", "imgproc/ref/resampling/resample_out.png");
SetOutputSize(InputWidth()-1, InputHeight()-3);
auto filters = GetResamplingFilters();
float sigmaX = (1/ScaleX() - 0.3f) / sqrt(2);
float sigmaY = (1/ScaleY() - 0.3f) / sqrt(2);
auto fx = filters->Gaussian(sigmaX);
auto fy = filters->Gaussian(sigmaY);
SetFilters(fx, fy);
Prepare();
Run();
// SaveOutput("resample_hv_out.png");
Verify(1, "resample_hv_dif.png");
}
TEST_F(ResamplingTest, ResampleVHGauss) {
SetSource("imgproc/moire2.png", "imgproc/ref/resampling/resample_out.png");
SetOutputSize(InputWidth()-1, InputHeight()-3);
auto filters = GetResamplingFilters();
float sigmaX = (1/ScaleX() - 0.3f) / sqrt(2);
float sigmaY = (1/ScaleY() - 0.3f) / sqrt(2);
auto fx = filters->Gaussian(sigmaX);
auto fy = filters->Gaussian(sigmaY);
SetProcessingOrder(true);
SetFilters(fx, fy);
Prepare();
Run();
// SaveOutput("resample_vh_out.png");
Verify(1, "resample_vh_dif.png");
}
TEST_F(ResamplingTest, SeparableTriangular) {
SetSource("imgproc/alley.png", "imgproc/ref/resampling/alley_tri_PIL.png");
SetOutputSize(300, 300);
auto filters = GetResamplingFilters();
auto fx = filters->Triangular(1 / ScaleX());
auto fy = filters->Triangular(1 / ScaleY());
Prepare();
SetFilters(fx, fy);
Run();
// SaveOutput("alley_tri.png");
VerifyWithMargin(1, 1, 1, "alley_tri_dif.png");
}
TEST_F(ResamplingTest, GaussianBlur) {
SetSource("imgproc/alley.png", "imgproc/ref/resampling/alley_blurred.png");
auto filters = GetResamplingFilters();
float sigmaX = 6.0f / sqrt(2);
float sigmaY = 6.0f / sqrt(2);
SetFilters(filters->Gaussian(sigmaX), filters->Gaussian(sigmaY));
Prepare();
Run();
Verify(2, "blur_dif.png");
}
// DISABLED: this 'test' is only for producing images for manual assessment
TEST_F(ResamplingTest, DISABLED_ProgressiveOutputs) {
SetSource("imgproc/alley.png", nullptr);
auto filters = GetResamplingFilters();
for (int i = 0; i < 10; i++) {
float sigmaX = powf(1.10f, i) * 0.5f;
float sigmaY = powf(1.10f, i) * 0.5f;
ResamplingFilter fx = filters->Gaussian(sigmaX);
ResamplingFilter fy = filters->Gaussian(sigmaY);
SetFilters(fx, fy);
Prepare();
Run();
char name[64] = {};
snprintf(name, sizeof(name), "blur_%i.png", i);
SaveOutput(name);
}
}
TEST_F(ResamplingTest, Lanczos3) {
SetSource("imgproc/score.png", "imgproc/ref/resampling/score_lanczos3.png");
SetScale(5, 5);
auto filters = GetResamplingFilters();
ResamplingFilter f = filters->Lanczos3();
SetFilters(f, f);
Prepare();
Run();
Verify(1, "score_lanczos_dif.png");
}
TEST_F(ResamplingTest, Cubic) {
SetSource("imgproc/score.png", "imgproc/ref/resampling/score_cubic.png");
SetOutputSize(200, 93);
auto filters = GetResamplingFilters();
ResamplingFilter f = filters->Cubic();
SetFilters(f, f);
Prepare();
Run();
Verify(1, "score_cubic_dif.png");
}
} // namespace kernels
} // namespace dali
|
6961db765766e42a51ba4ca67812492e217d1138.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
int main() {
int dev = 0;
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, dev);
std::cout << "GPU device " << dev << ": " << devProp.name << std::endl;
return 0;
}
| 6961db765766e42a51ba4ca67812492e217d1138.cu | #include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
int main() {
int dev = 0;
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, dev);
std::cout << "GPU device " << dev << ": " << devProp.name << std::endl;
return 0;
}
|
154480c6cb8c08dd2f01a961b95d3565ab7c8879.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
#include <hip/device_functions.h>
#include <sstream>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "anchorBox_generator.cuh"
#include "error_util.h"
const int cacheMemory = 1000;
__global__ void generate(float* aspect_ratio_layer0, float* aspect_ratio, float* scales_layer0, const float minScale, const float maxScale, const int anchorChannel, float* anchorShapeLayer0, float* anchorShapeLayer1to5)
{
__shared__ float scales[cacheMemory];
__shared__ float aspect_ratio_gpu[cacheMemory];
int idxRow = threadIdx.x + blockDim.x*blockIdx.x;
int idxCol = threadIdx.y + blockDim.y*blockIdx.y;
if (idxRow < anchorChannel) {
scales[idxRow] = minScale + (maxScale - minScale)*idxRow / (anchorChannel - 1);
scales[6] = 1;
aspect_ratio_gpu[idxRow] = aspect_ratio[idxRow];
}
__syncthreads();
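// Each even/odd pair below is one anchor shape: scale / sqrt(aspect_ratio) paired with
// scale * sqrt(aspect_ratio); the branch further down substitutes the geometric mean of two
// consecutive scales for the extra anchor of each layer (normally the aspect-ratio-1 box in SSD).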
int thread_even = min(threadIdx.x * 2, 58);
int thread_odd = min(threadIdx.x * 2 + 1, 59);
anchorShapeLayer1to5[thread_even] = scales[thread_even / (anchorChannel * 2) +1] / sqrt((double)aspect_ratio_gpu[(thread_even % (anchorChannel *2)) / 2]);
anchorShapeLayer1to5[thread_odd] = scales[thread_odd / (anchorChannel * 2) +1] * sqrt((double)aspect_ratio_gpu[(thread_odd % (anchorChannel * 2)) / 2]);
if (thread_even % (anchorChannel * 2) == 10)
{
anchorShapeLayer1to5[thread_even] = sqrt(scales[thread_even / (anchorChannel * 2) + 1] * scales[thread_even / (anchorChannel * 2) + 2]) / sqrt((double)aspect_ratio_gpu[(thread_even % (anchorChannel * 2)) / 2]);
anchorShapeLayer1to5[thread_odd] = sqrt(scales[thread_odd / (anchorChannel * 2) + 1] * scales[thread_odd / (anchorChannel * 2) + 2]) * sqrt((double)aspect_ratio_gpu[(thread_odd % (anchorChannel * 2)) / 2]);
}
int thread_even2 = min(thread_even, 4);
int thread_odd2 = min(thread_odd, 5);
anchorShapeLayer0[thread_even2] = scales_layer0[thread_even2 / 2] / sqrt((double)aspect_ratio_layer0[(thread_even2 % anchorChannel) / 2]);
anchorShapeLayer0[thread_odd2] = scales_layer0[thread_odd2 / 2] * sqrt((double)aspect_ratio_layer0[(thread_odd2 % anchorChannel) / 2]);
}
void anchorbox_generate(float* aspect_ratio_layer0 ,float* aspect_ratio, float* scales_layer0, const float minScale, const float maxScale, const int anchorChannel, float* anchorShapeLayer0, float* anchorShapeLayer1to5, dim3 threads_per_block, dim3 num_of_blocks) {
generate << <num_of_blocks, threads_per_block >> > (aspect_ratio_layer0, aspect_ratio, scales_layer0, minScale, maxScale, anchorChannel, anchorShapeLayer0, anchorShapeLayer1to5);
hipDeviceSynchronize();
} | 154480c6cb8c08dd2f01a961b95d3565ab7c8879.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
#include <device_functions.h>
#include <sstream>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include "anchorBox_generator.cuh"
#include "error_util.h"
const int cacheMemory = 1000;
__global__ void generate(float* aspect_ratio_layer0, float* aspect_ratio, float* scales_layer0, const float minScale, const float maxScale, const int anchorChannel, float* anchorShapeLayer0, float* anchorShapeLayer1to5)
{
__shared__ float scales[cacheMemory];
__shared__ float aspect_ratio_gpu[cacheMemory];
int idxRow = threadIdx.x + blockDim.x*blockIdx.x;
int idxCol = threadIdx.y + blockDim.y*blockIdx.y;
if (idxRow < anchorChannel) {
scales[idxRow] = minScale + (maxScale - minScale)*idxRow / (anchorChannel - 1);
scales[6] = 1;
aspect_ratio_gpu[idxRow] = aspect_ratio[idxRow];
}
__syncthreads();
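// Each even/odd pair below is one anchor shape: scale / sqrt(aspect_ratio) paired with
// scale * sqrt(aspect_ratio); the branch further down substitutes the geometric mean of two
// consecutive scales for the extra anchor of each layer (normally the aspect-ratio-1 box in SSD).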
int thread_even = min(threadIdx.x * 2, 58);
int thread_odd = min(threadIdx.x * 2 + 1, 59);
anchorShapeLayer1to5[thread_even] = scales[thread_even / (anchorChannel * 2) +1] / sqrt((double)aspect_ratio_gpu[(thread_even % (anchorChannel *2)) / 2]);
anchorShapeLayer1to5[thread_odd] = scales[thread_odd / (anchorChannel * 2) +1] * sqrt((double)aspect_ratio_gpu[(thread_odd % (anchorChannel * 2)) / 2]);
if (thread_even % (anchorChannel * 2) == 10)
{
anchorShapeLayer1to5[thread_even] = sqrt(scales[thread_even / (anchorChannel * 2) + 1] * scales[thread_even / (anchorChannel * 2) + 2]) / sqrt((double)aspect_ratio_gpu[(thread_even % (anchorChannel * 2)) / 2]);
anchorShapeLayer1to5[thread_odd] = sqrt(scales[thread_odd / (anchorChannel * 2) + 1] * scales[thread_odd / (anchorChannel * 2) + 2]) * sqrt((double)aspect_ratio_gpu[(thread_odd % (anchorChannel * 2)) / 2]);
}
int thread_even2 = min(thread_even, 4);
int thread_odd2 = min(thread_odd, 5);
anchorShapeLayer0[thread_even2] = scales_layer0[thread_even2 / 2] / sqrt((double)aspect_ratio_layer0[(thread_even2 % anchorChannel) / 2]);
anchorShapeLayer0[thread_odd2] = scales_layer0[thread_odd2 / 2] * sqrt((double)aspect_ratio_layer0[(thread_odd2 % anchorChannel) / 2]);
}
void anchorbox_generate(float* aspect_ratio_layer0 ,float* aspect_ratio, float* scales_layer0, const float minScale, const float maxScale, const int anchorChannel, float* anchorShapeLayer0, float* anchorShapeLayer1to5, dim3 threads_per_block, dim3 num_of_blocks) {
generate << <num_of_blocks, threads_per_block >> > (aspect_ratio_layer0, aspect_ratio, scales_layer0, minScale, maxScale, anchorChannel, anchorShapeLayer0, anchorShapeLayer1to5);
cudaDeviceSynchronize();
} |
d2944824caeb91542b2ab54a97c36a48fc90e1c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaResize.h"
// gpuResize
template<typename T>
__global__ void gpuResize( float2 scale, T* input, int iWidth, T* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
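// Point (nearest-neighbour) sampling: each output pixel copies the single input pixel it
// maps back to; no filtering or interpolation is applied.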
const T px = input[ dy * iWidth + dx ];
output[y*oWidth+x] = px;
}
// launchResize
template<typename T>
static hipError_t launchResize( T* input, size_t inputWidth, size_t inputHeight,
T* output, size_t outputWidth, size_t outputHeight )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuResize<T>), dim3(gridDim), dim3(blockDim), 0, 0, scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(hipGetLastError());
}
// cudaResize (uint8 grayscale)
hipError_t cudaResize( uint8_t* input, size_t inputWidth, size_t inputHeight, uint8_t* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<uint8_t>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (float grayscale)
hipError_t cudaResize( float* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<float>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (uchar3)
hipError_t cudaResize( uchar3* input, size_t inputWidth, size_t inputHeight, uchar3* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<uchar3>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (uchar4)
hipError_t cudaResize( uchar4* input, size_t inputWidth, size_t inputHeight, uchar4* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<uchar4>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (float3)
hipError_t cudaResize( float3* input, size_t inputWidth, size_t inputHeight, float3* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<float3>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (float4)
hipError_t cudaResize( float4* input, size_t inputWidth, size_t inputHeight, float4* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<float4>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
//-----------------------------------------------------------------------------------
hipError_t cudaResize( void* input, size_t inputWidth, size_t inputHeight,
void* output, size_t outputWidth, size_t outputHeight, imageFormat format )
{
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
return cudaResize((uchar3*)input, inputWidth, inputHeight, (uchar3*)output, outputWidth, outputHeight);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
return cudaResize((uchar4*)input, inputWidth, inputHeight, (uchar4*)output, outputWidth, outputHeight);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
return cudaResize((float3*)input, inputWidth, inputHeight, (float3*)output, outputWidth, outputHeight);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
return cudaResize((float4*)input, inputWidth, inputHeight, (float4*)output, outputWidth, outputHeight);
else if( format == IMAGE_GRAY8 )
return cudaResize((uint8_t*)input, inputWidth, inputHeight, (uint8_t*)output, outputWidth, outputHeight);
else if( format == IMAGE_GRAY32F )
return cudaResize((float*)input, inputWidth, inputHeight, (float*)output, outputWidth, outputHeight);
LogError(LOG_CUDA "cudaResize() -- invalid image format '%s'\n", imageFormatToStr(format));
LogError(LOG_CUDA " supported formats are:\n");
LogError(LOG_CUDA " * gray8\n");
LogError(LOG_CUDA " * gray32f\n");
LogError(LOG_CUDA " * rgb8, bgr8\n");
LogError(LOG_CUDA " * rgba8, bgra8\n");
LogError(LOG_CUDA " * rgb32f, bgr32f\n");
LogError(LOG_CUDA " * rgba32f, bgra32f\n");
return hipErrorInvalidValue;
}
| d2944824caeb91542b2ab54a97c36a48fc90e1c4.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaResize.h"
// gpuResize
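// Nearest-neighbour resampling: each output pixel (x,y) reads the input pixel at
// (x*scale.x, y*scale.y), truncated to integer coordinates; no interpolation is applied.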
template<typename T>
__global__ void gpuResize( float2 scale, T* input, int iWidth, T* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const T px = input[ dy * iWidth + dx ];
output[y*oWidth+x] = px;
}
// launchResize
template<typename T>
static cudaError_t launchResize( T* input, size_t inputWidth, size_t inputHeight,
T* output, size_t outputWidth, size_t outputHeight )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuResize<T><<<gridDim, blockDim>>>(scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(cudaGetLastError());
}
// cudaResize (uint8 grayscale)
cudaError_t cudaResize( uint8_t* input, size_t inputWidth, size_t inputHeight, uint8_t* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<uint8_t>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (float grayscale)
cudaError_t cudaResize( float* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<float>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (uchar3)
cudaError_t cudaResize( uchar3* input, size_t inputWidth, size_t inputHeight, uchar3* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<uchar3>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (uchar4)
cudaError_t cudaResize( uchar4* input, size_t inputWidth, size_t inputHeight, uchar4* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<uchar4>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (float3)
cudaError_t cudaResize( float3* input, size_t inputWidth, size_t inputHeight, float3* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<float3>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
// cudaResize (float4)
cudaError_t cudaResize( float4* input, size_t inputWidth, size_t inputHeight, float4* output, size_t outputWidth, size_t outputHeight )
{
return launchResize<float4>(input, inputWidth, inputHeight, output, outputWidth, outputHeight);
}
//-----------------------------------------------------------------------------------
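// Format-aware entry point: dispatches to the typed cudaResize() overload matching the given imageFormat.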
cudaError_t cudaResize( void* input, size_t inputWidth, size_t inputHeight,
void* output, size_t outputWidth, size_t outputHeight, imageFormat format )
{
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
return cudaResize((uchar3*)input, inputWidth, inputHeight, (uchar3*)output, outputWidth, outputHeight);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
return cudaResize((uchar4*)input, inputWidth, inputHeight, (uchar4*)output, outputWidth, outputHeight);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
return cudaResize((float3*)input, inputWidth, inputHeight, (float3*)output, outputWidth, outputHeight);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
return cudaResize((float4*)input, inputWidth, inputHeight, (float4*)output, outputWidth, outputHeight);
else if( format == IMAGE_GRAY8 )
return cudaResize((uint8_t*)input, inputWidth, inputHeight, (uint8_t*)output, outputWidth, outputHeight);
else if( format == IMAGE_GRAY32F )
return cudaResize((float*)input, inputWidth, inputHeight, (float*)output, outputWidth, outputHeight);
LogError(LOG_CUDA "cudaResize() -- invalid image format '%s'\n", imageFormatToStr(format));
LogError(LOG_CUDA " supported formats are:\n");
LogError(LOG_CUDA " * gray8\n");
LogError(LOG_CUDA " * gray32f\n");
LogError(LOG_CUDA " * rgb8, bgr8\n");
LogError(LOG_CUDA " * rgba8, bgra8\n");
LogError(LOG_CUDA " * rgb32f, bgr32f\n");
LogError(LOG_CUDA " * rgba32f, bgra32f\n");
return cudaErrorInvalidValue;
}
|
f46e1ddc4a23a238d19e9ddde8284012058291df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Adaptive Non Local Means - Parallel Implementation
*
* Authors: Athanasiadis Christos [email protected] AEM 8416
* Matsoukas Vasileios [email protected] AEM 8743
*
* adaptive NLM using loop for each region
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <math.h>
#include <string.h>
#define MAX 512
struct timeval startwtime, endwtime;
float seq_time=0;
float* image_pad(float *im, int m, int n, int patchsize_x, int patchsize_y);
float* gauss_patch(float* gaussian, int offsetx, int offsety, float patchSigma);
//------------Kernel functions------------//
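// neighbs: for every pixel of the original m x n image (recovered from the padded s_x x s_y image),
// gather its patchsize_x * patchsize_y neighbourhood, weight it with the Gaussian window 'gauss',
// and store it as one row of the 'neighbors' matrix (rows padded to newpatch_size columns).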
__global__ void neighbs(float* im, float* neighbors, float* gauss, int offsety, int offsetx, int patch_size, int newpatch_size, int s_x, int s_y, int patchsize_y){
int id = threadIdx.x+blockIdx.x*blockDim.x;
int m = s_y - 2*offsety;
int n = s_x - 2*offsetx;
if (id<m*n){
int kx = blockIdx.y-offsetx;
int ky = blockIdx.z-offsety;
int neighbor_id = blockIdx.y*patchsize_y+blockIdx.z;
neighbors[id*newpatch_size+neighbor_id] = im[(offsety+ky+id%m)*s_x+kx+offsetx+(id/m)]*gauss[neighbor_id];
}
}
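// affinity: for one region of pixels (indices in 'reg'), compute pairwise squared patch distances
// with 16x16 shared-memory tiles, convert them to weights exp(-dist/std), and accumulate the
// weighted patch-centre values into 'image' and the weight sums into 'row' via atomicAdd.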
__global__ void affinity(float *image, float *neighbors, int pixels, int patchsize, float std, int offsetx,int offsety,int patchsize_y, float *row, int* reg){
__shared__ float Ys[16][16];
__shared__ float Xs[16][16];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int yBegin = by * 16 * patchsize;
int xBegin = bx * 16 * patchsize;
int yEnd = yBegin + patchsize - 1, y, x, k;
float tmp, c = 0 , s = 0;
int pixel_x = bx*16 + tx;
int pixel_y = by*16 + ty;
int t = 0;
for(y=yBegin,x=xBegin; y<=yEnd; y+=16,x+=16){
if (pixel_y < pixels) Ys[ty][tx] = neighbors[reg[pixel_y]*patchsize + tx + t*16];
else Ys[ty][tx] = 0;
if (bx*16+ty < pixels) Xs[tx][ty] = neighbors[reg[bx*16 + ty]*patchsize + tx + t*16];
else Xs[tx][ty] = 0;
t++;
__syncthreads();
for(k=0;k<16;k++){
tmp = Ys[ty][k] - Xs[k][tx];
s += tmp*tmp;
}
__syncthreads();
}
if (pixel_y < pixels && pixel_x < pixels){
Xs[ty][tx] = exp(-s/std);
Ys[ty][tx] = Xs[ty][tx]*neighbors[reg[pixel_x]*patchsize+patchsize_y*offsetx+offsety];
}
else {
Ys[ty][tx] = 0;
Xs[ty][tx] = 0;
}
__syncthreads();
if (pixel_y< pixels && tx==0){
s=0;
for (k=0; k<16; k++){
c+=Ys[ty][k];
s+=Xs[ty][k];
}
atomicAdd(&image[reg[pixel_y]], c); atomicAdd(&row[reg[pixel_y]],s);
}
}
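// newimage: normalise each accumulated pixel value by its total weight (im[id] /= row[id]).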
__global__ void newimage(float* im, float* row, int k, int n){
int id = blockIdx.x*n + blockIdx.y;
if (id < k) im[id] = im[id]/row[id];
}
int main(int argc, char **argv){
if (argc!=5){
printf("Wrong number of inputs.\n");
printf("Usage: %s binary_input binary_output image_rows image_columns \n",argv[0]);
exit(0);
}
FILE *fp; //file pointer to the binary image file
int m = atoi(argv[3]); //image rows
int n = atoi(argv[4]); //image columns
int patchsize_x = 5, patchsize_y = 5;
int patch_size = patchsize_x*patchsize_y;
float patchSigma = 1.6667;
int nLevel = 6;
int size_y = m + (patchsize_y-1);
int offsety = (patchsize_y-1)/2;
int size_x = n + (patchsize_x-1);
int offsetx = (patchsize_x-1)/2;
float *im,*new_im, *gaussian;
float *d_im,*d_new_im, *d_gaussian, *d_neighbors, *d_row;
int blocksx,blocksy,blocksz;
int* L = ( int *)malloc(m*n*sizeof( int *));
float* mean = (float *)calloc(nLevel,sizeof( float ));
float* std = (float *)calloc(nLevel,sizeof( float ));
int* count = (int *)calloc(nLevel,sizeof(int));
//Allocate (1-D) memory for input image and filtered image
im = ( float *)malloc(size_y*size_x*sizeof( float ));
new_im=(float *)malloc(m*n*sizeof(float));
//Open data file
fp=fopen(argv[1],"rb");
if (!fp){
printf("Unable to open file!");
return 1;
}
int index=0;
//Store input image data and find sigma for each region
for (int i=offsety; i<size_y-offsety; i++){
fread(im+i*size_x+offsetx,sizeof(float),n,fp);
for (int j=0; j<n; j++){
index= j*m+(i-offsety);
L[index] = round((nLevel-1)*im[(index%m+offsety)*size_x + index/m +offsetx]);
mean[L[index]]+= im[(index%m+offsety)*size_x + index/m +offsetx];
count[L[index]]++;
}
}
fclose(fp);
int** regions = ( int **)malloc(nLevel*sizeof( int *));
for (int i=0; i<nLevel; i++)
regions[i] = ( int *)malloc(count[i]*sizeof(int));
int* regionscount = (int *)calloc(nLevel,sizeof(int));
for (int i=0; i<nLevel; i++) mean[i]/=count[i];
int reg;
for (int j=0; j<n; j++){
for (int i=0; i<m; i++){
index= j*m+i;
reg = L[index];
std[reg]+=(im[(index%m+offsety)*size_x + index/m +offsetx]-mean[reg])*(im[(index%m+offsety)*size_x + index/m +offsetx]-mean[reg]);
regions[reg][regionscount[reg]++] = index;
}
}
for (int i=0; i<nLevel; i++) std[i]/=(count[i]-1);
//Pad image's borders symmetrically
im=image_pad(im,m,n,patchsize_x,patchsize_y);
//Create gaussian patch
gaussian = ( float *)malloc(patch_size*sizeof( float));
gaussian = gauss_patch(gaussian,offsetx,offsety,patchSigma);
/////// Set new patchsize /////////
int newpatch_size = patch_size;
if (patch_size%16!=0) newpatch_size = (patch_size/16 + 1)*16;
//Allocate space for device copies
hipMalloc((void **)&d_im, size_x*size_y*sizeof(float));
hipMalloc((void **)&d_new_im, m*n*sizeof(float));
hipMalloc((void **)&d_row, m*n*sizeof(float));
hipMalloc((void **)&d_neighbors, (m*n)*newpatch_size*sizeof(float));
hipMalloc((void **)&d_gaussian, patch_size*sizeof(float));
//Copy inputs to device
hipMemcpy(d_im, im, size_x*size_y*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_gaussian, gaussian, patch_size*sizeof(float), hipMemcpyHostToDevice);
free(mean);free(gaussian);free(im);free(L);
/////// Cuda Memset /////////
hipMemset(d_neighbors, 0, (m*n)*newpatch_size*sizeof(float));
hipMemset(d_row, 0, (m*n)*sizeof(float));
hipMemset(d_new_im, 0, (m*n)*sizeof(float));
//Finding neighbors, number_of_neighbors % 16 = 0
blocksx = (m*n)/MAX;
if ((m*n)%MAX!=0)
blocksx++;
blocksy = patchsize_x;
blocksz = patchsize_y;
//Begin Chronometer
gettimeofday (&startwtime, NULL);
hipLaunchKernelGGL(( neighbs), dim3(dim3(blocksx,blocksy,blocksz)),dim3(MAX), 0, 0, d_im, d_neighbors, d_gaussian, offsety, offsetx, patch_size, newpatch_size, size_x, size_y, patchsize_y);
int k;
for (int q=0; q<nLevel; q++){
int *d_region;
k = count[q];
hipMalloc((void **)&d_region, k*sizeof(int));
hipMemcpy(d_region, regions[q] , k*sizeof(int), hipMemcpyHostToDevice);
//Finding the affinity matrix, k%16 = 0
blocksx = k/16;
if (k%16!=0) blocksx = (blocksx/16+1)*16;
blocksy = blocksx;
hipLaunchKernelGGL(( affinity), dim3(dim3(blocksx, blocksy, 1)),dim3(dim3(16, 16, 1)), 0, 0, d_new_im, d_neighbors, k, newpatch_size, std[q], offsetx, offsety, patchsize_y, d_row, d_region);
hipFree(d_region);
}
hipLaunchKernelGGL(( newimage), dim3(dim3(m, n)), dim3(1), 0, 0, d_new_im, d_row, m*n, n);
//Stop chronometer
hipDeviceSynchronize();
gettimeofday (&endwtime, NULL);
printf("\n");
seq_time = (float)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6+ endwtime.tv_sec - startwtime.tv_sec);
printf("Total time needed using adaptive nlmeans: %f secs\n", seq_time);
hipMemcpy(new_im,d_new_im, m*n*sizeof(float), hipMemcpyDeviceToHost );
// printf("Thats the filtered image:\n");
//
//
// for (int j=0; j<m; j++){
// for (int i=0; i<n ; i++)
// printf( "%f ",new_im[i*m+j]);
//
// printf("\n\n");
// }
//Save output to a binary file
fp=fopen(argv[2],"w");
for (int j=0; j<m; j++){
for (int i=0; i<n ; i++)
fwrite(&new_im[i*m+j],sizeof(float),1,fp);
}
fclose(fp);
//Free allocated memory
hipFree(d_im); hipFree(d_gaussian); hipFree(d_neighbors); hipFree(d_new_im); hipFree(d_row);
free(std); free(new_im);
return 0;
}
//Host functions
float* image_pad(float *im, int m, int n, int patchsize_x, int patchsize_y){
int size_y = m + (patchsize_y-1);
int offsety = (patchsize_y-1)/2;
int size_x = n + (patchsize_x-1);
int offsetx = (patchsize_x-1)/2;
//Padding the data for image's borders
for (int i=0; i<offsety; i++){
for (int j=offsetx ; j<size_x - offsetx; j++)
im[i*size_x+j] = im[(2*offsety-1-i)*size_x+j];
}
for (int i=0; i<offsety; i++){
for (int j=offsetx ; j<size_x - offsetx; j++)
im[(size_y-offsety+i)*size_x+j] = im[(size_y-offsety-i-1)*size_x+j];
}
for (int i=0; i<size_y; i++){
for (int j=0 ; j<offsetx; j++)
im[i*size_x+j] = im[i*size_x+2*offsetx-1-j];
}
for (int i=0; i<size_y; i++){
for (int j=0 ; j<offsetx; j++)
im[i*size_x+size_x-offsetx+j] = im[i*size_x+size_x-offsetx-1-j];
}
return im;
}
float* gauss_patch(float *gaussian, int offsetx, int offsety, float patchSigma){
int u=0;
for (int kx=-offsetx; kx<offsetx+1; kx++){
for (int ky = -offsety; ky<offsety+1; ky++){
gaussian[u] = exp(-(ky*ky+kx*kx)/(2*patchSigma*patchSigma));
u++;
}
}
return gaussian;
}
| f46e1ddc4a23a238d19e9ddde8284012058291df.cu | /*
* Adaptive Non Local Means - Parallel Implementation
*
* Authors: Athanasiadis Christos [email protected] AEM 8416
* Matsoukas Vasileios [email protected] AEM 8743
*
* adaptive NLM using loop for each region
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <math.h>
#include <string.h>
#define MAX 512
struct timeval startwtime, endwtime;
float seq_time=0;
float* image_pad(float *im, int m, int n, int patchsize_x, int patchsize_y);
float* gauss_patch(float* gaussian, int offsetx, int offsety, float patchSigma);
//------------Kernel functions------------//
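// neighbs: for every pixel of the original m x n image (recovered from the padded s_x x s_y image),
// gather its patchsize_x * patchsize_y neighbourhood, weight it with the Gaussian window 'gauss',
// and store it as one row of the 'neighbors' matrix (rows padded to newpatch_size columns).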
__global__ void neighbs(float* im, float* neighbors, float* gauss, int offsety, int offsetx, int patch_size, int newpatch_size, int s_x, int s_y, int patchsize_y){
int id = threadIdx.x+blockIdx.x*blockDim.x;
int m = s_y - 2*offsety;
int n = s_x - 2*offsetx;
if (id<m*n){
int kx = blockIdx.y-offsetx;
int ky = blockIdx.z-offsety;
int neighbor_id = blockIdx.y*patchsize_y+blockIdx.z;
neighbors[id*newpatch_size+neighbor_id] = im[(offsety+ky+id%m)*s_x+kx+offsetx+(id/m)]*gauss[neighbor_id];
}
}
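// affinity: for one region of pixels (indices in 'reg'), compute pairwise squared patch distances
// with 16x16 shared-memory tiles, convert them to weights exp(-dist/std), and accumulate the
// weighted patch-centre values into 'image' and the weight sums into 'row' via atomicAdd.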
__global__ void affinity(float *image, float *neighbors, int pixels, int patchsize, float std, int offsetx,int offsety,int patchsize_y, float *row, int* reg){
__shared__ float Ys[16][16];
__shared__ float Xs[16][16];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int yBegin = by * 16 * patchsize;
int xBegin = bx * 16 * patchsize;
int yEnd = yBegin + patchsize - 1, y, x, k;
float tmp, c = 0 , s = 0;
int pixel_x = bx*16 + tx;
int pixel_y = by*16 + ty;
int t = 0;
for(y=yBegin,x=xBegin; y<=yEnd; y+=16,x+=16){
if (pixel_y < pixels) Ys[ty][tx] = neighbors[reg[pixel_y]*patchsize + tx + t*16];
else Ys[ty][tx] = 0;
if (bx*16+ty < pixels) Xs[tx][ty] = neighbors[reg[bx*16 + ty]*patchsize + tx + t*16];
else Xs[tx][ty] = 0;
t++;
__syncthreads();
for(k=0;k<16;k++){
tmp = Ys[ty][k] - Xs[k][tx];
s += tmp*tmp;
}
__syncthreads();
}
if (pixel_y < pixels && pixel_x < pixels){
Xs[ty][tx] = exp(-s/std);
Ys[ty][tx] = Xs[ty][tx]*neighbors[reg[pixel_x]*patchsize+patchsize_y*offsetx+offsety];
}
else {
Ys[ty][tx] = 0;
Xs[ty][tx] = 0;
}
__syncthreads();
if (pixel_y< pixels && tx==0){
s=0;
for (k=0; k<16; k++){
c+=Ys[ty][k];
s+=Xs[ty][k];
}
atomicAdd(&image[reg[pixel_y]], c); atomicAdd(&row[reg[pixel_y]],s);
}
}
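// newimage: normalise each accumulated pixel value by its total weight (im[id] /= row[id]).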
__global__ void newimage(float* im, float* row, int k, int n){
int id = blockIdx.x*n + blockIdx.y;
if (id < k) im[id] = im[id]/row[id];
}
int main(int argc, char **argv){
if (argc!=5){
printf("Wrong number of inputs.\n");
printf("Usage: %s binary_input binary_output image_rows image_columns \n",argv[0]);
exit(0);
}
FILE *fp; //file pointer to the binary image file
int m = atoi(argv[3]); //image rows
int n = atoi(argv[4]); //image columns
int patchsize_x = 5, patchsize_y = 5;
int patch_size = patchsize_x*patchsize_y;
float patchSigma = 1.6667;
int nLevel = 6;
int size_y = m + (patchsize_y-1);
int offsety = (patchsize_y-1)/2;
int size_x = n + (patchsize_x-1);
int offsetx = (patchsize_x-1)/2;
float *im,*new_im, *gaussian;
float *d_im,*d_new_im, *d_gaussian, *d_neighbors, *d_row;
int blocksx,blocksy,blocksz;
int* L = ( int *)malloc(m*n*sizeof( int *));
float* mean = (float *)calloc(nLevel,sizeof( float ));
float* std = (float *)calloc(nLevel,sizeof( float ));
int* count = (int *)calloc(nLevel,sizeof(int));
//Allocate (1-D) memory for input image and filtered image
im = ( float *)malloc(size_y*size_x*sizeof( float ));
new_im=(float *)malloc(m*n*sizeof(float));
//Open data file
fp=fopen(argv[1],"rb");
if (!fp){
printf("Unable to open file!");
return 1;
}
int index=0;
//Store input image data and find sigma for each region
for (int i=offsety; i<size_y-offsety; i++){
fread(im+i*size_x+offsetx,sizeof(float),n,fp);
for (int j=0; j<n; j++){
index= j*m+(i-offsety);
L[index] = round((nLevel-1)*im[(index%m+offsety)*size_x + index/m +offsetx]);
mean[L[index]]+= im[(index%m+offsety)*size_x + index/m +offsetx];
count[L[index]]++;
}
}
fclose(fp);
int** regions = ( int **)malloc(nLevel*sizeof( int *));
for (int i=0; i<nLevel; i++)
regions[i] = ( int *)malloc(count[i]*sizeof(int));
int* regionscount = (int *)calloc(nLevel,sizeof(int));
for (int i=0; i<nLevel; i++) mean[i]/=count[i];
int reg;
for (int j=0; j<n; j++){
for (int i=0; i<m; i++){
index= j*m+i;
reg = L[index];
std[reg]+=(im[(index%m+offsety)*size_x + index/m +offsetx]-mean[reg])*(im[(index%m+offsety)*size_x + index/m +offsetx]-mean[reg]);
regions[reg][regionscount[reg]++] = index;
}
}
for (int i=0; i<nLevel; i++) std[i]/=(count[i]-1);
//Pad image's borders symmetrically
im=image_pad(im,m,n,patchsize_x,patchsize_y);
//Create gaussian patch
gaussian = ( float *)malloc(patch_size*sizeof( float));
gaussian = gauss_patch(gaussian,offsetx,offsety,patchSigma);
/////// Set new patchsize /////////
int newpatch_size = patch_size;
if (patch_size%16!=0) newpatch_size = (patch_size/16 + 1)*16;
//Allocate space for device copies
cudaMalloc((void **)&d_im, size_x*size_y*sizeof(float));
cudaMalloc((void **)&d_new_im, m*n*sizeof(float));
cudaMalloc((void **)&d_row, m*n*sizeof(float));
cudaMalloc((void **)&d_neighbors, (m*n)*newpatch_size*sizeof(float));
cudaMalloc((void **)&d_gaussian, patch_size*sizeof(float));
//Copy inputs to device
cudaMemcpy(d_im, im, size_x*size_y*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian, gaussian, patch_size*sizeof(float), cudaMemcpyHostToDevice);
free(mean);free(gaussian);free(im);free(L);
/////// Cuda Memset /////////
cudaMemset(d_neighbors, 0, (m*n)*newpatch_size*sizeof(float));
cudaMemset(d_row, 0, (m*n)*sizeof(float));
cudaMemset(d_new_im, 0, (m*n)*sizeof(float));
//Finding neighbors, number_of_neighbors % 16 = 0
blocksx = (m*n)/MAX;
if ((m*n)%MAX!=0)
blocksx++;
blocksy = patchsize_x;
blocksz = patchsize_y;
//Begin Chronometer
gettimeofday (&startwtime, NULL);
neighbs<<<dim3(blocksx,blocksy,blocksz),MAX>>>(d_im, d_neighbors, d_gaussian, offsety, offsetx, patch_size, newpatch_size, size_x, size_y, patchsize_y);
int k;
for (int q=0; q<nLevel; q++){
int *d_region;
k = count[q];
cudaMalloc((void **)&d_region, k*sizeof(int));
cudaMemcpy(d_region, regions[q] , k*sizeof(int), cudaMemcpyHostToDevice);
//Finding the affinity matrix, k%16 = 0
blocksx = k/16;
if (k%16!=0) blocksx = (blocksx/16+1)*16;
blocksy = blocksx;
affinity<<<dim3(blocksx, blocksy, 1),dim3(16, 16, 1)>>>(d_new_im, d_neighbors, k, newpatch_size, std[q], offsetx, offsety, patchsize_y, d_row, d_region);
cudaFree(d_region);
}
newimage<<<dim3(m, n), 1>>>(d_new_im, d_row, m*n, n);
//Stop chronometer
cudaDeviceSynchronize();
gettimeofday (&endwtime, NULL);
printf("\n");
seq_time = (float)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6+ endwtime.tv_sec - startwtime.tv_sec);
printf("Total time needed using adaptive nlmeans: %f secs\n", seq_time);
cudaMemcpy(new_im,d_new_im, m*n*sizeof(float), cudaMemcpyDeviceToHost );
// printf("Thats the filtered image:\n");
//
//
// for (int j=0; j<m; j++){
// for (int i=0; i<n ; i++)
// printf( "%f ",new_im[i*m+j]);
//
// printf("\n\n");
// }
//Save output to a binary file
fp=fopen(argv[2],"w");
for (int j=0; j<m; j++){
for (int i=0; i<n ; i++)
fwrite(&new_im[i*m+j],sizeof(float),1,fp);
}
fclose(fp);
//Free allocated memory
cudaFree(d_im); cudaFree(d_gaussian); cudaFree(d_neighbors); cudaFree(d_new_im); cudaFree(d_row);
free(std); free(new_im);
return 0;
}
//Host functions
float* image_pad(float *im, int m, int n, int patchsize_x, int patchsize_y){
int size_y = m + (patchsize_y-1);
int offsety = (patchsize_y-1)/2;
int size_x = n + (patchsize_x-1);
int offsetx = (patchsize_x-1)/2;
//Padding the data for image's borders
for (int i=0; i<offsety; i++){
for (int j=offsetx ; j<size_x - offsetx; j++)
im[i*size_x+j] = im[(2*offsety-1-i)*size_x+j];
}
for (int i=0; i<offsety; i++){
for (int j=offsetx ; j<size_x - offsetx; j++)
im[(size_y-offsety+i)*size_x+j] = im[(size_y-offsety-i-1)*size_x+j];
}
for (int i=0; i<size_y; i++){
for (int j=0 ; j<offsetx; j++)
im[i*size_x+j] = im[i*size_x+2*offsetx-1-j];
}
for (int i=0; i<size_y; i++){
for (int j=0 ; j<offsetx; j++)
im[i*size_x+size_x-offsetx+j] = im[i*size_x+size_x-offsetx-1-j];
}
return im;
}
float* gauss_patch(float *gaussian, int offsetx, int offsety, float patchSigma){
int u=0;
for (int kx=-offsetx; kx<offsetx+1; kx++){
for (int ky = -offsety; ky<offsety+1; ky++){
gaussian[u] = exp(-(ky*ky+kx*kx)/(2*patchSigma*patchSigma));
u++;
}
}
return gaussian;
}
|
83405156444c39aeb081d26f248c841ced10d965.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <stdio.h>
#include <sstream>
#include <cmath>
#include "utils.h"
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
using std::string;
// if the residual has not been decreased by the target factor after this many iterations, stop the program.
const int stopAfterIterNum = 5000;
// common utils
std::ostringstream strs;
// g++ -std=c++11 -O3 -o jacobi2D-omp jacobi2D-omp.cpp
// g++-9 -std=c++11 -O3 -o jacobi2D -fopenmp jacobi2D.cpp
// nvcc -std=c++11 -Xcompiler -fopenmp -o jacobi2D jacobi2D.cu
// Without parallel: N=10, run time = 0.011500 second(s).
int calcRowMajorIndex(int i, int j, int columnSize){
int idx = (j + ((i - 1) * columnSize)) - 1;
return idx;
}
__device__ int calcRowMajorIndex_d(int i, int j, int columnSize) {
int idx = j + (i * columnSize);
// printf("i=%d, j=%d, columnSize=%d, idx=%d\n", i, j, columnSize, idx);
return idx;
}
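// Host (OpenMP) Jacobi sweep for the 5-point stencil with zero boundary values:
//   u^{k+1}(i,j) = ( h^2 * f(i,j) + u^k(i-1,j) + u^k(i,j-1) + u^k(i+1,j) + u^k(i,j+1) ) / 4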
void calculate_next_u(int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1){
// use the Jacobi method
// calculate arr_u_1
/*
for(int i = 1; i <= n; i++){
for(int j = 1; j <= n; j++){
int idx_i_j = calcRowMajorIndex(i, j, n);
printf("u_k[i=%d][j=%d]=%10f\n", i, j, arr_u_k[idx_i_j]);
}
}
*/
for(int i = 1; i <= n; i++){
#pragma omp parallel for
for(int j = 1; j <= n; j++){
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex(i, j, n);
f_i_j = arr_f[idx_i_j];
if(i - 1 > 0){
int idx_im1_j = calcRowMajorIndex(i - 1, j, n);
u_k_im1_j = arr_u_k[idx_im1_j];
// printf("arr_u_k[%ld]=%10f\n", idx_im1_j, u_k_im1_j);
}
if(j - 1 > 0){
int idx_i_jm1 = calcRowMajorIndex(i, j - 1, n);
u_k_i_jm1 = arr_u_k[idx_i_jm1];
}
if(i < n){
int idx_ip1_j = calcRowMajorIndex(i + 1, j, n);
u_k_ip1_j = arr_u_k[idx_ip1_j];
// printf("arr_u_k[%ld]=%10f\n", idx_ip1_j, u_k_ip1_j);
}
if(j < n){
int idx_i_jp1 = calcRowMajorIndex(i, j + 1, n);
u_k_i_jp1 = arr_u_k[idx_i_jp1];
// printf("arr_u_k[%ld]=%10f\n", idx_i_jp1, u_k_i_jp1);
}
arr_u_k_plus_1[idx_i_j] = (::pow(h, 2) * f_i_j + u_k_im1_j + u_k_i_jm1 + u_k_ip1_j + u_k_i_jp1) / 4;
// printf("i=%d, j=%d, u(k+1)_i_j=%10f, f_i_j=%10f, u_k_im1_j=%10f, u_k_i_jm1=%10f, u_k_ip1_j=%10f, u_k_i_jp1=%10f\n", i, j, arr_u_k_plus_1[idx_i_j], f_i_j, u_k_im1_j, u_k_i_jm1, u_k_ip1_j, u_k_i_jp1);
}
}
}
double calculateResidualNorm(int n, long range, double h, double* arr_u, double* arr_f){
// calculate residual matrix A * u(k) - f
double norm;
double residualSqSum = 0;
for(int i = 1; i <= n; i++){
#pragma omp parallel for reduction(+: residualSqSum)
for(int j = 1; j <= n; j++){
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_i_j = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex(i, j, n);
f_i_j = arr_f[idx_i_j];
u_k_i_j = arr_u[idx_i_j];
// printf("[i=%d][j=%d] u_k_i_j=%10f\n", i, j, u_k_i_j);
if((i - 1) > 0){
int idx_im1_j = calcRowMajorIndex(i - 1, j, n);
u_k_im1_j = arr_u[idx_im1_j];
// printf("[i=%d][j=%d] u_k_im1_j=%10f\n", i, j, u_k_im1_j);
}
if((j - 1) > 0){
int idx_i_jm1 = calcRowMajorIndex(i, j - 1, n);
u_k_i_jm1 = arr_u[idx_i_jm1];
// printf("[i=%d][j=%d] u_k_i_jm1=%10f\n", i, j, u_k_i_jm1);
}
if(i < n){
int idx_ip1_j = calcRowMajorIndex(i + 1, j, n);
u_k_ip1_j = arr_u[idx_ip1_j];
// printf("[i=%d][j=%d] u_k_ip1_j=%10f\n", i, j, u_k_ip1_j);
}
if(j < n){
int idx_i_jp1 = calcRowMajorIndex(i, j + 1, n);
u_k_i_jp1 = arr_u[idx_i_jp1];
// printf("[i=%d][j=%d] u_k_i_jp1=%10f\n", i, j, u_k_i_jp1);
}
// residual = f_i_j - ((- u_im1_j - u_i_jm1 + 4 u_i_j - u_ip1_j - u_i_jp1)/(h ^ 2))
double a_mult_u = (-1 * u_k_im1_j - u_k_i_jm1 + 4 * u_k_i_j - u_k_ip1_j - u_k_i_jp1) / (::pow(h, 2.0));
double residual = f_i_j - a_mult_u;
// printf("res[%ld][%ld]=%10f\n", i, j, residual);
residualSqSum += ::pow(residual, 2);
}
}
norm = std::sqrt(residualSqSum);
return norm;
}
void processNextIter(int iterNumber, double initialNorm, int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1){
calculate_next_u(n, range, h, arr_u_k, arr_f, arr_u_k_plus_1);
double thisNorm = calculateResidualNorm(n, range, h, arr_u_k_plus_1, arr_f);
double decreasingFactor = initialNorm / thisNorm;
/*
std::cout << "Iter[";
std::cout << iterNumber;
std::cout << "]:norm=";
std::cout << thisNorm;
std::cout << ", decreasingFactor=";
std::cout << decreasingFactor;
std::cout << "\n";
*/
bool greaterThanStopCond = false;
if(decreasingFactor > ::pow(10, 6)){
greaterThanStopCond = true;
}
  // terminate the iteration when the initial residual has been decreased by a factor of 10^6 or after 5000 iterations.
  if(greaterThanStopCond || (iterNumber >= stopAfterIterNum)){
    printf("Iter[%d]:norm=%10f, decreasingFactor=%10f\n", iterNumber, thisNorm, decreasingFactor);
return;
}else{
iterNumber++;
double *swap = arr_u_k;
arr_u_k = arr_u_k_plus_1;
arr_u_k_plus_1 = swap;
processNextIter(iterNumber, initialNorm, n, range, h, arr_u_k, arr_f, arr_u_k_plus_1);
return;
}
}
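// Device Jacobi sweep, launched with n blocks of n threads: blockIdx.x supplies the i index and
// threadIdx.x the j index of the n x n grid; unlike the 1-based host loops, indices here are 0-based.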
__global__
void calculate_next_u_d(int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1){
// use the Jacobi method
// calculate arr_u_1
int i = blockIdx.x;
int j = threadIdx.x;
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex_d(i, j, n);
f_i_j = arr_f[idx_i_j];
// printf("f[%d][%d]=%10f\n", i, j , f_i_j);
if(i > 0){
int idx_im1_j = calcRowMajorIndex_d(i - 1, j, n);
u_k_im1_j = arr_u_k[idx_im1_j];
}
if(j > 0){
int idx_i_jm1 = calcRowMajorIndex_d(i, j - 1, n);
u_k_i_jm1 = arr_u_k[idx_i_jm1];
}
if(i < (n - 1)){
int idx_ip1_j = calcRowMajorIndex_d(i + 1, j, n);
u_k_ip1_j = arr_u_k[idx_ip1_j];
// printf("arr_u_k[%ld]=%10f\n", idx_ip1_j, u_k_ip1_j);
}
if(j < (n - 1)){
int idx_i_jp1 = calcRowMajorIndex_d(i, j + 1, n);
u_k_i_jp1 = arr_u_k[idx_i_jp1];
// printf("arr_u_k[%ld]=%10f\n", idx_i_jp1, u_k_i_jp1);
}
arr_u_k_plus_1[idx_i_j] = (::pow(h, 2) * f_i_j + u_k_im1_j + u_k_i_jm1 + u_k_ip1_j + u_k_i_jp1) / 4;
// printf("i=%d, j=%d, u(k+1)_i_j=%10f, f_i_j=%10f, u_k_im1_j=%10f, u_k_i_jm1=%10f, u_k_ip1_j=%10f, u_k_i_jp1=%10f\n", i, j, arr_u_k_plus_1[idx_i_j], f_i_j, u_k_im1_j, u_k_i_jm1, u_k_ip1_j, u_k_i_jp1);
}
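// Residual norm, stage 1: one block per i index computes the squared 5-point-stencil residual at each
// of its n points, reduces them in shared memory, and writes the partial sum to residualSqSum[i].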
__global__ void calculateResidualSqSumPerRow_d(int n, long range, double h, double* arr_u, double* arr_f, double* residualSqSum, int residualSqSumRange){
// calculate residual matrix A * u(k) - f
int i = blockIdx.x;
int j = threadIdx.x;
extern __shared__ double tempResidualSqSum[];
tempResidualSqSum[threadIdx.x] = 0;
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_i_j = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex_d(i, j, n);
// printf("i=%d, j=%d, idx_i_j=%d\n", i, j, idx_i_j);
f_i_j = arr_f[idx_i_j];
// printf("[i=%d][j=%d] f_i_j=%10f\n", i, j, f_i_j);
u_k_i_j = arr_u[idx_i_j];
// printf("[i=%d][j=%d] u_k_i_j=%10f\n", i, j, u_k_i_j);
if(i > 0){
int idx_im1_j = calcRowMajorIndex_d(i - 1, j, n);
u_k_im1_j = arr_u[idx_im1_j];
// printf("[i=%d][j=%d] u_k_im1_j=%10f\n", i, j, u_k_im1_j);
}
if(j > 0){
int idx_i_jm1 = calcRowMajorIndex_d(i, j - 1, n);
u_k_i_jm1 = arr_u[idx_i_jm1];
// printf("[i=%d][j=%d] u_k_i_jm1=%10f\n", i, j, u_k_i_jm1);
}
if(i < (n - 1)){
int idx_ip1_j = calcRowMajorIndex_d(i + 1, j, n);
u_k_ip1_j = arr_u[idx_ip1_j];
// printf("[i=%d][j=%d] u_k_ip1_j=%10f\n", i, j, u_k_ip1_j);
}
if(j < (n - 1)){
int idx_i_jp1 = calcRowMajorIndex_d(i, j + 1, n);
u_k_i_jp1 = arr_u[idx_i_jp1];
// printf("[i=%d][j=%d] u_k_i_jp1=%10f\n", i, j, u_k_i_jp1);
}
// residual = f_i_j - ((- u_im1_j - u_i_jm1 + 4 u_i_j - u_ip1_j - u_i_jp1)/(h ^ 2))
double a_mult_u = (-1 * u_k_im1_j - u_k_i_jm1 + 4 * u_k_i_j - u_k_ip1_j - u_k_i_jp1) / (::pow(h, 2.0));
double residual = f_i_j - a_mult_u;
// printf("res[%d][%d]=%10f\n", i, j, residual);
tempResidualSqSum[j] = ::pow(residual, 2);
__syncthreads();
for(unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
// printf("[%d] tempResidualSqSum[%d]=%ld\n", i, threadIdx.x, tempResidualSqSum[threadIdx.x]);
tempResidualSqSum[threadIdx.x] += tempResidualSqSum[threadIdx.x + s];
// printf("[%d] +tempResidualSqSum[%d]=%ld\n", i, threadIdx.x + s, tempResidualSqSum[threadIdx.x + s]);
// printf("[%d] => tempResidualSqSum[%d]=%ld\n", i, threadIdx.x, tempResidualSqSum[threadIdx.x]);
}
__syncthreads();
}
// printf("1threadIdx.x=%d\n", threadIdx.x);
if(threadIdx.x == 0){
// printf("2threadIdx.x=%d\n", threadIdx.x);
residualSqSum[i] = tempResidualSqSum[threadIdx.x];
// printf("residualSqSum[%d]=%10f\n", i, residualSqSum[i]);
}
// printf("residualSqSum=%10f\n", residualSqSum);
// printf("i=%ld, j=%ld, residual=%10f\n", i, j ,residual);
// printf("f_i_j=%10f, u_k_i_j=%10f, u_k_im1_j=%10f, u_k_i_jm1=%10f, u_k_ip1_j=%10f, u_k_i_jp1=%10f\n", f_i_j, u_k_i_j, u_k_im1_j, u_k_i_jm1, u_k_ip1_j, u_k_i_jp1);
// }
// }
}
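// Residual norm, stage 2: a single block sums the stage-1 partial sums and stores sqrt(total) in norm_d[0].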
__global__ void calculateResidualNorm_d(double* residualSqSum, double* norm_d){
// printf("threadIdx.x=%d\n", threadIdx.x);
extern __shared__ double tempResidualSqSum[];
// __shared__ double tempResidualSqSum[1024];
tempResidualSqSum[threadIdx.x] = residualSqSum[threadIdx.x];
__syncthreads();
for(unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
tempResidualSqSum[threadIdx.x] += tempResidualSqSum[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0){
norm_d[0] = std::sqrt(tempResidualSqSum[threadIdx.x]);
// printf("norm_d[0]=%10.10f\n", norm_d[0]);
}
}
double calculateResidualNorm_GPU(int n, long range, double h, double* arr_u, double* arr_f, double* residualSqSum, int residualSqSumRange, double* norm_d, double* norm){
double normValue = 0;
hipLaunchKernelGGL(( calculateResidualSqSumPerRow_d), dim3(n), dim3(n), residualSqSumRange * sizeof (double), 0, n, range, h, arr_u, arr_f, residualSqSum, residualSqSumRange);
// calculateResidualSqSumPerRow_d<<<n, n>>>(n, range, h, arr_u, arr_f, residualSqSum);
hipDeviceSynchronize();
// printf("Start to calculateResidualNorm_d\n");
hipLaunchKernelGGL(( calculateResidualNorm_d), dim3(1), dim3(n), residualSqSumRange * sizeof (double), 0, residualSqSum, norm_d);
// calculateResidualNorm_d<<<1, n>>>(residualSqSum, norm_d);
hipDeviceSynchronize();
// printf("-norm[0]=%10f\n", norm[0]);
hipMemcpy(norm, norm_d, 1 * sizeof(double), hipMemcpyDeviceToHost);
// printf("+norm[0]=%10f\n", norm[0]);
normValue = norm[0];
return normValue;
}
void processNextIter_GPU(int iterNumber, double initialNorm, int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1, double* residualSqSum, int residualSqSumRange, double* norm_d, double* norm){
hipLaunchKernelGGL(( calculate_next_u_d), dim3(n), dim3(n), 0, 0, n, range, h, arr_u_k, arr_f, arr_u_k_plus_1);
hipDeviceSynchronize();
double thisNorm = calculateResidualNorm_GPU(n, range, h, arr_u_k_plus_1, arr_f, residualSqSum, residualSqSumRange, norm_d, norm);
double decreasingFactor = initialNorm / thisNorm;
/*
std::cout << "Iter[";
std::cout << iterNumber;
std::cout << "]:norm=";
std::cout << thisNorm;
std::cout << ", decreasingFactor=";
std::cout << decreasingFactor;
std::cout << "\n";
*/
bool greaterThanStopCond = false;
if(decreasingFactor > ::pow(10, 6)){
greaterThanStopCond = true;
}
  // terminate the iteration when the initial residual has been decreased by a factor of 10^6 or after 5000 iterations.
  if(greaterThanStopCond || (iterNumber >= stopAfterIterNum)){
    printf("Iter[%d]:norm=%10f, decreasingFactor=%10f\n", iterNumber, thisNorm, decreasingFactor);
return;
}else{
iterNumber++;
double *swap = arr_u_k;
arr_u_k = arr_u_k_plus_1;
arr_u_k_plus_1 = swap;
processNextIter_GPU(iterNumber, initialNorm, n, range, h, arr_u_k, arr_f, arr_u_k_plus_1, residualSqSum, residualSqSumRange, norm_d, norm);
return;
}
}
void jacobi2D_GPU(int n, long range, double h, double* arr_u_0, double* arr_f_d, double* arr_u_result, double* arr_u_0_d, double* arr_u_result_d){
double *norm_d;
hipMalloc(&norm_d, 1 * sizeof(double));
int iterNumber = 0;
double *norm = (double*) aligned_malloc(1 * sizeof(double));
int residualSqSumRange = ::pow(2, ceil(log2((double)n)));
printf("n=%d, residualSqSumRange=%d\n", n, residualSqSumRange);
double *residualSqSum;
hipMalloc(&residualSqSum, residualSqSumRange * sizeof(double));
hipMemcpy(arr_u_0_d, arr_u_0, range * sizeof(double), hipMemcpyHostToDevice);
// printf("Start to calculateResidualNorm_GPU\n");
double norm_0 = calculateResidualNorm_GPU(n, range, h, arr_u_0_d, arr_f_d, residualSqSum, residualSqSumRange, norm_d, norm);
// printf("norm_0=%10.10f\n", norm_0);
iterNumber++;
processNextIter_GPU(iterNumber, norm_0, n, range, h, arr_u_0_d, arr_f_d, arr_u_result_d, residualSqSum, residualSqSumRange, norm_d, norm);
hipMemcpy(arr_u_result, arr_u_result_d, range * sizeof(double), hipMemcpyDeviceToHost);
hipFree(norm_d);
hipFree(residualSqSum);
aligned_free(norm);
}
int main(int argc, char *argv[]){
// common settings
std::cout.precision(10);
// get input param N
int n = 256;
int threadNumber = 4;
if(argc >= 3){
string nstr = argv[1];
n = stoi(nstr);
string threadNumberStr = argv[2];
threadNumber = stoi(threadNumberStr);  // assign to the outer variable instead of shadowing it, so the CLI thread count takes effect
}
#ifdef _OPENMP
omp_set_num_threads(threadNumber);
#endif
// initialization
// 1. set array f = {f_11, f12, ..., f_1N, f_21, ..., f_N1, ..., f_NN}
double start, end;
long range = n * n;
double* arr_f = (double*) aligned_malloc(range * sizeof(double));
for(long i = 0; i < range; i++){
arr_f[i] = 1;
}
// calculate h according to the input N
double h = (double)1 / (n + 1);
strs << h;
std::cout << "h=" + strs.str() + "\n";
strs.clear();
strs.str("");
// set an initialization vector u^0
int iterNumber = 0;
double* arr_u_0 = (double*) aligned_malloc(range * sizeof(double));
double* arr_u_result = (double*) aligned_malloc(range * sizeof(double));
double* arr_u_result_ref = (double*) aligned_malloc(range * sizeof(double));
for(long i = 0; i < range; i++){
arr_u_0[i] = 0;
}
double norm_0 = calculateResidualNorm(n, range, h, arr_u_0, arr_f);
iterNumber++;
start = omp_get_wtime();
processNextIter(iterNumber, norm_0, n, range, h, arr_u_0, arr_f, arr_u_result_ref);
end = omp_get_wtime(); // unit: second
/*
std::cout << "arr_u_result:\n";
for(long i = 0; i < range; i++){
std::cout << arr_u_result[i];
std::cout << "\n";
}
*/
strs << n;
std::cout << "N=" + strs.str() +"\n";
strs.clear();
strs.str("");
printf("jacobi2D_omp: run time = %10f second(s).\n", (end - start));
// re-initialize
for(long i = 0; i < range; i++){
arr_u_0[i] = 0;
}
// initialize for GPU version
double *arr_u_0_d, *arr_u_result_d, *arr_f_d;
hipMalloc(&arr_u_0_d, range * sizeof(double));
hipMalloc(&arr_u_result_d, range * sizeof(double));
hipMalloc(&arr_f_d, range * sizeof(double));
start = omp_get_wtime();
hipMemcpy(arr_f_d, arr_f, range * sizeof(double), hipMemcpyHostToDevice);
jacobi2D_GPU(n, range, h, arr_u_0, arr_f_d, arr_u_result, arr_u_0_d, arr_u_result_d);
end = omp_get_wtime();
printf("jacobi2D_GPU: run time = %10f second(s).\n", (end - start));
double err = 0;
for (long i = 0; i < range; i++) {
err += (fabs(arr_u_result[i] - arr_u_result_ref[i]));
}
printf("Total Error = %10e\n", err);
double max_err = 0;
for (long i = 0; i < range; i++){
max_err = ::max(max_err, fabs(arr_u_result[i] - arr_u_result_ref[i]));
}
printf("max_err: %10e\n", max_err);
// free memory before terminate
hipFree(arr_u_0_d);
hipFree(arr_u_result_d);
aligned_free(arr_f);
aligned_free(arr_u_0);
aligned_free(arr_u_result);
aligned_free(arr_u_result_ref);
}
| 83405156444c39aeb081d26f248c841ced10d965.cu | #include <iostream>
#include <string>
#include <stdio.h>
#include <sstream>
#include <cmath>
#include "utils.h"
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
using std::string;
// if the residual has not been decreased by the target factor after this many iterations, stop the program.
const int stopAfterIterNum = 5000;
// common utils
std::ostringstream strs;
// g++ -std=c++11 -O3 -o jacobi2D-omp jacobi2D-omp.cpp
// g++-9 -std=c++11 -O3 -o jacobi2D -fopenmp jacobi2D.cpp
// nvcc -std=c++11 -Xcompiler -fopenmp -o jacobi2D jacobi2D.cu
// Without parallel: N=10, run time = 0.011500 second(s).
int calcRowMajorIndex(int i, int j, int columnSize){
int idx = (j + ((i - 1) * columnSize)) - 1;
return idx;
}
__device__ int calcRowMajorIndex_d(int i, int j, int columnSize) {
int idx = j + (i * columnSize);
// printf("i=%d, j=%d, columnSize=%d, idx=%d\n", i, j, columnSize, idx);
return idx;
}
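// Host (OpenMP) Jacobi sweep for the 5-point stencil with zero boundary values:
//   u^{k+1}(i,j) = ( h^2 * f(i,j) + u^k(i-1,j) + u^k(i,j-1) + u^k(i+1,j) + u^k(i,j+1) ) / 4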
void calculate_next_u(int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1){
// use the Jacobi method
// calculate arr_u_1
/*
for(int i = 1; i <= n; i++){
for(int j = 1; j <= n; j++){
int idx_i_j = calcRowMajorIndex(i, j, n);
printf("u_k[i=%d][j=%d]=%10f\n", i, j, arr_u_k[idx_i_j]);
}
}
*/
for(int i = 1; i <= n; i++){
#pragma omp parallel for
for(int j = 1; j <= n; j++){
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex(i, j, n);
f_i_j = arr_f[idx_i_j];
if(i - 1 > 0){
int idx_im1_j = calcRowMajorIndex(i - 1, j, n);
u_k_im1_j = arr_u_k[idx_im1_j];
// printf("arr_u_k[%ld]=%10f\n", idx_im1_j, u_k_im1_j);
}
if(j - 1 > 0){
int idx_i_jm1 = calcRowMajorIndex(i, j - 1, n);
u_k_i_jm1 = arr_u_k[idx_i_jm1];
}
if(i < n){
int idx_ip1_j = calcRowMajorIndex(i + 1, j, n);
u_k_ip1_j = arr_u_k[idx_ip1_j];
// printf("arr_u_k[%ld]=%10f\n", idx_ip1_j, u_k_ip1_j);
}
if(j < n){
int idx_i_jp1 = calcRowMajorIndex(i, j + 1, n);
u_k_i_jp1 = arr_u_k[idx_i_jp1];
// printf("arr_u_k[%ld]=%10f\n", idx_i_jp1, u_k_i_jp1);
}
arr_u_k_plus_1[idx_i_j] = (std::pow(h, 2) * f_i_j + u_k_im1_j + u_k_i_jm1 + u_k_ip1_j + u_k_i_jp1) / 4;
// printf("i=%d, j=%d, u(k+1)_i_j=%10f, f_i_j=%10f, u_k_im1_j=%10f, u_k_i_jm1=%10f, u_k_ip1_j=%10f, u_k_i_jp1=%10f\n", i, j, arr_u_k_plus_1[idx_i_j], f_i_j, u_k_im1_j, u_k_i_jm1, u_k_ip1_j, u_k_i_jp1);
}
}
}
double calculateResidualNorm(int n, long range, double h, double* arr_u, double* arr_f){
// calculate residual matrix A * u(k) - f
double norm;
double residualSqSum = 0;
for(int i = 1; i <= n; i++){
#pragma omp parallel for reduction(+: residualSqSum)
for(int j = 1; j <= n; j++){
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_i_j = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex(i, j, n);
f_i_j = arr_f[idx_i_j];
u_k_i_j = arr_u[idx_i_j];
// printf("[i=%d][j=%d] u_k_i_j=%10f\n", i, j, u_k_i_j);
if((i - 1) > 0){
int idx_im1_j = calcRowMajorIndex(i - 1, j, n);
u_k_im1_j = arr_u[idx_im1_j];
// printf("[i=%d][j=%d] u_k_im1_j=%10f\n", i, j, u_k_im1_j);
}
if((j - 1) > 0){
int idx_i_jm1 = calcRowMajorIndex(i, j - 1, n);
u_k_i_jm1 = arr_u[idx_i_jm1];
// printf("[i=%d][j=%d] u_k_i_jm1=%10f\n", i, j, u_k_i_jm1);
}
if(i < n){
int idx_ip1_j = calcRowMajorIndex(i + 1, j, n);
u_k_ip1_j = arr_u[idx_ip1_j];
// printf("[i=%d][j=%d] u_k_ip1_j=%10f\n", i, j, u_k_ip1_j);
}
if(j < n){
int idx_i_jp1 = calcRowMajorIndex(i, j + 1, n);
u_k_i_jp1 = arr_u[idx_i_jp1];
// printf("[i=%d][j=%d] u_k_i_jp1=%10f\n", i, j, u_k_i_jp1);
}
// residual = f_i_j - ((- u_im1_j - u_i_jm1 + 4 u_i_j - u_ip1_j - u_i_jp1)/(h ^ 2))
double a_mult_u = (-1 * u_k_im1_j - u_k_i_jm1 + 4 * u_k_i_j - u_k_ip1_j - u_k_i_jp1) / (std::pow(h, 2.0));
double residual = f_i_j - a_mult_u;
// printf("res[%ld][%ld]=%10f\n", i, j, residual);
residualSqSum += std::pow(residual, 2);
}
}
norm = std::sqrt(residualSqSum);
return norm;
}
void processNextIter(int iterNumber, double initialNorm, int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1){
calculate_next_u(n, range, h, arr_u_k, arr_f, arr_u_k_plus_1);
double thisNorm = calculateResidualNorm(n, range, h, arr_u_k_plus_1, arr_f);
double decreasingFactor = initialNorm / thisNorm;
/*
std::cout << "Iter[";
std::cout << iterNumber;
std::cout << "]:norm=";
std::cout << thisNorm;
std::cout << ", decreasingFactor=";
std::cout << decreasingFactor;
std::cout << "\n";
*/
bool greaterThanStopCond = false;
if(decreasingFactor > std::pow(10, 6)){
greaterThanStopCond = true;
}
  // terminate the iteration when the initial residual has been decreased by a factor of 10^6 or after 5000 iterations.
  if(greaterThanStopCond || (iterNumber >= stopAfterIterNum)){
    printf("Iter[%d]:norm=%10f, decreasingFactor=%10f\n", iterNumber, thisNorm, decreasingFactor);
return;
}else{
iterNumber++;
double *swap = arr_u_k;
arr_u_k = arr_u_k_plus_1;
arr_u_k_plus_1 = swap;
processNextIter(iterNumber, initialNorm, n, range, h, arr_u_k, arr_f, arr_u_k_plus_1);
return;
}
}
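// Device Jacobi sweep, launched with n blocks of n threads: blockIdx.x supplies the i index and
// threadIdx.x the j index of the n x n grid; unlike the 1-based host loops, indices here are 0-based.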
__global__
void calculate_next_u_d(int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1){
// use the Jacobi method
// calculate arr_u_1
int i = blockIdx.x;
int j = threadIdx.x;
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex_d(i, j, n);
f_i_j = arr_f[idx_i_j];
// printf("f[%d][%d]=%10f\n", i, j , f_i_j);
if(i > 0){
int idx_im1_j = calcRowMajorIndex_d(i - 1, j, n);
u_k_im1_j = arr_u_k[idx_im1_j];
}
if(j > 0){
int idx_i_jm1 = calcRowMajorIndex_d(i, j - 1, n);
u_k_i_jm1 = arr_u_k[idx_i_jm1];
}
if(i < (n - 1)){
int idx_ip1_j = calcRowMajorIndex_d(i + 1, j, n);
u_k_ip1_j = arr_u_k[idx_ip1_j];
// printf("arr_u_k[%ld]=%10f\n", idx_ip1_j, u_k_ip1_j);
}
if(j < (n - 1)){
int idx_i_jp1 = calcRowMajorIndex_d(i, j + 1, n);
u_k_i_jp1 = arr_u_k[idx_i_jp1];
// printf("arr_u_k[%ld]=%10f\n", idx_i_jp1, u_k_i_jp1);
}
arr_u_k_plus_1[idx_i_j] = (std::pow(h, 2) * f_i_j + u_k_im1_j + u_k_i_jm1 + u_k_ip1_j + u_k_i_jp1) / 4;
// printf("i=%d, j=%d, u(k+1)_i_j=%10f, f_i_j=%10f, u_k_im1_j=%10f, u_k_i_jm1=%10f, u_k_ip1_j=%10f, u_k_i_jp1=%10f\n", i, j, arr_u_k_plus_1[idx_i_j], f_i_j, u_k_im1_j, u_k_i_jm1, u_k_ip1_j, u_k_i_jp1);
}
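// Residual norm, stage 1: one block per i index computes the squared 5-point-stencil residual at each
// of its n points, reduces them in shared memory, and writes the partial sum to residualSqSum[i].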
__global__ void calculateResidualSqSumPerRow_d(int n, long range, double h, double* arr_u, double* arr_f, double* residualSqSum, int residualSqSumRange){
// calculate residual matrix A * u(k) - f
int i = blockIdx.x;
int j = threadIdx.x;
extern __shared__ double tempResidualSqSum[];
tempResidualSqSum[threadIdx.x] = 0;
double f_i_j = 0;
double u_k_im1_j = 0;
double u_k_i_jm1 = 0;
double u_k_i_j = 0;
double u_k_ip1_j = 0;
double u_k_i_jp1 = 0;
int idx_i_j = calcRowMajorIndex_d(i, j, n);
// printf("i=%d, j=%d, idx_i_j=%d\n", i, j, idx_i_j);
f_i_j = arr_f[idx_i_j];
// printf("[i=%d][j=%d] f_i_j=%10f\n", i, j, f_i_j);
u_k_i_j = arr_u[idx_i_j];
// printf("[i=%d][j=%d] u_k_i_j=%10f\n", i, j, u_k_i_j);
if(i > 0){
int idx_im1_j = calcRowMajorIndex_d(i - 1, j, n);
u_k_im1_j = arr_u[idx_im1_j];
// printf("[i=%d][j=%d] u_k_im1_j=%10f\n", i, j, u_k_im1_j);
}
if(j > 0){
int idx_i_jm1 = calcRowMajorIndex_d(i, j - 1, n);
u_k_i_jm1 = arr_u[idx_i_jm1];
// printf("[i=%d][j=%d] u_k_i_jm1=%10f\n", i, j, u_k_i_jm1);
}
if(i < (n - 1)){
int idx_ip1_j = calcRowMajorIndex_d(i + 1, j, n);
u_k_ip1_j = arr_u[idx_ip1_j];
// printf("[i=%d][j=%d] u_k_ip1_j=%10f\n", i, j, u_k_ip1_j);
}
if(j < (n - 1)){
int idx_i_jp1 = calcRowMajorIndex_d(i, j + 1, n);
u_k_i_jp1 = arr_u[idx_i_jp1];
// printf("[i=%d][j=%d] u_k_i_jp1=%10f\n", i, j, u_k_i_jp1);
}
// residual = f_i_j - ((- u_im1_j - u_i_jm1 + 4 u_i_j - u_ip1_j - u_i_jp1)/(h ^ 2))
double a_mult_u = (-1 * u_k_im1_j - u_k_i_jm1 + 4 * u_k_i_j - u_k_ip1_j - u_k_i_jp1) / (std::pow(h, 2.0));
double residual = f_i_j - a_mult_u;
// printf("res[%d][%d]=%10f\n", i, j, residual);
tempResidualSqSum[j] = std::pow(residual, 2);
__syncthreads();
for(unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
// printf("[%d] tempResidualSqSum[%d]=%ld\n", i, threadIdx.x, tempResidualSqSum[threadIdx.x]);
tempResidualSqSum[threadIdx.x] += tempResidualSqSum[threadIdx.x + s];
// printf("[%d] +tempResidualSqSum[%d]=%ld\n", i, threadIdx.x + s, tempResidualSqSum[threadIdx.x + s]);
// printf("[%d] => tempResidualSqSum[%d]=%ld\n", i, threadIdx.x, tempResidualSqSum[threadIdx.x]);
}
__syncthreads();
}
// printf("1threadIdx.x=%d\n", threadIdx.x);
if(threadIdx.x == 0){
// printf("2threadIdx.x=%d\n", threadIdx.x);
residualSqSum[i] = tempResidualSqSum[threadIdx.x];
// printf("residualSqSum[%d]=%10f\n", i, residualSqSum[i]);
}
// printf("residualSqSum=%10f\n", residualSqSum);
// printf("i=%ld, j=%ld, residual=%10f\n", i, j ,residual);
// printf("f_i_j=%10f, u_k_i_j=%10f, u_k_im1_j=%10f, u_k_i_jm1=%10f, u_k_ip1_j=%10f, u_k_i_jp1=%10f\n", f_i_j, u_k_i_j, u_k_im1_j, u_k_i_jm1, u_k_ip1_j, u_k_i_jp1);
// }
// }
}
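// Residual norm, stage 2: a single block sums the stage-1 partial sums and stores sqrt(total) in norm_d[0].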
__global__ void calculateResidualNorm_d(double* residualSqSum, double* norm_d){
// printf("threadIdx.x=%d\n", threadIdx.x);
extern __shared__ double tempResidualSqSum[];
// __shared__ double tempResidualSqSum[1024];
tempResidualSqSum[threadIdx.x] = residualSqSum[threadIdx.x];
__syncthreads();
for(unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
tempResidualSqSum[threadIdx.x] += tempResidualSqSum[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0){
norm_d[0] = std::sqrt(tempResidualSqSum[threadIdx.x]);
// printf("norm_d[0]=%10.10f\n", norm_d[0]);
}
}
double calculateResidualNorm_GPU(int n, long range, double h, double* arr_u, double* arr_f, double* residualSqSum, int residualSqSumRange, double* norm_d, double* norm){
double normValue = 0;
calculateResidualSqSumPerRow_d<<<n, n, residualSqSumRange * sizeof (double)>>>(n, range, h, arr_u, arr_f, residualSqSum, residualSqSumRange);
// calculateResidualSqSumPerRow_d<<<n, n>>>(n, range, h, arr_u, arr_f, residualSqSum);
cudaDeviceSynchronize();
// printf("Start to calculateResidualNorm_d\n");
calculateResidualNorm_d<<<1, n, residualSqSumRange * sizeof (double)>>>(residualSqSum, norm_d);
// calculateResidualNorm_d<<<1, n>>>(residualSqSum, norm_d);
cudaDeviceSynchronize();
// printf("-norm[0]=%10f\n", norm[0]);
cudaMemcpy(norm, norm_d, 1 * sizeof(double), cudaMemcpyDeviceToHost);
// printf("+norm[0]=%10f\n", norm[0]);
normValue = norm[0];
return normValue;
}
void processNextIter_GPU(int iterNumber, double initialNorm, int n, long range, double h, double* arr_u_k, double* arr_f, double* arr_u_k_plus_1, double* residualSqSum, int residualSqSumRange, double* norm_d, double* norm){
calculate_next_u_d<<<n, n>>>(n, range, h, arr_u_k, arr_f, arr_u_k_plus_1);
cudaDeviceSynchronize();
double thisNorm = calculateResidualNorm_GPU(n, range, h, arr_u_k_plus_1, arr_f, residualSqSum, residualSqSumRange, norm_d, norm);
double decreasingFactor = initialNorm / thisNorm;
/*
std::cout << "Iter[";
std::cout << iterNumber;
std::cout << "]:norm=";
std::cout << thisNorm;
std::cout << ", decreasingFactor=";
std::cout << decreasingFactor;
std::cout << "\n";
*/
bool greaterThanStopCond = false;
if(decreasingFactor > std::pow(10, 6)){
greaterThanStopCond = true;
}
  // terminate the iteration when the initial residual has been decreased by a factor of 10^6 or after 5000 iterations.
  if(greaterThanStopCond || (iterNumber >= stopAfterIterNum)){
    printf("Iter[%d]:norm=%10f, decreasingFactor=%10f\n", iterNumber, thisNorm, decreasingFactor);
return;
}else{
iterNumber++;
double *swap = arr_u_k;
arr_u_k = arr_u_k_plus_1;
arr_u_k_plus_1 = swap;
processNextIter_GPU(iterNumber, initialNorm, n, range, h, arr_u_k, arr_f, arr_u_k_plus_1, residualSqSum, residualSqSumRange, norm_d, norm);
return;
}
}
void jacobi2D_GPU(int n, long range, double h, double* arr_u_0, double* arr_f_d, double* arr_u_result, double* arr_u_0_d, double* arr_u_result_d){
double *norm_d;
cudaMalloc(&norm_d, 1 * sizeof(double));
int iterNumber = 0;
double *norm = (double*) aligned_malloc(1 * sizeof(double));
int residualSqSumRange = std::pow(2, ceil(log2((double)n)));
printf("n=%d, residualSqSumRange=%d\n", n, residualSqSumRange);
double *residualSqSum;
cudaMalloc(&residualSqSum, residualSqSumRange * sizeof(double));
cudaMemcpy(arr_u_0_d, arr_u_0, range * sizeof(double), cudaMemcpyHostToDevice);
// printf("Start to calculateResidualNorm_GPU\n");
double norm_0 = calculateResidualNorm_GPU(n, range, h, arr_u_0_d, arr_f_d, residualSqSum, residualSqSumRange, norm_d, norm);
// printf("norm_0=%10.10f\n", norm_0);
iterNumber++;
processNextIter_GPU(iterNumber, norm_0, n, range, h, arr_u_0_d, arr_f_d, arr_u_result_d, residualSqSum, residualSqSumRange, norm_d, norm);
cudaMemcpy(arr_u_result, arr_u_result_d, range * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(norm_d);
cudaFree(residualSqSum);
aligned_free(norm);
}
int main(int argc, char *argv[]){
// common settings
std::cout.precision(10);
// get input param N
int n = 256;
int threadNumber = 4;
if(argc >= 3){
string nstr = argv[1];
n = stoi(nstr);
string threadNumberStr = argv[2];
threadNumber = stoi(threadNumberStr);  // assign to the outer variable instead of shadowing it, so the CLI thread count takes effect
}
#ifdef _OPENMP
omp_set_num_threads(threadNumber);
#endif
// initialization
// 1. set array f = {f_11, f12, ..., f_1N, f_21, ..., f_N1, ..., f_NN}
double start, end;
long range = n * n;
double* arr_f = (double*) aligned_malloc(range * sizeof(double));
for(long i = 0; i < range; i++){
arr_f[i] = 1;
}
// calculate h according to the input N
double h = (double)1 / (n + 1);
strs << h;
std::cout << "h=" + strs.str() + "\n";
strs.clear();
strs.str("");
// set an initialization vector u^0
int iterNumber = 0;
double* arr_u_0 = (double*) aligned_malloc(range * sizeof(double));
double* arr_u_result = (double*) aligned_malloc(range * sizeof(double));
double* arr_u_result_ref = (double*) aligned_malloc(range * sizeof(double));
for(long i = 0; i < range; i++){
arr_u_0[i] = 0;
}
double norm_0 = calculateResidualNorm(n, range, h, arr_u_0, arr_f);
iterNumber++;
start = omp_get_wtime();
processNextIter(iterNumber, norm_0, n, range, h, arr_u_0, arr_f, arr_u_result_ref);
end = omp_get_wtime(); // unit: second
/*
std::cout << "arr_u_result:\n";
for(long i = 0; i < range; i++){
std::cout << arr_u_result[i];
std::cout << "\n";
}
*/
strs << n;
std::cout << "N=" + strs.str() +"\n";
strs.clear();
strs.str("");
printf("jacobi2D_omp: run time = %10f second(s).\n", (end - start));
// re-initialize
for(long i = 0; i < range; i++){
arr_u_0[i] = 0;
}
// initialize for GPU version
double *arr_u_0_d, *arr_u_result_d, *arr_f_d;
cudaMalloc(&arr_u_0_d, range * sizeof(double));
cudaMalloc(&arr_u_result_d, range * sizeof(double));
cudaMalloc(&arr_f_d, range * sizeof(double));
start = omp_get_wtime();
cudaMemcpy(arr_f_d, arr_f, range * sizeof(double), cudaMemcpyHostToDevice);
jacobi2D_GPU(n, range, h, arr_u_0, arr_f_d, arr_u_result, arr_u_0_d, arr_u_result_d);
end = omp_get_wtime();
printf("jacobi2D_GPU: run time = %10f second(s).\n", (end - start));
double err = 0;
for (long i = 0; i < range; i++) {
err += (fabs(arr_u_result[i] - arr_u_result_ref[i]));
}
printf("Total Error = %10e\n", err);
double max_err = 0;
for (long i = 0; i < range; i++){
max_err = std::max(max_err, fabs(arr_u_result[i] - arr_u_result_ref[i]));
}
printf("max_err: %10e\n", max_err);
// free memory before terminate
cudaFree(arr_u_0_d);
cudaFree(arr_u_result_d);
aligned_free(arr_f);
aligned_free(arr_u_0);
aligned_free(arr_u_result);
aligned_free(arr_u_result_ref);
}
|
5cda772a81cfa777ac0afc835514976e50cb074a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/subtract.cuh>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
namespace raft {
namespace linalg {
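// Reference implementations: naive element-wise and scalar subtraction kernels used to build the
// expected output (out_ref) that the subtract()/subtractScalar() results are compared against.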
template <typename Type>
__global__ void naiveSubtractElemKernel(Type *out, const Type *in1,
const Type *in2, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in1[idx] - in2[idx];
}
}
template <typename Type>
void naiveSubtractElem(Type *out, const Type *in1, const Type *in2, int len,
hipStream_t stream) {
static const int TPB = 64;
int nblks = raft::ceildiv(len, TPB);
hipLaunchKernelGGL(( naiveSubtractElemKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in1, in2, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename Type>
__global__ void naiveSubtractScalarKernel(Type *out, const Type *in1,
const Type in2, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in1[idx] - in2;
}
}
template <typename Type>
void naiveSubtractScalar(Type *out, const Type *in1, const Type in2, int len,
hipStream_t stream) {
static const int TPB = 64;
int nblks = raft::ceildiv(len, TPB);
hipLaunchKernelGGL(( naiveSubtractScalarKernel<Type>)
, dim3(nblks), dim3(TPB), 0, stream, out, in1, in2, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct SubtractInputs {
T tolerance;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const SubtractInputs<T> &dims) {
return os;
}
template <typename T>
class SubtractTest : public ::testing::TestWithParam<SubtractInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<SubtractInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(in1, len);
raft::allocate(in2, len);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(in1, len, T(-1.0), T(1.0), stream);
r.uniform(in2, len, T(-1.0), T(1.0), stream);
naiveSubtractElem(out_ref, in1, in2, len, stream);
naiveSubtractScalar(out_ref, out_ref, T(1), len, stream);
subtract(out, in1, in2, len, stream);
subtractScalar(out, out, T(1), len, stream);
subtract(in1, in1, in2, len, stream);
subtractScalar(in1, in1, T(1), len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in1));
CUDA_CHECK(hipFree(in2));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
SubtractInputs<T> params;
T *in1, *in2, *out_ref, *out;
};
const std::vector<SubtractInputs<float>> inputsf2 = {
{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<SubtractInputs<double>> inputsd2 = {
{0.00000001, 1024 * 1024, 1234ULL}};
typedef SubtractTest<float> SubtractTestF;
TEST_P(SubtractTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len,
raft::CompareApprox<float>(params.tolerance)));
}
typedef SubtractTest<double> SubtractTestD;
TEST_P(SubtractTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(SubtractTests, SubtractTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(SubtractTests, SubtractTestD,
::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
| 5cda772a81cfa777ac0afc835514976e50cb074a.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/subtract.cuh>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
namespace raft {
namespace linalg {
template <typename Type>
__global__ void naiveSubtractElemKernel(Type *out, const Type *in1,
const Type *in2, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in1[idx] - in2[idx];
}
}
template <typename Type>
void naiveSubtractElem(Type *out, const Type *in1, const Type *in2, int len,
cudaStream_t stream) {
static const int TPB = 64;
int nblks = raft::ceildiv(len, TPB);
naiveSubtractElemKernel<Type><<<nblks, TPB, 0, stream>>>(out, in1, in2, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename Type>
__global__ void naiveSubtractScalarKernel(Type *out, const Type *in1,
const Type in2, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in1[idx] - in2;
}
}
template <typename Type>
void naiveSubtractScalar(Type *out, const Type *in1, const Type in2, int len,
cudaStream_t stream) {
static const int TPB = 64;
int nblks = raft::ceildiv(len, TPB);
naiveSubtractScalarKernel<Type>
<<<nblks, TPB, 0, stream>>>(out, in1, in2, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct SubtractInputs {
T tolerance;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const SubtractInputs<T> &dims) {
return os;
}
template <typename T>
class SubtractTest : public ::testing::TestWithParam<SubtractInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<SubtractInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(in1, len);
raft::allocate(in2, len);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(in1, len, T(-1.0), T(1.0), stream);
r.uniform(in2, len, T(-1.0), T(1.0), stream);
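// Build the reference with the naive kernels (element-wise subtract, then subtract 1),
// then exercise the library path both out-of-place (into out) and in-place (into in1);
// the tests below compare both results against out_ref.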
naiveSubtractElem(out_ref, in1, in2, len, stream);
naiveSubtractScalar(out_ref, out_ref, T(1), len, stream);
subtract(out, in1, in2, len, stream);
subtractScalar(out, out, T(1), len, stream);
subtract(in1, in1, in2, len, stream);
subtractScalar(in1, in1, T(1), len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in1));
CUDA_CHECK(cudaFree(in2));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
SubtractInputs<T> params;
T *in1, *in2, *out_ref, *out;
};
const std::vector<SubtractInputs<float>> inputsf2 = {
{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<SubtractInputs<double>> inputsd2 = {
{0.00000001, 1024 * 1024, 1234ULL}};
typedef SubtractTest<float> SubtractTestF;
TEST_P(SubtractTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len,
raft::CompareApprox<float>(params.tolerance)));
}
typedef SubtractTest<double> SubtractTestD;
TEST_P(SubtractTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(SubtractTests, SubtractTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(SubtractTests, SubtractTestD,
::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
|
96e06277c17f249b2b258e4248c1fa313be1073d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* CCL3D.cu
*/
#define CCL_BLOCK_SIZE_X 8
#define CCL_BLOCK_SIZE_Y 8
#define CCL_BLOCK_SIZE_Z 8
__device__ int d_isNotDone;
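// One relaxation pass of iterative connected-component labelling: every non-zero voxel
// scans its 26-neighbourhood for the smallest non-zero label and, if that label is smaller
// than its own, merges it into the label tree via labels[labels[lcur]]. d_isNotDone signals
// that another pass is needed (presumably the host re-launches until it stays 0).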
__global__ void scanLabels(int* labels, int w, int h, int d) {
const int x = blockIdx.x * CCL_BLOCK_SIZE_X + threadIdx.x;
const int y = blockIdx.y * CCL_BLOCK_SIZE_Y + threadIdx.y;
const int z = blockIdx.z * CCL_BLOCK_SIZE_Z + threadIdx.z;
const int index = (z*h + y)*w + x;
if (x >= w || y >= h || z >= d) return;
const int Z1 = w*h; const int Y1 = w;
int lcur = labels[index];
if (lcur) {
int lmin = index; // MAX
// 26-neighbors
int lne, pos;
for (int Zdif = -Z1; Zdif <= Z1; Zdif += Z1) {
for (int Ydif = -Y1; Ydif <= Y1; Ydif += Y1) {
for (int Xdif = -1; Xdif <= 1; Xdif += 1) {
pos = index + Zdif + Ydif + Xdif;
lne = (pos >= 0 && pos < w*h*d) ? labels[pos] : 0; // circular boundary
if (lne && lne < lmin) lmin = lne;
}
}
}
// The (Xdif,Ydif,Zdif)=(0,0,0) case is the voxel itself and is not needed, but including it is harmless.
if (lmin < lcur) {
int lpa = labels[lcur];
labels[lpa] = min(lpa, lmin);
d_isNotDone = 1;
}
}
} | 96e06277c17f249b2b258e4248c1fa313be1073d.cu | #include "includes.h"
/*
* CCL3D.cu
*/
#define CCL_BLOCK_SIZE_X 8
#define CCL_BLOCK_SIZE_Y 8
#define CCL_BLOCK_SIZE_Z 8
__device__ int d_isNotDone;
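// One relaxation pass of iterative connected-component labelling: every non-zero voxel
// scans its 26-neighbourhood for the smallest non-zero label and, if that label is smaller
// than its own, merges it into the label tree via labels[labels[lcur]]. d_isNotDone signals
// that another pass is needed (presumably the host re-launches until it stays 0).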
__global__ void scanLabels(int* labels, int w, int h, int d) {
const int x = blockIdx.x * CCL_BLOCK_SIZE_X + threadIdx.x;
const int y = blockIdx.y * CCL_BLOCK_SIZE_Y + threadIdx.y;
const int z = blockIdx.z * CCL_BLOCK_SIZE_Z + threadIdx.z;
const int index = (z*h + y)*w + x;
if (x >= w || y >= h || z >= d) return;
const int Z1 = w*h; const int Y1 = w;
int lcur = labels[index];
if (lcur) {
int lmin = index; // MAX
// 26-neighbors
int lne, pos;
for (int Zdif = -Z1; Zdif <= Z1; Zdif += Z1) {
for (int Ydif = -Y1; Ydif <= Y1; Ydif += Y1) {
for (int Xdif = -1; Xdif <= 1; Xdif += 1) {
pos = index + Zdif + Ydif + Xdif;
lne = (pos >= 0 && pos < w*h*d) ? labels[pos] : 0; // circular boundary
if (lne && lne < lmin) lmin = lne;
}
}
}
// The (Xdif,Ydif,Zdif)=(0,0,0) case is the voxel itself and is not needed, but including it is harmless.
if (lmin < lcur) {
int lpa = labels[lcur];
labels[lpa] = min(lpa, lmin);
d_isNotDone = 1;
}
}
} |
2a20d25058baf6dfe74065c367165588530b507a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wbd_gpu_detection.cuh"
#include "wbd_detector.h"
namespace wbd
{
namespace gpu
{
namespace detection
{
void initDetectionStages()
{
hipMemcpyToSymbol(stages, hostStages, sizeof(Stage) * WB_STAGE_COUNT);
}
__device__ void sumRegions(hipTextureObject_t texture, float* values, float x, float y, Stage* stage)
{
values[0] = tex2D<float>(texture, x, y);
x += stage->width;
values[1] = tex2D<float>(texture, x, y);
x += stage->width;
values[2] = tex2D<float>(texture, x, y);
y += stage->height;
values[5] = tex2D<float>(texture, x, y);
y += stage->height;
values[8] = tex2D<float>(texture, x, y);
x -= stage->width;
values[7] = tex2D<float>(texture, x, y);
x -= stage->width;
values[6] = tex2D<float>(texture, x, y);
y -= stage->height;
values[3] = tex2D<float>(texture, x, y);
x += stage->width;
values[4] = tex2D<float>(texture, x, y);
} // sumRegions
__device__ float evalLBP(hipTextureObject_t texture, hipTextureObject_t alphas, uint32 x, uint32 y, Stage* stage)
{
const uint8 LBPOrder[8] = { 0, 1, 2, 5, 8, 7, 6, 3 };
float values[9];
sumRegions(texture, values, static_cast<float>(x)+(static_cast<float>(stage->width) * 0.5f), y + (static_cast<float>(stage->height) * 0.5f), stage);
uint8 code = 0;
for (uint8 i = 0; i < 8; ++i)
code |= (values[LBPOrder[i]] > values[4]) << i;
return tex1Dfetch<float>(alphas, stage->alphaOffset + code);
} // evalLBP
__device__ bool eval(hipTextureObject_t texture, hipTextureObject_t alphas, uint32 x, uint32 y, float* response, uint16 startStage, uint16 endStage)
{
for (uint16 i = startStage; i < endStage; ++i) {
Stage stage = stages[i];
*response += evalLBP(texture, alphas, x + stage.x, y + stage.y, &stage);
if (*response < stage.thetaB) {
return false;
}
}
// final waldboost threshold
return *response > WB_FINAL_THRESHOLD;
} // eval
namespace prefixsum
{
__device__ void detectSurvivorsInit
(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& x,
uint32 const& y,
uint32 const& threadId,
uint32 const& globalOffset,
uint32 const& blockSize,
SurvivorData* survivors,
uint32& survivorCount,
uint32* survivorScanArray,
uint16 endStage
){
float response = 0.0f;
bool survived = eval(texture, alphas, x, y, &response, 0, endStage);
survivorScanArray[threadId] = survived ? 1 : 0;
__syncthreads();
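// Compact the surviving windows with a Blelloch-style exclusive scan over the 0/1 flags
// in shared memory: each survivor's scan value becomes its slot in the survivors array.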
// up-sweep
uint32 offset = 1;
for (uint32 d = blockSize >> 1; d > 0; d >>= 1, offset <<= 1)
{
__syncthreads();
if (threadId < d)
{
const uint32 ai = offset * (2 * threadId + 1) - 1;
const uint32 bi = offset * (2 * threadId + 2) - 1;
survivorScanArray[bi] += survivorScanArray[ai];
}
}
// down-sweep
if (threadId == 0) {
survivorScanArray[blockSize - 1] = 0;
}
for (uint32 d = 1; d < blockSize; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (threadId < d)
{
const uint32 ai = offset * (2 * threadId + 1) - 1;
const uint32 bi = offset * (2 * threadId + 2) - 1;
const uint32 t = survivorScanArray[ai];
survivorScanArray[ai] = survivorScanArray[bi];
survivorScanArray[bi] += t;
}
}
__syncthreads();
if (threadId == 0)
survivorCount = survivorScanArray[blockSize - 1];
if (survived)
{
uint32 newThreadId = survivorScanArray[threadId];
// save position and current response
survivors[newThreadId].x = x;
survivors[newThreadId].y = y;
survivors[newThreadId].response = response;
}
}
__device__ void detectSurvivors
(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
uint32 const& blockSize,
SurvivorData* survivors,
uint32& survivorCount,
uint32* survivorScanArray,
uint16 startStage,
uint16 endStage
){
float response = survivors[globalOffset + threadId].response;
const uint32 x = survivors[globalOffset + threadId].x;
const uint32 y = survivors[globalOffset + threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, endStage);
survivorScanArray[threadId] = survived ? 1 : 0;
// up-sweep
int offset = 1;
for (uint32 d = blockSize >> 1; d > 0; d >>= 1, offset <<= 1) {
__syncthreads();
if (threadId < d) {
uint32 ai = offset * (2 * threadId + 1) - 1;
uint32 bi = offset * (2 * threadId + 2) - 1;
survivorScanArray[bi] += survivorScanArray[ai];
}
}
// down-sweep
if (threadId == 0) {
survivorScanArray[blockSize - 1] = 0;
}
for (uint32 d = 1; d < blockSize; d <<= 1) {
offset >>= 1;
__syncthreads();
if (threadId < d) {
uint32 ai = offset * (2 * threadId + 1) - 1;
uint32 bi = offset * (2 * threadId + 2) - 1;
uint32 t = survivorScanArray[ai];
survivorScanArray[ai] = survivorScanArray[bi];
survivorScanArray[bi] += t;
}
}
__syncthreads();
if (threadId == 0)
survivorCount = survivorScanArray[blockSize - 1];
if (survived) {
uint32 newThreadId = globalOffset + survivorScanArray[threadId];
// save position and current response
survivors[newThreadId].x = x;
survivors[newThreadId].y = y;
survivors[newThreadId].response = response;
}
}
__device__ void detectDetections
(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* survivors,
Detection* detections,
uint32* detectionCount,
uint16 startStage
){
float response = survivors[globalOffset + threadId].response;
const uint32 x = survivors[globalOffset + threadId].x;
const uint32 y = survivors[globalOffset + threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, WB_STAGE_COUNT);
if (survived) {
uint32 pos = atomicInc(detectionCount, WB_MAX_DETECTIONS);
detections[pos].x = x;
detections[pos].y = y;
detections[pos].width = WB_CLASSIFIER_WIDTH;
detections[pos].height = WB_CLASSIFIER_HEIGHT;
detections[pos].response = response;
}
}
__global__ void detect(
hipTextureObject_t texture,
hipTextureObject_t alphas,
Detection* detections,
uint32* detectionCount)
{
extern __shared__ SurvivorData survivors[];
uint32* survivorScanArray = (uint32*)&survivors[blockDim.x * blockDim.y];
__shared__ uint32 survivorCount;
const uint32 x = (blockIdx.x * blockDim.x) + threadIdx.x;
const uint32 y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uint32 blockSize = blockDim.x * blockDim.y;
const uint32 blockPitch = gridDim.x * blockSize;
const uint32 blockOffset = blockIdx.y * blockPitch + blockIdx.x * blockSize;
const uint32 threadId = threadIdx.y * blockDim.x + threadIdx.x;
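// WaldBoost cascade: evaluate the stages in progressively larger chunks (0-1, 1-8, 8-64,
// 64-256, 256-512, 512-end) and compact the surviving windows between chunks, so the
// deeper, more expensive stages only run on positions that are still alive.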
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivorsInit(texture, alphas, x, y, threadId, blockOffset, blockSize, survivors, survivorCount, survivorScanArray, 1);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 1, 8);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 8, 64);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 64, 256);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 256, 512);
__syncthreads();
if (threadId >= survivorCount)
return;
atomicshared::detectDetections(texture, alphas, threadId, survivors, detections, detectionCount, 512);
}
} // namespace prefixsum
namespace atomicshared
{
__device__
void detectSurvivorsInit(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& x,
uint32 const& y,
uint32 const& threadId,
SurvivorData* localSurvivors,
uint32* localSurvivorCount,
uint16 endStage)
{
float response = 0.0f;
bool survived = eval(texture, alphas, x, y, &response, 0, endStage);
if (survived)
{
uint32 newThreadId = atomicInc(localSurvivorCount, blockDim.x * blockDim.y);
// save position and current response
localSurvivors[newThreadId].x = x;
localSurvivors[newThreadId].y = y;
localSurvivors[newThreadId].response = response;
}
}
__device__ void detectSurvivors(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& threadId,
SurvivorData* localSurvivors,
uint32* localSurvivorCount,
uint16 startStage,
uint16 endStage)
{
float response = localSurvivors[threadId].response;
const uint32 x = localSurvivors[threadId].x;
const uint32 y = localSurvivors[threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, endStage);
if (survived)
{
uint32 newThreadId = atomicInc(localSurvivorCount, blockDim.x * blockDim.y);
localSurvivors[newThreadId].x = x;
localSurvivors[newThreadId].y = y;
localSurvivors[newThreadId].response = response;
}
}
__device__
void detectDetections(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& threadId,
SurvivorData* localSurvivors,
Detection* detections,
uint32* detectionCount,
uint16 startStage)
{
float response = localSurvivors[threadId].response;
const uint32 x = localSurvivors[threadId].x;
const uint32 y = localSurvivors[threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, WB_STAGE_COUNT);
if (survived)
{
uint32 pos = atomicInc(detectionCount, WB_MAX_DETECTIONS);
detections[pos].x = x;
detections[pos].y = y;
detections[pos].width = WB_CLASSIFIER_WIDTH;
detections[pos].height = WB_CLASSIFIER_HEIGHT;
detections[pos].response = response;
}
}
__global__ void detect(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 width,
uint32 height,
Detection* detections,
uint32* detectionCount)
{
extern __shared__ SurvivorData survivors[];
__shared__ uint32 survivorCount;
const uint32 x = (blockIdx.x * blockDim.x) + threadIdx.x;
const uint32 y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < width - WB_CLASSIFIER_WIDTH && y < height - WB_CLASSIFIER_HEIGHT)
{
const uint32 threadId = threadIdx.y * blockDim.x + threadIdx.x;
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivorsInit(texture, alphas, x, y, threadId, survivors, &survivorCount, 1);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 1, 8);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 8, 64);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 64, 256);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 256, 512);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
detectDetections(texture, alphas, threadId, survivors, detections, detectionCount, 512);
}
}
} // namespace atomicshared
namespace atomicglobal
{
__device__
void detectSurvivorsInit(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& x,
uint32 const& y,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* globalSurvivors,
uint32* survivorCount,
uint16 endStage)
{
float response = 0.0f;
bool survived = eval(texture, alphas, x, y, &response, 0, endStage);
if (survived)
{
uint32 threadOffset = atomicInc(survivorCount, blockDim.x * blockDim.y); // there can be max. block size survivors
uint32 newThreadId = globalOffset + threadOffset;
// save position and current response
globalSurvivors[newThreadId].x = x;
globalSurvivors[newThreadId].y = y;
globalSurvivors[newThreadId].response = response;
}
}
__device__ void detectSurvivors(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* globalSurvivors,
uint32* survivorCount,
uint16 startStage,
uint16 endStage)
{
const uint32 id = globalOffset + threadId;
float response = globalSurvivors[id].response;
const uint32 x = globalSurvivors[id].x;
const uint32 y = globalSurvivors[id].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, endStage);
if (survived)
{
uint32 threadOffset = atomicInc(survivorCount, blockDim.x * blockDim.y); // there can be max. block size survivors
uint32 newThreadId = globalOffset + threadOffset;
globalSurvivors[newThreadId].x = x;
globalSurvivors[newThreadId].y = y;
globalSurvivors[newThreadId].response = response;
}
}
__device__
void detectDetections(
hipTextureObject_t texture,
hipTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* globalSurvivors,
Detection* detections,
uint32* detectionCount,
uint16 startStage)
{
const uint32 id = globalOffset + threadId;
float response = globalSurvivors[id].response;
const uint32 x = globalSurvivors[id].x;
const uint32 y = globalSurvivors[id].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, WB_STAGE_COUNT);
if (survived)
{
uint32 pos = atomicInc(detectionCount, WB_MAX_DETECTIONS);
detections[pos].x = x;
detections[pos].y = y;
detections[pos].width = WB_CLASSIFIER_WIDTH;
detections[pos].height = WB_CLASSIFIER_HEIGHT;
detections[pos].response = response;
}
}
__global__ void detect(
hipTextureObject_t texture,
hipTextureObject_t alphas,
const uint32 width,
const uint32 height,
SurvivorData* survivors,
Detection* detections,
uint32* detectionCount)
{
const uint32 x = (blockIdx.x * blockDim.x) + threadIdx.x;
const uint32 y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < width - WB_CLASSIFIER_WIDTH && y < height - WB_CLASSIFIER_HEIGHT)
{
__shared__ uint32 blockSurvivors;
const uint32 blockSize = blockDim.x * blockDim.y;
const uint32 blockPitch = gridDim.x * blockSize;
// every block has a reserved space in global mem.
const uint32 blockOffset = blockIdx.y * blockPitch + blockIdx.x * blockSize;
// thread id inside a block
const uint32 threadId = threadIdx.y * blockDim.x + threadIdx.x;
if (threadId == 0)
blockSurvivors = 0;
__syncthreads();
detectSurvivorsInit(texture, alphas, x, y, threadId, blockOffset, survivors, &blockSurvivors, 1);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 1, 8);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 8, 64);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 64, 256);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 256, 512);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
detectDetections(texture, alphas, threadId, blockOffset, survivors, detections, detectionCount, 512);
}
}
} // namespace atomicglobal
} // namespace detection
} // namespace gpu
} // namespace wbd
| 2a20d25058baf6dfe74065c367165588530b507a.cu | #include "wbd_gpu_detection.cuh"
#include "wbd_detector.h"
namespace wbd
{
namespace gpu
{
namespace detection
{
void initDetectionStages()
{
cudaMemcpyToSymbol(stages, hostStages, sizeof(Stage) * WB_STAGE_COUNT);
}
__device__ void sumRegions(cudaTextureObject_t texture, float* values, float x, float y, Stage* stage)
{
values[0] = tex2D<float>(texture, x, y);
x += stage->width;
values[1] = tex2D<float>(texture, x, y);
x += stage->width;
values[2] = tex2D<float>(texture, x, y);
y += stage->height;
values[5] = tex2D<float>(texture, x, y);
y += stage->height;
values[8] = tex2D<float>(texture, x, y);
x -= stage->width;
values[7] = tex2D<float>(texture, x, y);
x -= stage->width;
values[6] = tex2D<float>(texture, x, y);
y -= stage->height;
values[3] = tex2D<float>(texture, x, y);
x += stage->width;
values[4] = tex2D<float>(texture, x, y);
} // sumRegions
__device__ float evalLBP(cudaTextureObject_t texture, cudaTextureObject_t alphas, uint32 x, uint32 y, Stage* stage)
{
const uint8 LBPOrder[8] = { 0, 1, 2, 5, 8, 7, 6, 3 };
float values[9];
sumRegions(texture, values, static_cast<float>(x)+(static_cast<float>(stage->width) * 0.5f), y + (static_cast<float>(stage->height) * 0.5f), stage);
uint8 code = 0;
for (uint8 i = 0; i < 8; ++i)
code |= (values[LBPOrder[i]] > values[4]) << i;
return tex1Dfetch<float>(alphas, stage->alphaOffset + code);
} // evalLBP
__device__ bool eval(cudaTextureObject_t texture, cudaTextureObject_t alphas, uint32 x, uint32 y, float* response, uint16 startStage, uint16 endStage)
{
for (uint16 i = startStage; i < endStage; ++i) {
Stage stage = stages[i];
*response += evalLBP(texture, alphas, x + stage.x, y + stage.y, &stage);
if (*response < stage.thetaB) {
return false;
}
}
// final waldboost threshold
return *response > WB_FINAL_THRESHOLD;
} // eval
namespace prefixsum
{
__device__ void detectSurvivorsInit
(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& x,
uint32 const& y,
uint32 const& threadId,
uint32 const& globalOffset,
uint32 const& blockSize,
SurvivorData* survivors,
uint32& survivorCount,
uint32* survivorScanArray,
uint16 endStage
){
float response = 0.0f;
bool survived = eval(texture, alphas, x, y, &response, 0, endStage);
survivorScanArray[threadId] = survived ? 1 : 0;
__syncthreads();
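// Compact the surviving windows with a Blelloch-style exclusive scan over the 0/1 flags
// in shared memory: each survivor's scan value becomes its slot in the survivors array.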
// up-sweep
uint32 offset = 1;
for (uint32 d = blockSize >> 1; d > 0; d >>= 1, offset <<= 1)
{
__syncthreads();
if (threadId < d)
{
const uint32 ai = offset * (2 * threadId + 1) - 1;
const uint32 bi = offset * (2 * threadId + 2) - 1;
survivorScanArray[bi] += survivorScanArray[ai];
}
}
// down-sweep
if (threadId == 0) {
survivorScanArray[blockSize - 1] = 0;
}
for (uint32 d = 1; d < blockSize; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (threadId < d)
{
const uint32 ai = offset * (2 * threadId + 1) - 1;
const uint32 bi = offset * (2 * threadId + 2) - 1;
const uint32 t = survivorScanArray[ai];
survivorScanArray[ai] = survivorScanArray[bi];
survivorScanArray[bi] += t;
}
}
__syncthreads();
if (threadId == 0)
survivorCount = survivorScanArray[blockSize - 1];
if (survived)
{
uint32 newThreadId = survivorScanArray[threadId];
// save position and current response
survivors[newThreadId].x = x;
survivors[newThreadId].y = y;
survivors[newThreadId].response = response;
}
}
__device__ void detectSurvivors
(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
uint32 const& blockSize,
SurvivorData* survivors,
uint32& survivorCount,
uint32* survivorScanArray,
uint16 startStage,
uint16 endStage
){
float response = survivors[globalOffset + threadId].response;
const uint32 x = survivors[globalOffset + threadId].x;
const uint32 y = survivors[globalOffset + threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, endStage);
survivorScanArray[threadId] = survived ? 1 : 0;
// up-sweep
int offset = 1;
for (uint32 d = blockSize >> 1; d > 0; d >>= 1, offset <<= 1) {
__syncthreads();
if (threadId < d) {
uint32 ai = offset * (2 * threadId + 1) - 1;
uint32 bi = offset * (2 * threadId + 2) - 1;
survivorScanArray[bi] += survivorScanArray[ai];
}
}
// down-sweep
if (threadId == 0) {
survivorScanArray[blockSize - 1] = 0;
}
for (uint32 d = 1; d < blockSize; d <<= 1) {
offset >>= 1;
__syncthreads();
if (threadId < d) {
uint32 ai = offset * (2 * threadId + 1) - 1;
uint32 bi = offset * (2 * threadId + 2) - 1;
uint32 t = survivorScanArray[ai];
survivorScanArray[ai] = survivorScanArray[bi];
survivorScanArray[bi] += t;
}
}
__syncthreads();
if (threadId == 0)
survivorCount = survivorScanArray[blockSize - 1];
if (survived) {
uint32 newThreadId = globalOffset + survivorScanArray[threadId];
// save position and current response
survivors[newThreadId].x = x;
survivors[newThreadId].y = y;
survivors[newThreadId].response = response;
}
}
__device__ void detectDetections
(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* survivors,
Detection* detections,
uint32* detectionCount,
uint16 startStage
){
float response = survivors[globalOffset + threadId].response;
const uint32 x = survivors[globalOffset + threadId].x;
const uint32 y = survivors[globalOffset + threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, WB_STAGE_COUNT);
if (survived) {
uint32 pos = atomicInc(detectionCount, WB_MAX_DETECTIONS);
detections[pos].x = x;
detections[pos].y = y;
detections[pos].width = WB_CLASSIFIER_WIDTH;
detections[pos].height = WB_CLASSIFIER_HEIGHT;
detections[pos].response = response;
}
}
__global__ void detect(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
Detection* detections,
uint32* detectionCount)
{
extern __shared__ SurvivorData survivors[];
uint32* survivorScanArray = (uint32*)&survivors[blockDim.x * blockDim.y];
__shared__ uint32 survivorCount;
const uint32 x = (blockIdx.x * blockDim.x) + threadIdx.x;
const uint32 y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uint32 blockSize = blockDim.x * blockDim.y;
const uint32 blockPitch = gridDim.x * blockSize;
const uint32 blockOffset = blockIdx.y * blockPitch + blockIdx.x * blockSize;
const uint32 threadId = threadIdx.y * blockDim.x + threadIdx.x;
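// WaldBoost cascade: evaluate the stages in progressively larger chunks (0-1, 1-8, 8-64,
// 64-256, 256-512, 512-end) and compact the surviving windows between chunks, so the
// deeper, more expensive stages only run on positions that are still alive.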
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivorsInit(texture, alphas, x, y, threadId, blockOffset, blockSize, survivors, survivorCount, survivorScanArray, 1);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 1, 8);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 8, 64);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 64, 256);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
atomicshared::detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 256, 512);
__syncthreads();
if (threadId >= survivorCount)
return;
atomicshared::detectDetections(texture, alphas, threadId, survivors, detections, detectionCount, 512);
}
} // namespace prefixsum
namespace atomicshared
{
__device__
void detectSurvivorsInit(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& x,
uint32 const& y,
uint32 const& threadId,
SurvivorData* localSurvivors,
uint32* localSurvivorCount,
uint16 endStage)
{
float response = 0.0f;
bool survived = eval(texture, alphas, x, y, &response, 0, endStage);
if (survived)
{
uint32 newThreadId = atomicInc(localSurvivorCount, blockDim.x * blockDim.y);
// save position and current response
localSurvivors[newThreadId].x = x;
localSurvivors[newThreadId].y = y;
localSurvivors[newThreadId].response = response;
}
}
__device__ void detectSurvivors(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& threadId,
SurvivorData* localSurvivors,
uint32* localSurvivorCount,
uint16 startStage,
uint16 endStage)
{
float response = localSurvivors[threadId].response;
const uint32 x = localSurvivors[threadId].x;
const uint32 y = localSurvivors[threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, endStage);
if (survived)
{
uint32 newThreadId = atomicInc(localSurvivorCount, blockDim.x * blockDim.y);
localSurvivors[newThreadId].x = x;
localSurvivors[newThreadId].y = y;
localSurvivors[newThreadId].response = response;
}
}
__device__
void detectDetections(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& threadId,
SurvivorData* localSurvivors,
Detection* detections,
uint32* detectionCount,
uint16 startStage)
{
float response = localSurvivors[threadId].response;
const uint32 x = localSurvivors[threadId].x;
const uint32 y = localSurvivors[threadId].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, WB_STAGE_COUNT);
if (survived)
{
uint32 pos = atomicInc(detectionCount, WB_MAX_DETECTIONS);
detections[pos].x = x;
detections[pos].y = y;
detections[pos].width = WB_CLASSIFIER_WIDTH;
detections[pos].height = WB_CLASSIFIER_HEIGHT;
detections[pos].response = response;
}
}
__global__ void detect(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 width,
uint32 height,
Detection* detections,
uint32* detectionCount)
{
extern __shared__ SurvivorData survivors[];
__shared__ uint32 survivorCount;
const uint32 x = (blockIdx.x * blockDim.x) + threadIdx.x;
const uint32 y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < width - WB_CLASSIFIER_WIDTH && y < height - WB_CLASSIFIER_HEIGHT)
{
const uint32 threadId = threadIdx.y * blockDim.x + threadIdx.x;
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivorsInit(texture, alphas, x, y, threadId, survivors, &survivorCount, 1);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 1, 8);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 8, 64);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 64, 256);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
if (threadId == 0)
survivorCount = 0;
__syncthreads();
detectSurvivors(texture, alphas, threadId, survivors, &survivorCount, 256, 512);
__syncthreads();
if (threadId >= survivorCount)
return;
__syncthreads();
detectDetections(texture, alphas, threadId, survivors, detections, detectionCount, 512);
}
}
} // namespace atomicshared
namespace atomicglobal
{
__device__
void detectSurvivorsInit(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& x,
uint32 const& y,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* globalSurvivors,
uint32* survivorCount,
uint16 endStage)
{
float response = 0.0f;
bool survived = eval(texture, alphas, x, y, &response, 0, endStage);
if (survived)
{
uint32 threadOffset = atomicInc(survivorCount, blockDim.x * blockDim.y); // there can be max. block size survivors
uint32 newThreadId = globalOffset + threadOffset;
// save position and current response
globalSurvivors[newThreadId].x = x;
globalSurvivors[newThreadId].y = y;
globalSurvivors[newThreadId].response = response;
}
}
__device__ void detectSurvivors(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* globalSurvivors,
uint32* survivorCount,
uint16 startStage,
uint16 endStage)
{
const uint32 id = globalOffset + threadId;
float response = globalSurvivors[id].response;
const uint32 x = globalSurvivors[id].x;
const uint32 y = globalSurvivors[id].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, endStage);
if (survived)
{
uint32 threadOffset = atomicInc(survivorCount, blockDim.x * blockDim.y); // there can be max. block size survivors
uint32 newThreadId = globalOffset + threadOffset;
globalSurvivors[newThreadId].x = x;
globalSurvivors[newThreadId].y = y;
globalSurvivors[newThreadId].response = response;
}
}
__device__
void detectDetections(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
uint32 const& threadId,
uint32 const& globalOffset,
SurvivorData* globalSurvivors,
Detection* detections,
uint32* detectionCount,
uint16 startStage)
{
const uint32 id = globalOffset + threadId;
float response = globalSurvivors[id].response;
const uint32 x = globalSurvivors[id].x;
const uint32 y = globalSurvivors[id].y;
bool survived = eval(texture, alphas, x, y, &response, startStage, WB_STAGE_COUNT);
if (survived)
{
uint32 pos = atomicInc(detectionCount, WB_MAX_DETECTIONS);
detections[pos].x = x;
detections[pos].y = y;
detections[pos].width = WB_CLASSIFIER_WIDTH;
detections[pos].height = WB_CLASSIFIER_HEIGHT;
detections[pos].response = response;
}
}
__global__ void detect(
cudaTextureObject_t texture,
cudaTextureObject_t alphas,
const uint32 width,
const uint32 height,
SurvivorData* survivors,
Detection* detections,
uint32* detectionCount)
{
const uint32 x = (blockIdx.x * blockDim.x) + threadIdx.x;
const uint32 y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < width - WB_CLASSIFIER_WIDTH && y < height - WB_CLASSIFIER_HEIGHT)
{
__shared__ uint32 blockSurvivors;
const uint32 blockSize = blockDim.x * blockDim.y;
const uint32 blockPitch = gridDim.x * blockSize;
// every block has a reserved space in global mem.
const uint32 blockOffset = blockIdx.y * blockPitch + blockIdx.x * blockSize;
// thread id inside a block
const uint32 threadId = threadIdx.y * blockDim.x + threadIdx.x;
if (threadId == 0)
blockSurvivors = 0;
__syncthreads();
detectSurvivorsInit(texture, alphas, x, y, threadId, blockOffset, survivors, &blockSurvivors, 1);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 1, 8);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 8, 64);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 64, 256);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
// dump all threads, which didn't survive
__syncthreads();
if (threadId == 0)
blockSurvivors = 0;
// reset the counter
__syncthreads();
detectSurvivors(texture, alphas, threadId, blockOffset, survivors, &blockSurvivors, 256, 512);
// finish all the detections within a block
__syncthreads();
if (threadId >= blockSurvivors)
return;
detectDetections(texture, alphas, threadId, blockOffset, survivors, detections, detectionCount, 512);
}
}
} // namespace atomicglobal
} // namespace detection
} // namespace gpu
} // namespace wbd
|
96227cc089b22e22a346763d7219ef8e79c9b7d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/scatter.cu.h"
#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h"
namespace phi {
namespace sparse {
// rulebook[3, rulebook_len]:
//[
// [kernel_index],
// [in_i],
// [out_i],
//]
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
template <typename T, typename IntT>
void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
const auto& kernel_dims = kernel.dims();
const int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
const int in_channels = kernel_dims[3];
const int out_channels = kernel_dims[4];
const IntT* rulebook_ptr = rulebook.data<IntT>();
const int rulebook_len = rulebook.dims()[1];
DenseTensorMeta in_features_meta(
x.dtype(), {rulebook_len, in_channels}, DataLayout::NCHW);
DenseTensorMeta d_x_features_meta(
x.dtype(), {rulebook_len, in_channels}, DataLayout::NCHW);
DenseTensorMeta out_grad_features_meta(
x.dtype(), {rulebook_len, out_channels}, DataLayout::NCHW);
phi::DenseTensor in_features =
phi::Empty(dev_ctx, std::move(in_features_meta));
phi::DenseTensor d_x_features =
phi::Empty(dev_ctx, std::move(d_x_features_meta));
phi::DenseTensor out_grad_features =
phi::Empty(dev_ctx, std::move(out_grad_features_meta));
T* in_features_ptr = in_features.data<T>();
T* d_x_features_ptr = d_x_features.data<T>();
T* out_grad_features_ptr = out_grad_features.data<T>();
*kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel);
T* d_kernel_ptr = kernel_grad->data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, kernel_grad, static_cast<T>(0.0f));
int half_kernel_size = kernel_size / 2;
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
DenseTensor x_grad_indices =
phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements());
T* x_grad_values_ptr = x_grad_values.data<T>();
set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f));
set_zero(dev_ctx, &d_x_features, static_cast<T>(0.0f));
phi::Copy<GPUContext>(dev_ctx,
x.non_zero_indices(),
dev_ctx.GetPlace(),
false,
&x_grad_indices);
x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
std::vector<IntT> offsets(kernel_size + 1), counter(kernel_size, 0),
h_counter(rulebook_len, 0);
phi::backends::gpu::GpuMemcpyAsync(&h_counter[0],
rulebook_ptr,
rulebook_len * sizeof(IntT),
#ifdef PADDLE_WITH_HIP
hipMemcpyDeviceToHost,
#else
hipMemcpyDeviceToHost,
#endif
dev_ctx.stream());
dev_ctx.Wait();
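// h_counter holds the kernel-offset index of every rulebook entry; build a per-offset
// histogram (counter) and exclusive prefix offsets so each per-offset GEMM below can
// address a contiguous slice of the gathered feature buffers. max_count records whether
// any entries fall in the first half of the kernel, which drives the subm early exit.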
for (int i = 0; i < rulebook_len; i++) {
counter[h_counter[i]] += 1;
}
IntT offset = 0, max_count = 0;
for (int i = 0; i < kernel_size; i++) {
offsets[i] = offset;
offset += counter[i];
if (i < half_kernel_size) {
max_count = ::max(max_count, counter[i]);
}
}
offsets[kernel_size] = offset;
if (subm) {
phi::funcs::sparse::SubmPreProcess<T, GPUContext>(
dev_ctx,
x,
kernel,
out_grad.non_zero_elements(),
in_channels,
out_channels,
half_kernel_size,
kernel_grad,
&x_grad_values);
if (max_count == 0) {
return;
}
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, rulebook_len * in_channels, 1);
hipLaunchKernelGGL(( GatherKernel<T, IntT>), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), x.non_zero_elements().data<T>(),
rulebook_ptr + rulebook_len,
in_features_ptr,
rulebook_len,
in_channels);
config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, rulebook_len * out_channels, 1);
hipLaunchKernelGGL(( GatherKernel<T, IntT>), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(),
out_grad.non_zero_elements().data<T>(),
rulebook_ptr + rulebook_len * 2,
out_grad_features_ptr,
rulebook_len,
out_channels);
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (counter[i] <= 0 || (subm && i == half_kernel_size)) {
continue;
}
const int M = counter[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels;
T* tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels;
T* tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels;
// call gemm: d_kernel = transpose(x) * out_grad
// (in_channels, n) * (n, out_channels)
blas.GEMM(CblasTrans,
CblasNoTrans,
K,
N,
M,
static_cast<T>(1),
tmp_in_ptr,
tmp_out_grad_ptr,
static_cast<T>(0),
tmp_d_kernel_ptr);
// call gemm: d_x = out_grad * transpose(kernel)
// (n, out_channels) * (out_channels, in_channels)
blas.GEMM(CblasNoTrans,
CblasTrans,
M,
K,
N,
static_cast<T>(1),
tmp_out_grad_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_d_x_ptr);
}
// 4. scatter
config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, rulebook_len * in_channels, 1);
hipLaunchKernelGGL(( phi::funcs::ScatterCUDAKernel), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(),
d_x_features_ptr,
rulebook_ptr + rulebook_len,
x_grad_values_ptr,
rulebook_len,
in_channels,
false);
}
template <typename T, typename Context>
void Conv3dGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGradGPUKernel", ([&] {
Conv3dGradGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
rulebook,
out_grad,
paddings,
dilations,
strides,
groups,
subm,
x_grad,
kernel_grad);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d_grad,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dGradKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
| 96227cc089b22e22a346763d7219ef8e79c9b7d6.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/scatter.cu.h"
#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h"
namespace phi {
namespace sparse {
// rulebook[3, rulebook_len]:
//[
// [kernel_index],
// [in_i],
// [out_i],
//]
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
template <typename T, typename IntT>
void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
const auto& kernel_dims = kernel.dims();
const int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
const int in_channels = kernel_dims[3];
const int out_channels = kernel_dims[4];
const IntT* rulebook_ptr = rulebook.data<IntT>();
const int rulebook_len = rulebook.dims()[1];
DenseTensorMeta in_features_meta(
x.dtype(), {rulebook_len, in_channels}, DataLayout::NCHW);
DenseTensorMeta d_x_features_meta(
x.dtype(), {rulebook_len, in_channels}, DataLayout::NCHW);
DenseTensorMeta out_grad_features_meta(
x.dtype(), {rulebook_len, out_channels}, DataLayout::NCHW);
phi::DenseTensor in_features =
phi::Empty(dev_ctx, std::move(in_features_meta));
phi::DenseTensor d_x_features =
phi::Empty(dev_ctx, std::move(d_x_features_meta));
phi::DenseTensor out_grad_features =
phi::Empty(dev_ctx, std::move(out_grad_features_meta));
T* in_features_ptr = in_features.data<T>();
T* d_x_features_ptr = d_x_features.data<T>();
T* out_grad_features_ptr = out_grad_features.data<T>();
*kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel);
T* d_kernel_ptr = kernel_grad->data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, kernel_grad, static_cast<T>(0.0f));
int half_kernel_size = kernel_size / 2;
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
DenseTensor x_grad_indices =
phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements());
T* x_grad_values_ptr = x_grad_values.data<T>();
set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f));
set_zero(dev_ctx, &d_x_features, static_cast<T>(0.0f));
phi::Copy<GPUContext>(dev_ctx,
x.non_zero_indices(),
dev_ctx.GetPlace(),
false,
&x_grad_indices);
x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
std::vector<IntT> offsets(kernel_size + 1), counter(kernel_size, 0),
h_counter(rulebook_len, 0);
phi::backends::gpu::GpuMemcpyAsync(&h_counter[0],
rulebook_ptr,
rulebook_len * sizeof(IntT),
#ifdef PADDLE_WITH_HIP
hipMemcpyDeviceToHost,
#else
cudaMemcpyDeviceToHost,
#endif
dev_ctx.stream());
dev_ctx.Wait();
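// h_counter holds the kernel-offset index of every rulebook entry; build a per-offset
// histogram (counter) and exclusive prefix offsets so each per-offset GEMM below can
// address a contiguous slice of the gathered feature buffers. max_count records whether
// any entries fall in the first half of the kernel, which drives the subm early exit.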
for (int i = 0; i < rulebook_len; i++) {
counter[h_counter[i]] += 1;
}
IntT offset = 0, max_count = 0;
for (int i = 0; i < kernel_size; i++) {
offsets[i] = offset;
offset += counter[i];
if (i < half_kernel_size) {
max_count = std::max(max_count, counter[i]);
}
}
offsets[kernel_size] = offset;
if (subm) {
phi::funcs::sparse::SubmPreProcess<T, GPUContext>(
dev_ctx,
x,
kernel,
out_grad.non_zero_elements(),
in_channels,
out_channels,
half_kernel_size,
kernel_grad,
&x_grad_values);
if (max_count == 0) {
return;
}
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, rulebook_len * in_channels, 1);
GatherKernel<T, IntT><<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(x.non_zero_elements().data<T>(),
rulebook_ptr + rulebook_len,
in_features_ptr,
rulebook_len,
in_channels);
config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, rulebook_len * out_channels, 1);
GatherKernel<T, IntT><<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(
out_grad.non_zero_elements().data<T>(),
rulebook_ptr + rulebook_len * 2,
out_grad_features_ptr,
rulebook_len,
out_channels);
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (counter[i] <= 0 || (subm && i == half_kernel_size)) {
continue;
}
const int M = counter[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels;
T* tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels;
T* tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels;
// call gemm: d_kernel = transpose(x) * out_grad
// (in_channels, n) * (n, out_channels)
blas.GEMM(CblasTrans,
CblasNoTrans,
K,
N,
M,
static_cast<T>(1),
tmp_in_ptr,
tmp_out_grad_ptr,
static_cast<T>(0),
tmp_d_kernel_ptr);
// call gemm: d_x = out_grad * transpose(kernel)
// (n, out_channels) * (out_channels, in_channels)
blas.GEMM(CblasNoTrans,
CblasTrans,
M,
K,
N,
static_cast<T>(1),
tmp_out_grad_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_d_x_ptr);
}
// 4. scatter
config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, rulebook_len * in_channels, 1);
phi::funcs::ScatterCUDAKernel<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(
d_x_features_ptr,
rulebook_ptr + rulebook_len,
x_grad_values_ptr,
rulebook_len,
in_channels,
false);
}
template <typename T, typename Context>
void Conv3dGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGradGPUKernel", ([&] {
Conv3dGradGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
rulebook,
out_grad,
paddings,
dilations,
strides,
groups,
subm,
x_grad,
kernel_grad);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d_grad,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dGradKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
c88aba813291ff841bed5823590e8bb77c3deb0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Filters
//
// Includes: system
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/io.h>
#include <cutil_inline.h>
// Includes: local
#include "bmp.h"
enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER};
#define CLAMP_8bit(x) max(0, min(255, (x)))
char *BMPInFile = "lena.bmp";
char *BMPOutFile = "output.bmp";
char *Filter = "sobel";
int FilterMode = SOBEL_FILTER;
// Functions
void Cleanup(void);
void ParseArguments(int, char**);
void FilterWrapper(unsigned char* pImageIn, int Width, int Height);
// Kernels
__global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
/* Device Memory */
unsigned char *d_In;
unsigned char *d_Out;
// Timer variables
unsigned int cpuTime;
unsigned int gpuTime;
unsigned int hostToDeviceTime;
unsigned int deviceToHostTime;
// Setup for kernel size
const int TILE_WIDTH = 6;
const int TILE_HEIGHT = 6;
//const int FILTER_RADIUS = 1;
const int FILTER_RADIUS = 2;
const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1;
const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER;
const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS;
const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS;
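// Each thread block is the tile enlarged by FILTER_RADIUS on every side, so the block
// also covers the apron (halo) pixels its tile's convolution needs.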
const int EDGE_VALUE_THRESHOLD = 790;
const int HIGH_BOOST_FACTOR = 10;
#include "filter_kernel.hip"
void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete)
{
size_t palete_size;
int fd;
if((fd = open(file, O_RDONLY )) < 0)
FATAL("Open Source");
if(read(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Read BMP Header");
if(read(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Read DIB Header");
assert(dib->bpp == 8);
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if(palete_size > 0) {
*palete = (unsigned char *)malloc(palete_size);
int go = read(fd, *palete, palete_size);
if (go != palete_size) {
FATAL("Read Palete");
}
}
*data = (unsigned char *)malloc(dib->image_size);
if(read(fd, *data, dib->image_size) != dib->image_size)
FATAL("Read Image");
close(fd);
}
void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete)
{
size_t palete_size;
int fd;
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR |S_IRGRP)) < 0)
FATAL("Open Destination");
if(write(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Write BMP Header");
if(write(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Write BMP Header");
if(palete_size != 0) {
if(write(fd, palete, palete_size) != palete_size)
FATAL("Write Palete");
}
if(write(fd, data, dib->image_size) != dib->image_size)
FATAL("Write Image");
close(fd);
}
void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
const float SobelMatrix[FILTER_AREA] = {1,2,0,-2,-1,4,8,0,-8,-4,6,12,0,-12,-6,4,8,0,-8,-4,1,2,0,-2,-1};
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = 1;
endCol = cols - 1;
startRow = 1;
endRow = rows - 1;
// Go through all inner pixel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
// sum up the 9 values to calculate both the direction x and direction y
float sumX = 0, sumY=0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
}
}
imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;
}
}
}
// Host code
int main(int argc, char** argv)
{
// Create timers:
cutCreateTimer(&cpuTime);
cutCreateTimer(&gpuTime);
cutCreateTimer(&deviceToHostTime);
cutCreateTimer(&hostToDeviceTime);
ParseArguments(argc, argv);
struct bmp_header bmp;
struct dib_header dib;
unsigned char *palete = NULL;
unsigned char *data = NULL, *out = NULL;
printf("Running %s filter\n", Filter);
BitMapRead(BMPInFile, &bmp, &dib, &data, &palete);
out = (unsigned char *)malloc(dib.image_size);
printf("Computing the CPU output\n");
printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size);
//Adding start and stop timer for cpu
cutStartTimer(cpuTime);
CPU_Sobel(data, out, dib.width, dib.height);
cutStopTimer(cpuTime);
BitMapWrite("CPU_sobel.bmp", &bmp, &dib, out, palete);
printf("Done with CPU output\n");
printf("Allocating %d bytes for image \n", dib.image_size);
cutilSafeCall( hipMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) );
cutilSafeCall( hipMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) );
// calculating the time taken for host to device transfer
cutStartTimer(hostToDeviceTime);
hipMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), hipMemcpyHostToDevice);
cutStopTimer(hostToDeviceTime);
// calculating the GPU time taken for processing the filter
cutStartTimer(gpuTime);
FilterWrapper(data, dib.width, dib.height);
cutStopTimer(gpuTime);
// calculating the device to host time
cutStartTimer(deviceToHostTime);
hipMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), hipMemcpyDeviceToHost);
cutStopTimer(deviceToHostTime);
BitMapWrite(BMPOutFile, &bmp, &dib, out, palete);
// Printing out the time
printf("Printing the times calculated\n\r");
float getCpuTime = cutGetTimerValue(cpuTime);
float getGpuTime = cutGetTimerValue(gpuTime);
float getHostToDeviceTime = cutGetTimerValue(hostToDeviceTime);
float getDeviceToHostTime = cutGetTimerValue(deviceToHostTime);
printf("CPU time: %0.6f\n\r", getCpuTime);
printf("GPU time: %0.6f\n\r", getGpuTime);
printf("Host 2 Device time: %0.6f\n\r", getHostToDeviceTime);
printf("Device 2 Host time: %0.6f\n\r", getDeviceToHostTime );
Cleanup();
}
void Cleanup(void)
{
// Deleting the Timers
cutDeleteTimer(cpuTime);
cutDeleteTimer(gpuTime);
cutDeleteTimer(hostToDeviceTime);
cutDeleteTimer(deviceToHostTime);
cutilSafeCall( hipDeviceReset() );
exit(0);
}
void FilterWrapper(unsigned char* pImageIn, int Width, int Height)
{
// Design the grid decomposition around the tile size
int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT;
dim3 dimGrid(gridWidth, gridHeight);
// But launch larger blocks so each block also covers the halo region it loads into shared memory
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
switch(FilterMode) {
case SOBEL_FILTER:
printf("Sobel Filter \n");
hipLaunchKernelGGL(( SobelFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case AVERAGE_FILTER:
printf("Average Filter \n");
hipLaunchKernelGGL(( AverageFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case HIGH_BOOST_FILTER:
printf("Boost Filter \n");
hipLaunchKernelGGL(( HighBoostFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
}
cutilSafeCall( hipDeviceSynchronize() );
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) {
BMPInFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) {
BMPOutFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) {
Filter = argv[i+1];
i = i + 1;
if (strcmp(Filter, "sobel") == 0)
FilterMode = SOBEL_FILTER;
else if (strcmp(Filter, "average") == 0)
FilterMode = AVERAGE_FILTER;
else if (strcmp(Filter, "boost") == 0)
FilterMode = HIGH_BOOST_FILTER;
}
}
}
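/*
* Hedged usage sketch (added for illustration, not part of the original program): with the
* defaults above, the binary (assumed here to be named `filter`) can be driven like this:
*
* ./filter --file lena.bmp --out output.bmp --filter sobel
* ./filter -file lena.bmp -out output.bmp -filter average
* ./filter --filter boost
*
* Unrecognized filter names leave FilterMode at its default (SOBEL_FILTER).
*/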
| c88aba813291ff841bed5823590e8bb77c3deb0b.cu | //
// Filters
//
// Includes: system
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/io.h>
#include <cutil_inline.h>
// Includes: local
#include "bmp.h"
enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER};
#define CLAMP_8bit(x) max(0, min(255, (x)))
char *BMPInFile = "lena.bmp";
char *BMPOutFile = "output.bmp";
char *Filter = "sobel";
int FilterMode = SOBEL_FILTER;
// Functions
void Cleanup(void);
void ParseArguments(int, char**);
void FilterWrapper(unsigned char* pImageIn, int Width, int Height);
// Kernels
__global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
/* Device Memory */
unsigned char *d_In;
unsigned char *d_Out;
// Timer variables
unsigned int cpuTime;
unsigned int gpuTime;
unsigned int hostToDeviceTime;
unsigned int deviceToHostTime;
// Setup for kernel size
const int TILE_WIDTH = 6;
const int TILE_HEIGHT = 6;
//const int FILTER_RADIUS = 1;
const int FILTER_RADIUS = 2;
const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1;
const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER;
const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS;
const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS;
const int EDGE_VALUE_THRESHOLD = 790;
const int HIGH_BOOST_FACTOR = 10;
#include "filter_kernel.cu"
void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete)
{
size_t palete_size;
int fd;
if((fd = open(file, O_RDONLY )) < 0)
FATAL("Open Source");
if(read(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Read BMP Header");
if(read(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Read DIB Header");
assert(dib->bpp == 8);
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if(palete_size > 0) {
*palete = (unsigned char *)malloc(palete_size);
int go = read(fd, *palete, palete_size);
if (go != palete_size) {
FATAL("Read Palete");
}
}
*data = (unsigned char *)malloc(dib->image_size);
if(read(fd, *data, dib->image_size) != dib->image_size)
FATAL("Read Image");
close(fd);
}
void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete)
{
size_t palete_size;
int fd;
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR |S_IRGRP)) < 0)
FATAL("Open Destination");
if(write(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Write BMP Header");
if(write(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Write BMP Header");
if(palete_size != 0) {
if(write(fd, palete, palete_size) != palete_size)
FATAL("Write Palete");
}
if(write(fd, data, dib->image_size) != dib->image_size)
FATAL("Write Image");
close(fd);
}
void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
const float SobelMatrix[FILTER_AREA] = {1,2,0,-2,-1,4,8,0,-8,-4,6,12,0,-12,-6,4,8,0,-8,-4,1,2,0,-2,-1};
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = FILTER_RADIUS;
endCol = cols - FILTER_RADIUS;
startRow = FILTER_RADIUS;
endRow = rows - FILTER_RADIUS;
// Go through all inner pixel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
// sum up the FILTER_AREA (5x5 = 25) weighted values to calculate both the x and y gradient components
float sumX = 0, sumY=0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
}
}
imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;
}
}
}
// Host code
int main(int argc, char** argv)
{
// Create timers:
cutCreateTimer(&cpuTime);
cutCreateTimer(&gpuTime);
cutCreateTimer(&deviceToHostTime);
cutCreateTimer(&hostToDeviceTime);
ParseArguments(argc, argv);
struct bmp_header bmp;
struct dib_header dib;
unsigned char *palete = NULL;
unsigned char *data = NULL, *out = NULL;
printf("Running %s filter\n", Filter);
BitMapRead(BMPInFile, &bmp, &dib, &data, &palete);
out = (unsigned char *)malloc(dib.image_size);
printf("Computing the CPU output\n");
printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size);
//Adding start and stop timer for cpu
cutStartTimer(cpuTime);
CPU_Sobel(data, out, dib.width, dib.height);
cutStopTimer(cpuTime);
BitMapWrite("CPU_sobel.bmp", &bmp, &dib, out, palete);
printf("Done with CPU output\n");
printf("Allocating %d bytes for image \n", dib.image_size);
cutilSafeCall( cudaMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) );
cutilSafeCall( cudaMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) );
// calculating the time taken for host to device transfer
cutStartTimer(hostToDeviceTime);
cudaMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cutStopTimer(hostToDeviceTime);
// calculating the GPU time taken for processing the filter
cutStartTimer(gpuTime);
FilterWrapper(data, dib.width, dib.height);
cutStopTimer(gpuTime);
// calculating the device to host time
cutStartTimer(deviceToHostTime);
cudaMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cutStopTimer(deviceToHostTime);
BitMapWrite(BMPOutFile, &bmp, &dib, out, palete);
// Printing out the time
printf("Printing the times calculated\n\r");
float getCpuTime = cutGetTimerValue(cpuTime);
float getGpuTime = cutGetTimerValue(gpuTime);
float getHostToDeviceTime = cutGetTimerValue(hostToDeviceTime);
float getDeviceToHostTime = cutGetTimerValue(deviceToHostTime);
printf("CPU time: %0.6f\n\r", getCpuTime);
printf("GPU time: %0.6f\n\r", getGpuTime);
printf("Host 2 Device time: %0.6f\n\r", getHostToDeviceTime);
printf("Device 2 Host time: %0.6f\n\r", getDeviceToHostTime );
Cleanup();
}
void Cleanup(void)
{
// Deleting the Timers
cutDeleteTimer(cpuTime);
cutDeleteTimer(gpuTime);
cutDeleteTimer(hostToDeviceTime);
cutDeleteTimer(deviceToHostTime);
cutilSafeCall( cudaThreadExit() );
exit(0);
}
void FilterWrapper(unsigned char* pImageIn, int Width, int Height)
{
// Design the grid decomposition around the tile size
int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT;
dim3 dimGrid(gridWidth, gridHeight);
// But launch larger blocks so each block also covers the halo region it loads into shared memory
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
switch(FilterMode) {
case SOBEL_FILTER:
printf("Sobel Filter \n");
SobelFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case AVERAGE_FILTER:
printf("Average Filter \n");
AverageFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case HIGH_BOOST_FILTER:
printf("Boost Filter \n");
HighBoostFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
}
cutilSafeCall( cudaThreadSynchronize() );
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) {
BMPInFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) {
BMPOutFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) {
Filter = argv[i+1];
i = i + 1;
if (strcmp(Filter, "sobel") == 0)
FilterMode = SOBEL_FILTER;
else if (strcmp(Filter, "average") == 0)
FilterMode = AVERAGE_FILTER;
else if (strcmp(Filter, "boost") == 0)
FilterMode = HIGH_BOOST_FILTER;
}
}
}
|
5340eec20641478b15a6abf1ff67e44edb022433.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tensor/gpu_handle.h"
#include "util/gnn_macros.h"
#include "util/mem_holder.h"
namespace gnn
{
__global__ void SetupRandKernel(hiprandState_t *state, unsigned long long seed)
{
const unsigned int tidx = NUM_RND_THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
/* Each thread gets same seed, a different sequence number,
no offset */
hiprand_init(seed, tidx, 0, &state[tidx]);
}
void GpuHandle::Init(int dev_id, unsigned int _streamcnt)
{
streamcnt = _streamcnt;
hipDeviceReset();
hipSetDevice(dev_id);
cublashandles = new hipblasHandle_t[streamcnt];
cusparsehandles = new hipsparseHandle_t[streamcnt];
inUse = new bool[streamcnt];
while (!resources.empty())
resources.pop();
for (unsigned int id = 0; id < streamcnt; ++id)
{
hipblasCreate(&cublashandles[id]);
hipsparseCreate(&cusparsehandles[id]);
inUse[id] = false;
resources.push(id);
}
hipStreamCreate(&cudaRandStream);
hiprandCreateGenerator(&curandgenerator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curandgenerator, time(NULL));
hipMalloc((void **)&devRandStates, NUM_RND_STREAMS * sizeof(hiprandState_t));
hipLaunchKernelGGL(( SetupRandKernel), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRandStates, 1 + time(NULL)*2);
}
GpuContext GpuHandle::AquireCtx()
{
r_loc.lock();
ASSERT(resources.size(), "running out of gpu resources");
int cur_pos = resources.front();
resources.pop();
r_loc.unlock();
ASSERT(!inUse[cur_pos], "logic error: in-use resource is found available");
inUse[cur_pos] = true;
hipblasSetStream(cublashandles[cur_pos], cudaStreamPerThread);
hipsparseSetStream(cusparsehandles[cur_pos], cudaStreamPerThread);
return GpuContext(cur_pos, cublashandles[cur_pos], cusparsehandles[cur_pos]);
}
void GpuHandle::ReleaseCtx(const GpuContext& ctx)
{
r_loc.lock();
resources.push(ctx.id);
ASSERT(inUse[ctx.id], "logic error: in-use resource is not recorded, or you are releasing same resource multiple times");
inUse[ctx.id] = false;
r_loc.unlock();
}
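/* Hedged usage sketch (added for illustration, not part of the original file):
* a typical acquire/use/release pairing, assuming GpuHandle::Init() has already
* been called for the target device.
*
* GpuContext ctx = GpuHandle::AquireCtx();
* // ... issue BLAS/sparse work through the handles carried by ctx ...
* GpuHandle::ReleaseCtx(ctx);
*/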
void GpuHandle::Destroy()
{
hipDeviceSynchronize();
hipStreamDestroy(cudaRandStream);
for (unsigned int id = 0; id < streamcnt; ++id)
{
hipblasDestroy(cublashandles[id]);
hipsparseDestroy(cusparsehandles[id]);
}
delete[] cublashandles;
delete[] cusparsehandles;
delete[] inUse;
hiprandDestroyGenerator(curandgenerator);
hipFree(devRandStates);
streamcnt = 0U;
}
hiprandState_t* GpuHandle::devRandStates = NULL;
hipblasHandle_t* GpuHandle::cublashandles = NULL;
hipsparseHandle_t* GpuHandle::cusparsehandles = NULL;
hiprandGenerator_t GpuHandle::curandgenerator;
unsigned int GpuHandle::streamcnt = 1U;
std::queue< int > GpuHandle::resources;
std::mutex GpuHandle::r_loc;
std::mutex GpuHandle::rand_lock;
bool* GpuHandle::inUse = NULL;
hipStream_t GpuHandle::cudaRandStream;
} | 5340eec20641478b15a6abf1ff67e44edb022433.cu | #include "tensor/gpu_handle.h"
#include "util/gnn_macros.h"
#include "util/mem_holder.h"
namespace gnn
{
__global__ void SetupRandKernel(curandState_t *state, unsigned long long seed)
{
const unsigned int tidx = NUM_RND_THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
/* Each thread gets same seed, a different sequence number,
no offset */
curand_init(seed, tidx, 0, &state[tidx]);
}
void GpuHandle::Init(int dev_id, unsigned int _streamcnt)
{
streamcnt = _streamcnt;
cudaDeviceReset();
cudaSetDevice(dev_id);
cublashandles = new cublasHandle_t[streamcnt];
cusparsehandles = new cusparseHandle_t[streamcnt];
inUse = new bool[streamcnt];
while (!resources.empty())
resources.pop();
for (unsigned int id = 0; id < streamcnt; ++id)
{
cublasCreate(&cublashandles[id]);
cusparseCreate(&cusparsehandles[id]);
inUse[id] = false;
resources.push(id);
}
cudaStreamCreate(&cudaRandStream);
curandCreateGenerator(&curandgenerator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(curandgenerator, time(NULL));
cudaMalloc((void **)&devRandStates, NUM_RND_STREAMS * sizeof(curandState_t));
SetupRandKernel<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(devRandStates, 1 + time(NULL)*2);
}
GpuContext GpuHandle::AquireCtx()
{
r_loc.lock();
ASSERT(resources.size(), "running out of gpu resources");
int cur_pos = resources.front();
resources.pop();
r_loc.unlock();
ASSERT(!inUse[cur_pos], "logic error: in-use resource is found available");
inUse[cur_pos] = true;
cublasSetStream(cublashandles[cur_pos], cudaStreamPerThread);
cusparseSetStream(cusparsehandles[cur_pos], cudaStreamPerThread);
return GpuContext(cur_pos, cublashandles[cur_pos], cusparsehandles[cur_pos]);
}
void GpuHandle::ReleaseCtx(const GpuContext& ctx)
{
r_loc.lock();
resources.push(ctx.id);
ASSERT(inUse[ctx.id], "logic error: in-use resource is not recorded, or you are releasing same resource multiple times");
inUse[ctx.id] = false;
r_loc.unlock();
}
void GpuHandle::Destroy()
{
cudaDeviceSynchronize();
cudaStreamDestroy(cudaRandStream);
for (unsigned int id = 0; id < streamcnt; ++id)
{
cublasDestroy_v2(cublashandles[id]);
cusparseDestroy(cusparsehandles[id]);
}
delete[] cublashandles;
delete[] cusparsehandles;
delete[] inUse;
curandDestroyGenerator(curandgenerator);
cudaFree(devRandStates);
streamcnt = 0U;
}
curandState_t* GpuHandle::devRandStates = NULL;
cublasHandle_t* GpuHandle::cublashandles = NULL;
cusparseHandle_t* GpuHandle::cusparsehandles = NULL;
curandGenerator_t GpuHandle::curandgenerator;
unsigned int GpuHandle::streamcnt = 1U;
std::queue< int > GpuHandle::resources;
std::mutex GpuHandle::r_loc;
std::mutex GpuHandle::rand_lock;
bool* GpuHandle::inUse = NULL;
cudaStream_t GpuHandle::cudaRandStream;
} |
db50cc739995ce08d9e6ebe2ebdcbf493c1df2e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_gpu.hpp"
#include <cudf/io/orc_types.hpp>
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/block_utils.cuh>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <hipcub/hipcub.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
using cudf::detail::device_2dspan;
constexpr int scratch_buffer_size = 512 * 4;
constexpr int compact_streams_block_size = 1024;
// Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2
// Workaround replaces zero-length patch lists by a dummy zero patch
constexpr bool zero_pll_war = true;
struct byterle_enc_state_s {
uint32_t literal_run;
uint32_t repeat_run;
volatile uint32_t rpt_map[(512 / 32) + 1];
};
struct intrle_enc_state_s {
uint32_t literal_run;
uint32_t delta_run;
uint32_t literal_mode;
uint32_t literal_w;
uint32_t hdr_bytes;
uint32_t pl_bytes;
volatile uint32_t delta_map[(512 / 32) + 1];
};
struct strdata_enc_state_s {
uint32_t char_count;
uint32_t lengths_red[(512 / 32)];
char const* str_data[512];
};
struct orcenc_state_s {
uint32_t cur_row; // Current row in group
uint32_t present_rows; // # of rows in present buffer
uint32_t present_out; // # of rows in present buffer that have been flushed
uint32_t nrows; // # of rows in current batch
uint32_t numvals; // # of non-zero values in current batch (<=nrows)
uint32_t numlengths; // # of non-zero values in DATA2 batch
uint32_t nnz; // Running count of non-null values
encoder_chunk_streams stream;
EncChunk chunk;
uint32_t strm_pos[CI_NUM_STREAMS];
uint8_t valid_buf[512]; // valid map bits
union {
byterle_enc_state_s byterle;
intrle_enc_state_s intrle;
strdata_enc_state_s strenc;
stripe_dictionary const* dict_stripe;
} u;
union {
uint8_t u8[scratch_buffer_size]; // general-purpose scratch buffer
uint32_t u32[scratch_buffer_size / 4];
} buf;
union {
uint8_t u8[2048];
uint32_t u32[1024];
int32_t i32[1024];
uint64_t u64[1024];
int64_t i64[1024];
} vals;
union {
uint8_t u8[2048];
uint32_t u32[1024];
uint64_t u64[1024];
} lengths;
};
static inline __device__ uint32_t zigzag(uint32_t v) { return v; }
static inline __device__ uint32_t zigzag(int32_t v)
{
int32_t s = (v >> 31);
return ((v ^ s) * 2) - s;
}
static inline __device__ uint64_t zigzag(uint64_t v) { return v; }
static inline __device__ uint64_t zigzag(int64_t v)
{
int64_t s = (v < 0) ? 1 : 0;
return ((v ^ -s) * 2) + s;
}
static inline __device__ __uint128_t zigzag(__int128_t v)
{
int64_t s = (v < 0) ? 1 : 0;
return ((v ^ -s) * 2) + s;
}
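// Hedged illustration (added; not part of the original file): a host-side equivalent of the
// signed zigzag mapping above, shown for 64-bit values, with compile-time checks of a few
// sample inputs. Small magnitudes map to small unsigned values, which keeps varints short.
static constexpr uint64_t ZigzagHost(int64_t v)
{
return (static_cast<uint64_t>(v) << 1) ^ static_cast<uint64_t>(v >> 63);
}
static_assert(ZigzagHost(0) == 0, "0 -> 0");
static_assert(ZigzagHost(-1) == 1, "-1 -> 1");
static_assert(ZigzagHost(1) == 2, "1 -> 2");
static_assert(ZigzagHost(-2) == 3, "-2 -> 3");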
static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; }
static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; }
/**
* @brief Raw data output
*
* @tparam cid stream type (strm_pos[cid] will be updated and output stored at
* streams[cid]+strm_pos[cid])
* @tparam inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] count number of bytes to encode
* @param[in] t thread id
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ void StoreBytes(
orcenc_state_s* s, uint8_t const* inbuf, uint32_t inpos, uint32_t count, int t)
{
uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
while (count > 0) {
uint32_t n = min(count, 512);
if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; }
dst += n;
inpos += n;
count -= n;
}
__syncthreads();
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
}
/**
* @brief ByteRLE encoder
*
* @tparam cid stream type (strm_pos[cid] will be updated and output stored at
* streams[cid]+strm_pos[cid])
* @tparam inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ uint32_t ByteRLE(
orcenc_state_s* s, uint8_t const* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0) {
uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run,
maxvals = min(numvals, 512);
if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map;
__syncthreads();
if (t == 0) {
// Find the start of an identical 3-byte sequence
// TBD: The two loops below could be eliminated using more ballot+ffs using warp0
literal_run = 0;
repeat_run = 0;
while (literal_run < maxvals) {
uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1];
uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1);
if (mask) {
uint32_t literal_run_ofs = __ffs(mask) - 1;
literal_run += literal_run_ofs;
repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1));
if (repeat_run + literal_run_ofs == 32) {
while (next == ~0) {
uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1;
next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0;
repeat_run += 32;
}
repeat_run += __ffs(~next) - 1;
}
repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals));
if (repeat_run < 3) {
literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0;
repeat_run = 0;
}
break;
}
rpt_map = next;
literal_run += 32;
}
if (repeat_run >= 130) {
// Limit large runs to multiples of 130
repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 2 * 130 : 130;
} else if (literal_run && literal_run + repeat_run == maxvals) {
repeat_run = 0; // Try again at next iteration
}
s->u.byterle.repeat_run = repeat_run;
s->u.byterle.literal_run = min(literal_run, maxvals);
}
__syncthreads();
literal_run = s->u.byterle.literal_run;
if (!flush && literal_run == numvals) {
literal_run &= ~0x7f;
if (!literal_run) break;
}
if (literal_run > 0) {
uint32_t num_runs = (literal_run + 0x7f) >> 7;
if (t < literal_run) {
uint32_t run_id = t >> 7;
uint32_t run = min(literal_run - run_id * 128, 128);
if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run;
dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += num_runs + literal_run;
out_cnt += literal_run;
numvals -= literal_run;
inpos += literal_run;
}
repeat_run = s->u.byterle.repeat_run;
if (repeat_run > 0) {
while (repeat_run >= 130) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = 0x7f;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += 130;
numvals -= 130;
inpos += 130;
repeat_run -= 130;
}
if (!flush && repeat_run == numvals) {
// Wait for more data in case we can continue the run later
break;
}
if (repeat_run >= 3) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = repeat_run - 3;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += repeat_run;
numvals -= repeat_run;
inpos += repeat_run;
}
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
return out_cnt;
}
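// Worked example of the byte-RLE output format produced above (added for illustration,
// not part of the original file):
// - a run of 5 identical bytes 0x0A is emitted as the two bytes {0x02, 0x0A}
// (header = run length - 3, followed by the repeated value);
// - a literal group of 3 distinct bytes b0 b1 b2 is emitted as {0xFD, b0, b1, b2}
// (header = 0x100 - literal count, i.e. the count negated as a signed byte).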
/**
* @brief Maps the symbol size in bytes to RLEv2 5-bit length code
*/
static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = {
0, 7, 15, 23, 27, 28, 29, 30, 31};
/**
* @brief Encode a varint value, return the number of bytes written
*/
static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v)
{
uint32_t bytecnt = 0;
for (;;) {
auto c = static_cast<uint32_t>(v & 0x7f);
v >>= 7u;
if (v == 0) {
dst[bytecnt++] = c;
break;
} else {
dst[bytecnt++] = c + 0x80;
}
}
return bytecnt;
}
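// Hedged host-side sketch (added; not part of the original file): the same base-128 varint
// layout as StoreVarint above, 7 payload bits per byte with the high bit set on every byte
// except the last. For example, encoding the value 300 writes the two bytes 0xAC 0x02.
[[maybe_unused]] static inline uint32_t StoreVarintHost(uint8_t* dst, uint64_t v)
{
uint32_t n = 0;
do {
uint8_t c = static_cast<uint8_t>(v & 0x7f);
v >>= 7;
dst[n++] = (v != 0) ? (c | 0x80) : c;
} while (v != 0);
return n;
}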
template <class T>
static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w)
{
for (uint32_t i = 0, b = w * 8; i < w; ++i) {
b -= 8;
dst[i] = static_cast<uint8_t>(v >> b);
}
}
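// Worked example (added for illustration, not part of the original file): StoreBytesBigEndian
// with v = 0x0102 and w = 2 writes dst[0] = 0x01 and dst[1] = 0x02, i.e. most significant
// byte first, as required by the RLEv2 direct and patched-base encodings.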
// Combine and store bits for symbol widths less than 8
static inline __device__ void StoreBitsBigEndian(
uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t)
{
if (t <= (num_vals | 0x1f)) {
uint32_t mask;
if (w <= 1) {
v = (v << 1) | (shuffle_xor(v, 1) & 0x1);
v = (v << 2) | (shuffle_xor(v, 2) & 0x3);
v = (v << 4) | (shuffle_xor(v, 4) & 0xf);
mask = 0x7;
} else if (w <= 2) {
v = (v << 2) | (shuffle_xor(v, 1) & 0x3);
v = (v << 4) | (shuffle_xor(v, 2) & 0xf);
mask = 0x3;
} else // if (w <= 4)
{
v = (v << 4) | (shuffle_xor(v, 1) & 0xf);
mask = 0x1;
}
if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); }
}
}
/**
* @brief Integer RLEv2 encoder
*
* @tparam cid stream type (strm_pos[cid] will be updated and output stored at
* streams[cid]+strm_pos[cid])
* @tparam inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to perform block reduce
*
* @return number of input values encoded
*/
template <StreamIndexType cid,
class T,
bool is_signed,
uint32_t inmask,
int block_size,
typename Storage>
static __device__ uint32_t IntegerRLE(
orcenc_state_s* s, T const* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage)
{
using block_reduce = hipcub::BlockReduce<T, block_size>;
uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
__shared__ volatile uint64_t block_vmin;
while (numvals > 0) {
T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0;
uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512),
literal_run, delta_run;
if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map;
__syncthreads();
if (!t) {
// Find the start of the next delta run (2 consecutive values with the same delta)
literal_run = delta_run = 0;
while (literal_run < maxvals) {
if (delta_map != 0) {
uint32_t literal_run_ofs = __ffs(delta_map) - 1;
literal_run += literal_run_ofs;
delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1));
if (literal_run_ofs + delta_run == 32) {
for (;;) {
uint32_t delta_idx = (literal_run + delta_run) >> 5;
delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0;
if (delta_map != ~0) break;
delta_run += 32;
}
delta_run += __ffs(~delta_map) - 1;
}
delta_run += 2;
break;
}
literal_run += 32;
delta_map = s->u.intrle.delta_map[(literal_run >> 5)];
}
literal_run = min(literal_run, maxvals);
s->u.intrle.literal_run = literal_run;
s->u.intrle.delta_run = min(delta_run, maxvals - literal_run);
}
__syncthreads();
literal_run = s->u.intrle.literal_run;
// Find minimum and maximum values
if (literal_run > 0) {
// Find min & max
T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max();
T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min();
uint32_t literal_mode, literal_w;
vmin = block_reduce(temp_storage).Reduce(vmin, hipcub::Min());
__syncthreads();
vmax = block_reduce(temp_storage).Reduce(vmax, hipcub::Max());
if (t == 0) {
uint32_t mode1_w, mode2_w;
typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2;
block_vmin = static_cast<uint64_t>(vmin);
if constexpr (sizeof(T) > 4) {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7);
mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7);
} else {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3);
mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3);
}
// Decide between mode1 & mode2 (also mode3 for length=2 repeat)
if (vrange_mode2 == 0 && mode1_w > 1) {
// Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >=
// 3)
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
bytecnt += StoreVarint(dst + 2, vrange_mode1);
dst[bytecnt++] = 0; // Zero delta
s->u.intrle.literal_mode = 3;
s->u.intrle.literal_w = bytecnt;
} else {
uint32_t range, w;
// Mode 2 base value cannot be bigger than max int64_t, i.e. the first bit has to be 0
if (vmin <= std::numeric_limits<int64_t>::max() and mode1_w > mode2_w and
(literal_run - 1) * (mode1_w - mode2_w) > 4) {
s->u.intrle.literal_mode = 2;
w = mode2_w;
range = (uint32_t)vrange_mode2;
} else {
s->u.intrle.literal_mode = 1;
w = mode1_w;
range = (uint32_t)vrange_mode1;
}
if (w == 1)
w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 2 : 1;
else
w <<= 3; // bytes -> bits
s->u.intrle.literal_w = w;
}
}
__syncthreads();
vmin = static_cast<T>(block_vmin);
literal_mode = s->u.intrle.literal_mode;
literal_w = s->u.intrle.literal_w;
if (literal_mode == 1) {
// Direct mode
if (!t) {
dst[0] = 0x40 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
}
dst += 2;
typename std::make_unsigned<T>::type zzv0 = v0;
if (t < literal_run) { zzv0 = zigzag(v0); }
if (literal_w < 8) {
StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t);
} else if (t < literal_run) {
StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3));
}
} else if (literal_mode == 2) {
// Patched base mode
if (!t) {
uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1;
vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin;
bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7))
: (4 - min(CountLeadingBytes32(vmax << bv_scale), 3));
if (zero_pll_war) {
// Insert a dummy zero patch
pll = 1;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0;
} else {
pll = 0;
}
dst[0] = 0x80 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw];
dst[3] = ((pgw - 1) << 5) | pll;
if (is_signed) {
vmax >>= 1;
vmax |= vmin & ((T)1 << (bw * 8 - 1));
}
StoreBytesBigEndian(dst + 4, vmax, bw);
s->u.intrle.hdr_bytes = 4 + bw;
s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3;
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
v0 -= (t < literal_run) ? vmin : 0;
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
dst += s->u.intrle.pl_bytes;
} else {
// Delta mode
dst += literal_w;
literal_w = 0;
}
dst += (literal_run * literal_w + 7) >> 3;
numvals -= literal_run;
inpos += literal_run;
out_cnt += literal_run;
__syncthreads();
}
delta_run = s->u.intrle.delta_run;
if (delta_run > 0) {
if (t == literal_run) {
int64_t delta = (int64_t)v1 - (int64_t)v0;
uint64_t delta_base = zigzag(v0);
if (delta == 0 && delta_run >= 3 && delta_run <= 10) {
// Short repeat
uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7);
dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3);
for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) {
b -= 8;
dst[1 + i] = static_cast<uint8_t>(delta_base >> b);
}
s->u.intrle.hdr_bytes = 1 + delta_bw;
} else {
// Delta
uint64_t delta_u = zigzag(delta);
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((delta_run - 1) >> 8);
dst[1] = (delta_run - 1) & 0xff;
bytecnt += StoreVarint(dst + bytecnt, delta_base);
bytecnt += StoreVarint(dst + bytecnt, delta_u);
s->u.intrle.hdr_bytes = bytecnt;
}
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
numvals -= delta_run;
inpos += delta_run;
out_cnt += delta_run;
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
__syncthreads();
return out_cnt;
}
/**
* @brief Store a group of strings as a single concatenated string
*
* @param[in] dst destination buffer
* @param[in] strenc string encoder state
* @param[in] len(t) string length (per thread)
* @param[in] t thread id
*/
static __device__ void StoreStringData(uint8_t* dst,
strdata_enc_state_s* strenc,
uint32_t len,
int t)
{
// Start with summing up all the lengths
uint32_t pos = len;
uint32_t wt = t & 0x1f;
for (uint32_t n = 1; n < 32; n <<= 1) {
uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1));
pos += (wt & n) ? tmp : 0;
}
if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; }
dst += pos - len;
__syncthreads();
if (t < 32) {
uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0;
uint32_t wpos = wlen;
for (uint32_t n = 1; n < 16; n <<= 1) {
uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1));
wpos += (wt & n) ? tmp : 0;
}
if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; }
if (wt == 0xf) {
strenc->char_count = wpos; // Update stream position
}
}
__syncthreads();
// TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive characters at a time
// rather than have each thread do a memcpy
if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); }
}
/**
* @brief In-place conversion from lengths to positions
*
* @param[in] vals input values
* @param[in] numvals number of values
* @param[in] t thread id
*/
template <class T>
inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t)
{
for (uint32_t n = 1; n < numvals; n <<= 1) {
__syncthreads();
if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)];
}
}
template <int block_size, typename Storage>
static __device__ void encode_null_mask(orcenc_state_s* s,
bitmask_type const* pushdown_mask,
Storage& scan_storage,
int t)
{
if (s->stream.ids[CI_PRESENT] < 0) return;
auto const column = *s->chunk.column;
while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) {
// Number of rows read so far
auto present_rows = s->present_rows;
// valid_buf capacity is byte per thread in block
auto const buf_available_bits = encode_block_size * 8 - s->numvals;
// Number of rows for the block to process in this iteration
auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits);
// Number of rows for this thread to process in this iteration
auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8);
auto const row = s->chunk.null_mask_start_row + present_rows + t * 8;
auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t {
if (t_nrows == 0) return 0;
if (mask == nullptr) return 0xff;
auto const begin_offset = row + offset;
auto const end_offset = min(begin_offset + 8, offset + column.size());
auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset);
return mask_word & 0xff;
};
uint8_t pd_byte = (1 << t_nrows) - 1;
uint32_t pd_set_cnt = t_nrows;
uint32_t offset = t_nrows != 0 ? t * 8 : nrows;
if (pushdown_mask != nullptr) {
pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1);
pd_set_cnt = __popc(pd_byte);
// Scan the number of valid bits to get dst offset for each thread
hipcub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset);
}
auto const mask_byte = get_mask_byte(column.null_mask(), column.offset());
auto dst_offset = offset + s->nnz;
auto vbuf_bit_idx = [](int row) {
// valid_buf is a circular buffer with validity of 8 rows in each element
return row % (encode_block_size * 8);
};
if (dst_offset % 8 == 0 and pd_set_cnt == 8) {
s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte;
} else {
for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) {
// skip bits where pushdown mask is not set
if (not(pd_byte & (1 << bit_idx))) continue;
if (mask_byte & (1 << bit_idx)) {
set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++));
} else {
clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++));
}
}
}
__syncthreads();
if (t == block_size - 1) {
// Number of loaded rows, available for encode
s->numvals += offset + pd_set_cnt;
// Number of loaded rows (different from present_rows because of pushdown masks)
s->nnz += offset + pd_set_cnt;
}
present_rows += nrows;
if (!t) { s->present_rows = present_rows; }
__syncthreads();
// RLE encode the present stream
if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) {
auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7;
auto const nbytes_out = (s->numvals + flush) / 8;
auto const nrows_encoded =
ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8;
if (!t) {
// Number of rows encoded so far
s->present_out += nrows_encoded;
s->numvals -= min(s->numvals, nrows_encoded);
}
__syncthreads();
}
}
// reset shared state
if (t == 0) { s->nnz = 0; }
}
/**
* @brief Encode column data
*
* @param[in] chunks encoder chunks device array [column][rowgroup]
* @param[in, out] streams chunk streams device array [column][rowgroup]
*/
// blockDim {`encode_block_size`,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ union {
typename hipcub::BlockScan<uint32_t, block_size>::TempStorage scan_u32;
typename hipcub::BlockReduce<int32_t, block_size>::TempStorage i32;
typename hipcub::BlockReduce<int64_t, block_size>::TempStorage i64;
typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage u32;
typename hipcub::BlockReduce<uint64_t, block_size>::TempStorage u64;
} temp_storage;
orcenc_state_s* const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[col_id][group_id];
s->stream = streams[col_id][group_id];
s->cur_row = 0;
s->present_rows = 0;
s->present_out = 0;
s->numvals = 0;
s->numlengths = 0;
s->nnz = 0;
s->strm_pos[CI_DATA] = 0;
s->strm_pos[CI_PRESENT] = 0;
s->strm_pos[CI_INDEX] = 0;
// Dictionary data is encoded in a separate kernel
s->strm_pos[CI_DATA2] =
s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0;
s->strm_pos[CI_DICTIONARY] =
s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DICTIONARY] : 0;
}
__syncthreads();
auto const pushdown_mask = [&]() -> cudf::bitmask_type const* {
auto const parent_index = s->chunk.column->parent_index;
if (!parent_index.has_value()) return nullptr;
return chunks[parent_index.value()][0].column->pushdown_mask;
}();
encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t);
__syncthreads();
auto const column = *s->chunk.column;
while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) {
// Fetch non-null values
auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP;
if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) {
// Pass-through
__syncthreads();
if (!t) {
s->cur_row = s->chunk.num_rows;
s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len;
}
} else if (s->cur_row < s->chunk.num_rows) {
uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024;
uint32_t nrows =
min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)),
encode_block_size);
auto const row = s->chunk.start_row + s->cur_row + t;
auto const is_value_valid = [&]() {
if (t >= nrows) return false;
return bit_value_or(pushdown_mask, column.offset() + row, true) and
bit_value_or(column.null_mask(), column.offset() + row, true);
}();
s->buf.u32[t] = is_value_valid ? 1u : 0u;
// TODO: Could use a faster reduction relying on _popc() for the initial phase
lengths_to_positions(s->buf.u32, encode_block_size, t);
__syncthreads();
if (is_value_valid) {
int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1);
switch (s->chunk.type_kind) {
case INT:
case DATE:
case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break;
case DOUBLE:
case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break;
case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break;
case BOOLEAN:
case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break;
case TIMESTAMP: {
int64_t ts = column.element<int64_t>(row);
int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)];
int64_t seconds = ts / ts_scale;
int64_t nanos = (ts - seconds * ts_scale);
s->vals.i64[nz_idx] = seconds - orc_utc_epoch;
if (nanos != 0) {
// Trailing zeroes are encoded in the lower 3-bits
uint32_t zeroes = 0;
nanos *= powers_of_ten[min(s->chunk.scale, 9)];
if (!(nanos % 100)) {
nanos /= 100;
zeroes = 1;
while (zeroes < 7 && !(nanos % 10)) {
nanos /= 10;
zeroes++;
}
}
nanos = (nanos << 3) + zeroes;
}
s->lengths.u64[nz_idx] = nanos;
break;
}
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
uint32_t dict_idx = s->chunk.dict_index[row];
if (dict_idx > 0x7fff'ffffu) {
dict_idx = s->chunk.dict_index[dict_idx & 0x7fff'ffffu];
}
s->vals.u32[nz_idx] = dict_idx;
} else {
string_view value = column.element<string_view>(row);
s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data();
s->lengths.u32[nz_idx] = value.size_bytes();
}
break;
// Reusing the lengths array for the scale stream
// Note: can be written in a faster manner, given that all values are equal
case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break;
case LIST:
case MAP: {
auto const& offsets = column.child(lists_column_view::offsets_column_index);
// Compute list length from the offsets
s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) -
offsets.element<size_type>(row + column.offset());
} break;
default: break;
}
}
__syncthreads();
if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) {
// Store string data
uint32_t nz = s->buf.u32[511];
uint32_t nz_idx = (s->nnz + t) & 0x3ff;
uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0;
StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t);
if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; }
__syncthreads();
} else if (s->chunk.type_kind == BOOLEAN) {
// bool8 -> 8x bool1
uint32_t nz = s->buf.u32[511];
uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3;
if (t < n) {
uint32_t idx8 = (s->nnz & ~7) + (t << 3);
s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) |
((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) |
((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) |
((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) |
((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) |
((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) |
((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) |
((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0);
}
__syncthreads();
}
if (!t) {
uint32_t nz = s->buf.u32[511];
s->nnz += nz;
s->numvals += nz;
s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL ||
s->chunk.type_kind == LIST || s->chunk.type_kind == MAP ||
(s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2))
? nz
: 0;
s->cur_row += nrows;
}
__syncthreads();
// Encode values
if (s->numvals > 0) {
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n;
switch (s->chunk.type_kind) {
case SHORT:
case INT:
case DATE:
n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>(
s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32);
break;
case LONG:
case TIMESTAMP:
n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>(
s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64);
break;
case BYTE:
n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BOOLEAN:
n = ByteRLE<CI_DATA, 0x1ff>(s,
s->lengths.u8,
(s->nnz - s->numvals + flush) >> 3,
(s->numvals + flush) >> 3,
flush,
t) *
8;
break;
case FLOAT:
StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t);
n = s->numvals;
break;
case DOUBLE:
StoreBytes<CI_DATA, 0x1fff>(
s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t);
n = s->numvals;
break;
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>(
s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32);
} else {
n = s->numvals;
}
break;
case DECIMAL: {
if (is_value_valid) {
auto const id = column.type().id();
__uint128_t const zz_val =
id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row))
: id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row))
: zigzag(column.element<__int128_t>(row));
auto const offset =
(row == s->chunk.start_row) ? 0 : s->chunk.decimal_offsets[row - 1];
StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val);
}
n = s->numvals;
} break;
default: n = s->numvals; break;
}
__syncthreads();
if (!t) { s->numvals -= min(n, s->numvals); }
}
// Encode secondary stream values
if (s->numlengths > 0) {
uint32_t n;
switch (s->chunk.type_kind) {
case TIMESTAMP:
n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>(
s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64);
break;
case DECIMAL:
case LIST:
case MAP:
case STRING:
n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32);
break;
default: n = s->numlengths; break;
}
__syncthreads();
if (!t) { s->numlengths -= min(n, s->numlengths); }
}
}
__syncthreads();
}
__syncthreads();
if (t <= CI_PRESENT && s->stream.ids[t] >= 0) {
// Update actual compressed length
// (not needed for decimal data, whose exact size is known before encode)
if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL))
streams[col_id][group_id].lengths[t] = s->strm_pos[t];
if (!s->stream.data_ptrs[t]) {
streams[col_id][group_id].data_ptrs[t] =
static_cast<uint8_t*>(const_cast<void*>(column.head())) +
(column.offset() + s->chunk.start_row) * s->chunk.dtype_len;
}
}
}
/**
* @brief Encode column dictionaries
*
* @param[in] stripes Stripe dictionaries device array
* @param[in] columns Pre-order flattened device array of ORC column views
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeStringDictionaries(stripe_dictionary const* stripes,
device_span<orc_column_device_view const> columns,
device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage;
orcenc_state_s* const s = &state_g;
uint32_t stripe_id = blockIdx.x;
uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
int t = threadIdx.x;
if (t == 0) s->u.dict_stripe = &stripes[stripe_id];
__syncthreads();
auto const strm_ptr = &streams[s->u.dict_stripe->column_idx][s->u.dict_stripe->start_rowgroup];
if (t == 0) {
s->chunk = chunks[s->u.dict_stripe->column_idx][s->u.dict_stripe->start_rowgroup];
s->stream = *strm_ptr;
s->strm_pos[cid] = 0;
s->numlengths = 0;
s->nrows = s->u.dict_stripe->entry_count;
s->cur_row = 0;
}
auto const string_column = columns[s->u.dict_stripe->column_idx];
auto const dict_data = s->u.dict_stripe->data;
__syncthreads();
if (s->chunk.encoding_kind != DICTIONARY_V2) {
return; // This column isn't using dictionary encoding -> bail out
}
while (s->cur_row < s->nrows || s->numlengths != 0) {
uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512));
uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0;
if (cid == CI_DICTIONARY) {
// Encoding string contents
char const* ptr = nullptr;
uint32_t count = 0;
if (t < numvals) {
auto string_val = string_column.element<string_view>(string_idx);
ptr = string_val.data();
count = string_val.size_bytes();
}
s->u.strenc.str_data[t] = ptr;
StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY],
&s->u.strenc,
(ptr) ? count : 0,
t);
if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; }
} else {
// Encoding string lengths
uint32_t count =
(t < numvals)
? static_cast<uint32_t>(string_column.element<string_view>(string_idx).size_bytes())
: 0;
uint32_t nz_idx = (s->cur_row + t) & 0x3ff;
if (t < numvals) s->lengths.u32[nz_idx] = count;
__syncthreads();
if (s->numlengths + numvals > 0) {
uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage);
__syncthreads();
if (!t) {
s->numlengths += numvals;
s->numlengths -= min(n, s->numlengths);
}
}
}
if (t == 0) { s->cur_row += numvals; }
__syncthreads();
}
if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; }
}
/**
* @brief Merge chunked column data into a single contiguous stream
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in,out] streams List of encoder chunk streams [column][rowgroup]
*/
// blockDim {compact_streams_block_size,1,1}
__global__ void __launch_bounds__(compact_streams_block_size)
gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) StripeStream ss;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
auto const t = threadIdx.x;
if (t == 0) { ss = strm_desc[stripe_id][stream_id]; }
__syncthreads();
if (ss.data_ptr == nullptr) { return; }
auto const cid = ss.stream_type;
auto dst_ptr = ss.data_ptr;
for (auto group = ss.first_chunk_id; group < ss.first_chunk_id + ss.num_chunks; ++group) {
auto const len = streams[ss.column_id][group].lengths[cid];
if (len > 0) {
auto const src_ptr = streams[ss.column_id][group].data_ptrs[cid];
for (uint32_t i = t; i < len; i += blockDim.x) {
dst_ptr[i] = src_ptr[i];
}
__syncthreads();
}
if (t == 0) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; }
dst_ptr += len;
}
}
/**
* @brief Initializes compression input/output structures
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[out] inputs Per-block compression input buffers
* @param[out] outputs Per-block compression output buffers
* @param[out] results Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
* @param[in] max_comp_blk_size Max size of any block after compression
* @param[in] comp_block_align Required alignment for compressed blocks
*/
// blockDim {256,1,1}
__global__ void __launch_bounds__(256)
gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc,
device_2dspan<encoder_chunk_streams> streams, // const?
device_span<device_span<uint8_t const>> inputs,
device_span<device_span<uint8_t>> outputs,
device_span<compression_result> results,
device_span<uint8_t> compressed_bfr,
uint32_t comp_blk_size,
uint32_t max_comp_blk_size,
uint32_t comp_block_align)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t* volatile uncomp_base_g;
auto const padded_block_header_size = util::round_up_unsafe(block_header_size, comp_block_align);
auto const padded_comp_block_size = util::round_up_unsafe(max_comp_blk_size, comp_block_align);
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks;
uint8_t *src, *dst;
if (t == 0) {
ss = strm_desc[stripe_id][stream_id];
uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type];
}
__syncthreads();
src = uncomp_base_g;
dst = compressed_bfr.data() + ss.bfr_offset;
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1;
for (uint32_t b = t; b < num_blocks; b += 256) {
uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
inputs[ss.first_block + b] = {src + b * comp_blk_size, blk_size};
auto const dst_offset =
padded_block_header_size + b * (padded_block_header_size + padded_comp_block_size);
outputs[ss.first_block + b] = {dst + dst_offset, max_comp_blk_size};
results[ss.first_block + b] = {0, compression_status::FAILURE};
}
}
/**
* @brief Compacts compressed blocks into a single contiguous stream and updates the 3-byte
* block length fields
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] inputs Per-block compression input buffers
* @param[out] outputs Per-block compression output buffers
* @param[out] results Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
* @param[in] max_comp_blk_size Max size of any block after compression
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc,
device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
device_span<uint8_t> compressed_bfr,
uint32_t comp_blk_size,
uint32_t max_comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t const* volatile comp_src_g;
__shared__ uint32_t volatile comp_len_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks, b, blk_size;
uint8_t const* src;
uint8_t* dst;
if (t == 0) ss = strm_desc[stripe_id][stream_id];
__syncthreads();
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0;
dst = compressed_bfr.data() + ss.bfr_offset;
b = 0;
do {
if (t == 0) {
auto const src_len =
min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
auto dst_len = (results[ss.first_block + b].status == compression_status::SUCCESS)
? results[ss.first_block + b].bytes_written
: src_len;
uint32_t blk_size24{};
// Only use the compressed block if it's smaller than the uncompressed
// If compression failed, dst_len == src_len, so the uncompressed block will be used
if (src_len < dst_len) {
// Copy from uncompressed source
src = inputs[ss.first_block + b].data();
results[ss.first_block + b].bytes_written = src_len;
dst_len = src_len;
blk_size24 = dst_len * 2 + 1;
} else {
// Compressed block
src = outputs[ss.first_block + b].data();
blk_size24 = dst_len * 2 + 0;
}
dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
comp_src_g = src;
comp_len_g = dst_len;
}
__syncthreads();
src = comp_src_g;
blk_size = comp_len_g;
dst += 3; // skip over length written by thread0
if (src != dst) {
for (uint32_t i = 0; i < blk_size; i += 1024) {
uint8_t v = (i + t < blk_size) ? src[i + t] : 0;
__syncthreads();
if (i + t < blk_size) { dst[i + t] = v; }
}
}
dst += blk_size;
__syncthreads();
} while (++b < num_blocks);
// Update stripe stream with the compressed size
if (t == 0) {
strm_desc[stripe_id][stream_id].stream_size =
static_cast<uint32_t>(dst - (compressed_bfr.data() + ss.bfr_offset));
}
}
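// --- Editor sketch (not part of the original cuDF source; `sketch_` names are hypothetical) ---
// The 3-byte header written by thread 0 above follows the ORC compressed-block
// convention: header = (block_length << 1) | is_original, stored little-endian, where the
// low bit marks a block whose bytes were stored uncompressed. A matching host-side decoder:
struct sketch_orc_block_header {
  uint32_t length;
  bool is_original;  // true -> block bytes are stored uncompressed
};
static inline sketch_orc_block_header sketch_decode_block_header(uint8_t const* p)
{
  uint32_t const raw = p[0] | (p[1] << 8) | (p[2] << 16);
  return {raw >> 1, (raw & 1) != 0};
}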
void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk
dim3 dim_grid(chunks.size().first, chunks.size().second);
hipLaunchKernelGGL(( gpuEncodeOrcColumnData<encode_block_size>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, streams);
}
void EncodeStripeDictionaries(stripe_dictionary const* stripes,
device_span<orc_column_device_view const> columns,
device_2dspan<EncChunk const> chunks,
uint32_t num_string_columns,
uint32_t num_stripes,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1); // 512 threads per dictionary
dim3 dim_grid(num_string_columns * num_stripes, 2);
hipLaunchKernelGGL(( gpuEncodeStringDictionaries<512>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), stripes, columns, chunks, enc_streams);
}
void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(compact_streams_block_size, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
hipLaunchKernelGGL(( gpuCompactOrcDataStreams), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_desc, enc_streams);
}
std::optional<writer_compression_statistics> CompressOrcDataStreams(
device_span<uint8_t> compressed_data,
uint32_t num_compressed_blocks,
CompressionKind compression,
uint32_t comp_blk_size,
uint32_t max_comp_blk_size,
uint32_t comp_block_align,
bool collect_statistics,
device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
device_span<compression_result> comp_res,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<device_span<uint8_t const>> comp_in(num_compressed_blocks, stream);
rmm::device_uvector<device_span<uint8_t>> comp_out(num_compressed_blocks, stream);
dim3 dim_block_init(256, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
hipLaunchKernelGGL(( gpuInitCompressionBlocks), dim3(dim_grid), dim3(dim_block_init), 0, stream.value(), strm_desc,
enc_streams,
comp_in,
comp_out,
comp_res,
compressed_data,
comp_blk_size,
max_comp_blk_size,
comp_block_align);
if (compression == SNAPPY) {
try {
if (nvcomp::is_compression_disabled(nvcomp::compression_type::SNAPPY)) {
gpu_snap(comp_in, comp_out, comp_res, stream);
} else {
nvcomp::batched_compress(
nvcomp::compression_type::SNAPPY, comp_in, comp_out, comp_res, stream);
}
} catch (...) {
// There was an error in compressing so set an error status for each block
thrust::for_each(
rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
[] __device__(compression_result & stat) { stat.status = compression_status::FAILURE; });
// Since SNAPPY is the default compression (may not be explicitly requested), fall back to
// writing without compression
CUDF_LOG_WARN("ORC writer: compression failed, writing uncompressed data");
}
} else if (compression == ZLIB) {
if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::DEFLATE);
reason) {
CUDF_FAIL("Compression error: " + reason.value());
}
nvcomp::batched_compress(
nvcomp::compression_type::DEFLATE, comp_in, comp_out, comp_res, stream);
} else if (compression == ZSTD) {
if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::ZSTD);
reason) {
CUDF_FAIL("Compression error: " + reason.value());
}
nvcomp::batched_compress(nvcomp::compression_type::ZSTD, comp_in, comp_out, comp_res, stream);
} else if (compression != NONE) {
CUDF_FAIL("Unsupported compression type");
}
dim3 dim_block_compact(1024, 1);
hipLaunchKernelGGL(( gpuCompactCompressedBlocks), dim3(dim_grid), dim3(dim_block_compact), 0, stream.value(),
strm_desc, comp_in, comp_out, comp_res, compressed_data, comp_blk_size, max_comp_blk_size);
if (collect_statistics) {
return cudf::io::collect_compression_statistics(comp_in, comp_res, stream);
} else {
return std::nullopt;
}
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
| db50cc739995ce08d9e6ebe2ebdcbf493c1df2e8.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_gpu.hpp"
#include <cudf/io/orc_types.hpp>
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/block_utils.cuh>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cub/cub.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
using cudf::detail::device_2dspan;
constexpr int scratch_buffer_size = 512 * 4;
constexpr int compact_streams_block_size = 1024;
// Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2
// Workaround replaces zero-length patch lists by a dummy zero patch
constexpr bool zero_pll_war = true;
struct byterle_enc_state_s {
uint32_t literal_run;
uint32_t repeat_run;
volatile uint32_t rpt_map[(512 / 32) + 1];
};
struct intrle_enc_state_s {
uint32_t literal_run;
uint32_t delta_run;
uint32_t literal_mode;
uint32_t literal_w;
uint32_t hdr_bytes;
uint32_t pl_bytes;
volatile uint32_t delta_map[(512 / 32) + 1];
};
struct strdata_enc_state_s {
uint32_t char_count;
uint32_t lengths_red[(512 / 32)];
char const* str_data[512];
};
struct orcenc_state_s {
uint32_t cur_row; // Current row in group
uint32_t present_rows; // # of rows in present buffer
uint32_t present_out; // # of rows in present buffer that have been flushed
uint32_t nrows; // # of rows in current batch
uint32_t numvals; // # of non-zero values in current batch (<=nrows)
uint32_t numlengths; // # of non-zero values in DATA2 batch
uint32_t nnz; // Running count of non-null values
encoder_chunk_streams stream;
EncChunk chunk;
uint32_t strm_pos[CI_NUM_STREAMS];
uint8_t valid_buf[512]; // valid map bits
union {
byterle_enc_state_s byterle;
intrle_enc_state_s intrle;
strdata_enc_state_s strenc;
stripe_dictionary const* dict_stripe;
} u;
union {
uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer
uint32_t u32[scratch_buffer_size / 4];
} buf;
union {
uint8_t u8[2048];
uint32_t u32[1024];
int32_t i32[1024];
uint64_t u64[1024];
int64_t i64[1024];
} vals;
union {
uint8_t u8[2048];
uint32_t u32[1024];
uint64_t u64[1024];
} lengths;
};
static inline __device__ uint32_t zigzag(uint32_t v) { return v; }
static inline __device__ uint32_t zigzag(int32_t v)
{
int32_t s = (v >> 31);
return ((v ^ s) * 2) - s;
}
static inline __device__ uint64_t zigzag(uint64_t v) { return v; }
static inline __device__ uint64_t zigzag(int64_t v)
{
int64_t s = (v < 0) ? 1 : 0;
return ((v ^ -s) * 2) + s;
}
static inline __device__ __uint128_t zigzag(__int128_t v)
{
int64_t s = (v < 0) ? 1 : 0;
return ((v ^ -s) * 2) + s;
}
static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; }
static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; }
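// --- Editor note (sketch added for clarity, not part of the original cuDF source) ---
// zigzag() interleaves signed values so small magnitudes stay small:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ... The inverse mapping, for reference:
static inline int64_t sketch_unzigzag64(uint64_t u)
{
  return static_cast<int64_t>(u >> 1) ^ -static_cast<int64_t>(u & 1);
}
// CountLeadingBytes32/64 count the whole zero high-order bytes of a value, e.g.
// CountLeadingBytes32(0x00001234) == 2; the RLE writers use this to pick the
// narrowest byte width that still covers the value range.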
/**
* @brief Raw data output
*
* @tparam cid stream type (strm_pos[cid] will be updated and output stored at
* streams[cid]+strm_pos[cid])
* @tparam inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] count number of bytes to encode
* @param[in] t thread id
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ void StoreBytes(
orcenc_state_s* s, uint8_t const* inbuf, uint32_t inpos, uint32_t count, int t)
{
uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
while (count > 0) {
uint32_t n = min(count, 512);
if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; }
dst += n;
inpos += n;
count -= n;
}
__syncthreads();
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
}
/**
* @brief ByteRLE encoder
*
* @tparam cid stream type (strm_pos[cid] will be updated and output stored at
* streams[cid]+strm_pos[cid])
* @tparam inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ uint32_t ByteRLE(
orcenc_state_s* s, uint8_t const* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0) {
uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run,
maxvals = min(numvals, 512);
if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map;
__syncthreads();
if (t == 0) {
// Find the start of an identical 3-byte sequence
// TBD: The two loops below could be eliminated using more ballot+ffs using warp0
literal_run = 0;
repeat_run = 0;
while (literal_run < maxvals) {
uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1];
uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1);
if (mask) {
uint32_t literal_run_ofs = __ffs(mask) - 1;
literal_run += literal_run_ofs;
repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1));
if (repeat_run + literal_run_ofs == 32) {
while (next == ~0) {
uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1;
next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0;
repeat_run += 32;
}
repeat_run += __ffs(~next) - 1;
}
repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals));
if (repeat_run < 3) {
literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0;
repeat_run = 0;
}
break;
}
rpt_map = next;
literal_run += 32;
}
if (repeat_run >= 130) {
// Limit large runs to multiples of 130
repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 2 * 130 : 130;
} else if (literal_run && literal_run + repeat_run == maxvals) {
repeat_run = 0; // Try again at next iteration
}
s->u.byterle.repeat_run = repeat_run;
s->u.byterle.literal_run = min(literal_run, maxvals);
}
__syncthreads();
literal_run = s->u.byterle.literal_run;
if (!flush && literal_run == numvals) {
literal_run &= ~0x7f;
if (!literal_run) break;
}
if (literal_run > 0) {
uint32_t num_runs = (literal_run + 0x7f) >> 7;
if (t < literal_run) {
uint32_t run_id = t >> 7;
uint32_t run = min(literal_run - run_id * 128, 128);
if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run;
dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += num_runs + literal_run;
out_cnt += literal_run;
numvals -= literal_run;
inpos += literal_run;
}
repeat_run = s->u.byterle.repeat_run;
if (repeat_run > 0) {
while (repeat_run >= 130) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = 0x7f;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += 130;
numvals -= 130;
inpos += 130;
repeat_run -= 130;
}
if (!flush && repeat_run == numvals) {
// Wait for more data in case we can continue the run later
break;
}
if (repeat_run >= 3) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = repeat_run - 3;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += repeat_run;
numvals -= repeat_run;
inpos += repeat_run;
}
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
return out_cnt;
}
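// --- Editor sketch (not part of the original cuDF source; `sketch_` names are hypothetical) ---
// ORC byte-RLE control bytes as emitted above: a control in [0x00, 0x7f] means
// "repeat the next byte control + 3 times" (so 0x7f -> 130 repeats), while a control
// in [0x80, 0xff] is the negated count of literal bytes that follow
// (0xff -> 1 literal, 0x80 -> 128 literals). A host-side reference decoder:
static inline size_t sketch_byte_rle_decode(uint8_t const* in, size_t in_len, uint8_t* out)
{
  size_t n = 0;
  size_t i = 0;
  while (i < in_len) {
    uint8_t const ctrl = in[i++];
    if (ctrl < 0x80) {  // repeat run of (ctrl + 3) copies of the next byte
      for (uint32_t k = 0; k < ctrl + 3u; ++k) { out[n++] = in[i]; }
      ++i;
    } else {  // literal run of (0x100 - ctrl) bytes
      for (uint32_t k = 0; k < 0x100u - ctrl; ++k) { out[n++] = in[i++]; }
    }
  }
  return n;
}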
/**
* @brief Maps the symbol size in bytes to RLEv2 5-bit length code
*/
static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = {
0, 7, 15, 23, 27, 28, 29, 30, 31};
/**
* @brief Encode a varint value, return the number of bytes written
*/
static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v)
{
uint32_t bytecnt = 0;
for (;;) {
auto c = static_cast<uint32_t>(v & 0x7f);
v >>= 7u;
if (v == 0) {
dst[bytecnt++] = c;
break;
} else {
dst[bytecnt++] = c + 0x80;
}
}
return bytecnt;
}
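// --- Editor sketch (not part of the original cuDF source; `sketch_` names are hypothetical) ---
// StoreVarint writes the value 7 bits at a time, least-significant group first, setting
// bit 7 on every byte except the last (standard base-128 / LEB128 encoding).
// E.g. 300 = 0b1'0010'1100 is stored as {0xAC, 0x02}. The inverse, for reference:
static inline __uint128_t sketch_load_varint(uint8_t const* p, uint32_t* bytecnt)
{
  __uint128_t v  = 0;
  uint32_t shift = 0, n = 0;
  uint8_t b;
  do {
    b = p[n++];
    v |= static_cast<__uint128_t>(b & 0x7f) << shift;
    shift += 7;
  } while (b & 0x80);
  if (bytecnt != nullptr) { *bytecnt = n; }
  return v;
}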
template <class T>
static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w)
{
for (uint32_t i = 0, b = w * 8; i < w; ++i) {
b -= 8;
dst[i] = static_cast<uint8_t>(v >> b);
}
}
// Combine and store bits for symbol widths less than 8
static inline __device__ void StoreBitsBigEndian(
uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t)
{
if (t <= (num_vals | 0x1f)) {
uint32_t mask;
if (w <= 1) {
v = (v << 1) | (shuffle_xor(v, 1) & 0x1);
v = (v << 2) | (shuffle_xor(v, 2) & 0x3);
v = (v << 4) | (shuffle_xor(v, 4) & 0xf);
mask = 0x7;
} else if (w <= 2) {
v = (v << 2) | (shuffle_xor(v, 1) & 0x3);
v = (v << 4) | (shuffle_xor(v, 2) & 0xf);
mask = 0x3;
} else // if (w <= 4)
{
v = (v << 4) | (shuffle_xor(v, 1) & 0xf);
mask = 0x1;
}
if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); }
}
}
/**
* @brief Integer RLEv2 encoder
*
* @tparam cid stream type (strm_pos[cid] will be updated and output stored at
* streams[cid]+strm_pos[cid])
* @tparam inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to perform block reduce
*
* @return number of input values encoded
*/
template <StreamIndexType cid,
class T,
bool is_signed,
uint32_t inmask,
int block_size,
typename Storage>
static __device__ uint32_t IntegerRLE(
orcenc_state_s* s, T const* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage)
{
using block_reduce = cub::BlockReduce<T, block_size>;
uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
__shared__ volatile uint64_t block_vmin;
while (numvals > 0) {
T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0;
uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512),
literal_run, delta_run;
if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map;
__syncthreads();
if (!t) {
// Find the start of the next delta run (2 consecutive values with the same delta)
literal_run = delta_run = 0;
while (literal_run < maxvals) {
if (delta_map != 0) {
uint32_t literal_run_ofs = __ffs(delta_map) - 1;
literal_run += literal_run_ofs;
delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1));
if (literal_run_ofs + delta_run == 32) {
for (;;) {
uint32_t delta_idx = (literal_run + delta_run) >> 5;
delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0;
if (delta_map != ~0) break;
delta_run += 32;
}
delta_run += __ffs(~delta_map) - 1;
}
delta_run += 2;
break;
}
literal_run += 32;
delta_map = s->u.intrle.delta_map[(literal_run >> 5)];
}
literal_run = min(literal_run, maxvals);
s->u.intrle.literal_run = literal_run;
s->u.intrle.delta_run = min(delta_run, maxvals - literal_run);
}
__syncthreads();
literal_run = s->u.intrle.literal_run;
// Find minimum and maximum values
if (literal_run > 0) {
// Find min & max
T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max();
T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min();
uint32_t literal_mode, literal_w;
vmin = block_reduce(temp_storage).Reduce(vmin, cub::Min());
__syncthreads();
vmax = block_reduce(temp_storage).Reduce(vmax, cub::Max());
if (t == 0) {
uint32_t mode1_w, mode2_w;
typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2;
block_vmin = static_cast<uint64_t>(vmin);
if constexpr (sizeof(T) > 4) {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7);
mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7);
} else {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3);
mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3);
}
// Decide between mode1 & mode2 (also mode3 for length=2 repeat)
if (vrange_mode2 == 0 && mode1_w > 1) {
// Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >=
// 3)
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
bytecnt += StoreVarint(dst + 2, vrange_mode1);
dst[bytecnt++] = 0; // Zero delta
s->u.intrle.literal_mode = 3;
s->u.intrle.literal_w = bytecnt;
} else {
uint32_t range, w;
// Mode 2 base value cannot be bigger than max int64_t, i.e. the first bit has to be 0
if (vmin <= std::numeric_limits<int64_t>::max() and mode1_w > mode2_w and
(literal_run - 1) * (mode1_w - mode2_w) > 4) {
s->u.intrle.literal_mode = 2;
w = mode2_w;
range = (uint32_t)vrange_mode2;
} else {
s->u.intrle.literal_mode = 1;
w = mode1_w;
range = (uint32_t)vrange_mode1;
}
if (w == 1)
w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 2 : 1;
else
w <<= 3; // bytes -> bits
s->u.intrle.literal_w = w;
}
}
__syncthreads();
vmin = static_cast<T>(block_vmin);
literal_mode = s->u.intrle.literal_mode;
literal_w = s->u.intrle.literal_w;
if (literal_mode == 1) {
// Direct mode
if (!t) {
dst[0] = 0x40 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
}
dst += 2;
typename std::make_unsigned<T>::type zzv0 = v0;
if (t < literal_run) { zzv0 = zigzag(v0); }
if (literal_w < 8) {
StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t);
} else if (t < literal_run) {
StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3));
}
} else if (literal_mode == 2) {
// Patched base mode
if (!t) {
uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1;
vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin;
bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7))
: (4 - min(CountLeadingBytes32(vmax << bv_scale), 3));
if (zero_pll_war) {
// Insert a dummy zero patch
pll = 1;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0;
} else {
pll = 0;
}
dst[0] = 0x80 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw];
dst[3] = ((pgw - 1) << 5) | pll;
if (is_signed) {
vmax >>= 1;
vmax |= vmin & ((T)1 << (bw * 8 - 1));
}
StoreBytesBigEndian(dst + 4, vmax, bw);
s->u.intrle.hdr_bytes = 4 + bw;
s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3;
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
v0 -= (t < literal_run) ? vmin : 0;
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
dst += s->u.intrle.pl_bytes;
} else {
// Delta mode
dst += literal_w;
literal_w = 0;
}
dst += (literal_run * literal_w + 7) >> 3;
numvals -= literal_run;
inpos += literal_run;
out_cnt += literal_run;
__syncthreads();
}
delta_run = s->u.intrle.delta_run;
if (delta_run > 0) {
if (t == literal_run) {
int64_t delta = (int64_t)v1 - (int64_t)v0;
uint64_t delta_base = zigzag(v0);
if (delta == 0 && delta_run >= 3 && delta_run <= 10) {
// Short repeat
uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7);
dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3);
for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) {
b -= 8;
dst[1 + i] = static_cast<uint8_t>(delta_base >> b);
}
s->u.intrle.hdr_bytes = 1 + delta_bw;
} else {
// Delta
uint64_t delta_u = zigzag(delta);
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((delta_run - 1) >> 8);
dst[1] = (delta_run - 1) & 0xff;
bytecnt += StoreVarint(dst + bytecnt, delta_base);
bytecnt += StoreVarint(dst + bytecnt, delta_u);
s->u.intrle.hdr_bytes = bytecnt;
}
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
numvals -= delta_run;
inpos += delta_run;
out_cnt += delta_run;
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
__syncthreads();
return out_cnt;
}
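// --- Editor sketch (not part of the original cuDF source; `sketch_` names are hypothetical) ---
// The two high bits of the first header byte written above select the ORC RLEv2
// sub-encoding: 00 short-repeat, 01 direct (the 0x40 + ... header), 10 patched-base
// (0x80 + ...), 11 delta (0xC0 + ...). A tiny classifier for reference:
enum class sketch_rlev2_mode : uint8_t { SHORT_REPEAT = 0, DIRECT = 1, PATCHED_BASE = 2, DELTA = 3 };
static inline sketch_rlev2_mode sketch_rlev2_mode_of(uint8_t first_header_byte)
{
  return static_cast<sketch_rlev2_mode>(first_header_byte >> 6);
}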
/**
* @brief Store a group of strings as a single concatenated string
*
* @param[in] dst destination buffer
* @param[in] strenc string encoder state
* @param[in] len(t) string length (per thread)
* @param[in] t thread id
*/
static __device__ void StoreStringData(uint8_t* dst,
strdata_enc_state_s* strenc,
uint32_t len,
int t)
{
// Start with summing up all the lengths
uint32_t pos = len;
uint32_t wt = t & 0x1f;
for (uint32_t n = 1; n < 32; n <<= 1) {
uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1));
pos += (wt & n) ? tmp : 0;
}
if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; }
dst += pos - len;
__syncthreads();
if (t < 32) {
uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0;
uint32_t wpos = wlen;
for (uint32_t n = 1; n < 16; n <<= 1) {
uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1));
wpos += (wt & n) ? tmp : 0;
}
if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; }
if (wt == 0xf) {
strenc->char_count = wpos; // Update stream position
}
}
__syncthreads();
  // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive characters at a time
  // rather than have each thread do a memcpy
if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); }
}
/**
* @brief In-place conversion from lengths to positions
*
* @param[in] vals input values
* @param[in] numvals number of values
* @param[in] t thread id
*/
template <class T>
inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t)
{
for (uint32_t n = 1; n < numvals; n <<= 1) {
__syncthreads();
if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)];
}
}
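// --- Editor note (sketch added for clarity, not part of the original cuDF source) ---
// lengths_to_positions is a block-wide in-place inclusive prefix sum: afterwards
// vals[t] holds the sum of vals[0..t]. E.g. lengths {2, 0, 3, 1} become running
// positions {2, 2, 5, 6}, which is how per-thread valid flags and string lengths
// are turned into write offsets. A sequential host-side equivalent:
template <class T>
inline void sketch_lengths_to_positions(T* vals, uint32_t numvals)
{
  for (uint32_t i = 1; i < numvals; ++i) {
    vals[i] += vals[i - 1];
  }
}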
template <int block_size, typename Storage>
static __device__ void encode_null_mask(orcenc_state_s* s,
bitmask_type const* pushdown_mask,
Storage& scan_storage,
int t)
{
if (s->stream.ids[CI_PRESENT] < 0) return;
auto const column = *s->chunk.column;
while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) {
// Number of rows read so far
auto present_rows = s->present_rows;
// valid_buf capacity is byte per thread in block
auto const buf_available_bits = encode_block_size * 8 - s->numvals;
// Number of rows for the block to process in this iteration
auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits);
// Number of rows for this thread to process in this iteration
auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8);
auto const row = s->chunk.null_mask_start_row + present_rows + t * 8;
auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t {
if (t_nrows == 0) return 0;
if (mask == nullptr) return 0xff;
auto const begin_offset = row + offset;
auto const end_offset = min(begin_offset + 8, offset + column.size());
auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset);
return mask_word & 0xff;
};
uint8_t pd_byte = (1 << t_nrows) - 1;
uint32_t pd_set_cnt = t_nrows;
uint32_t offset = t_nrows != 0 ? t * 8 : nrows;
if (pushdown_mask != nullptr) {
pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1);
pd_set_cnt = __popc(pd_byte);
// Scan the number of valid bits to get dst offset for each thread
cub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset);
}
auto const mask_byte = get_mask_byte(column.null_mask(), column.offset());
auto dst_offset = offset + s->nnz;
auto vbuf_bit_idx = [](int row) {
// valid_buf is a circular buffer with validity of 8 rows in each element
return row % (encode_block_size * 8);
};
if (dst_offset % 8 == 0 and pd_set_cnt == 8) {
s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte;
} else {
for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) {
// skip bits where pushdown mask is not set
if (not(pd_byte & (1 << bit_idx))) continue;
if (mask_byte & (1 << bit_idx)) {
set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++));
} else {
clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++));
}
}
}
__syncthreads();
if (t == block_size - 1) {
// Number of loaded rows, available for encode
s->numvals += offset + pd_set_cnt;
// Number of loaded rows (different from present_rows because of pushdown masks)
s->nnz += offset + pd_set_cnt;
}
present_rows += nrows;
if (!t) { s->present_rows = present_rows; }
__syncthreads();
// RLE encode the present stream
if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) {
auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7;
auto const nbytes_out = (s->numvals + flush) / 8;
auto const nrows_encoded =
ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8;
if (!t) {
// Number of rows encoded so far
s->present_out += nrows_encoded;
s->numvals -= min(s->numvals, nrows_encoded);
}
__syncthreads();
}
}
// reset shared state
if (t == 0) { s->nnz = 0; }
}
/**
* @brief Encode column data
*
* @param[in] chunks encoder chunks device array [column][rowgroup]
* @param[in, out] streams chunk streams device array [column][rowgroup]
*/
// blockDim {`encode_block_size`,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ union {
typename cub::BlockScan<uint32_t, block_size>::TempStorage scan_u32;
typename cub::BlockReduce<int32_t, block_size>::TempStorage i32;
typename cub::BlockReduce<int64_t, block_size>::TempStorage i64;
typename cub::BlockReduce<uint32_t, block_size>::TempStorage u32;
typename cub::BlockReduce<uint64_t, block_size>::TempStorage u64;
} temp_storage;
orcenc_state_s* const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[col_id][group_id];
s->stream = streams[col_id][group_id];
s->cur_row = 0;
s->present_rows = 0;
s->present_out = 0;
s->numvals = 0;
s->numlengths = 0;
s->nnz = 0;
s->strm_pos[CI_DATA] = 0;
s->strm_pos[CI_PRESENT] = 0;
s->strm_pos[CI_INDEX] = 0;
// Dictionary data is encoded in a separate kernel
s->strm_pos[CI_DATA2] =
s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0;
s->strm_pos[CI_DICTIONARY] =
s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DICTIONARY] : 0;
}
__syncthreads();
auto const pushdown_mask = [&]() -> cudf::bitmask_type const* {
auto const parent_index = s->chunk.column->parent_index;
if (!parent_index.has_value()) return nullptr;
return chunks[parent_index.value()][0].column->pushdown_mask;
}();
encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t);
__syncthreads();
auto const column = *s->chunk.column;
while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) {
// Fetch non-null values
auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP;
if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) {
// Pass-through
__syncthreads();
if (!t) {
s->cur_row = s->chunk.num_rows;
s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len;
}
} else if (s->cur_row < s->chunk.num_rows) {
uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024;
uint32_t nrows =
min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)),
encode_block_size);
auto const row = s->chunk.start_row + s->cur_row + t;
auto const is_value_valid = [&]() {
if (t >= nrows) return false;
return bit_value_or(pushdown_mask, column.offset() + row, true) and
bit_value_or(column.null_mask(), column.offset() + row, true);
}();
s->buf.u32[t] = is_value_valid ? 1u : 0u;
// TODO: Could use a faster reduction relying on _popc() for the initial phase
lengths_to_positions(s->buf.u32, encode_block_size, t);
__syncthreads();
if (is_value_valid) {
int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1);
switch (s->chunk.type_kind) {
case INT:
case DATE:
case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break;
case DOUBLE:
case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break;
case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break;
case BOOLEAN:
case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break;
case TIMESTAMP: {
int64_t ts = column.element<int64_t>(row);
int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)];
int64_t seconds = ts / ts_scale;
int64_t nanos = (ts - seconds * ts_scale);
s->vals.i64[nz_idx] = seconds - orc_utc_epoch;
if (nanos != 0) {
// Trailing zeroes are encoded in the lower 3-bits
uint32_t zeroes = 0;
nanos *= powers_of_ten[min(s->chunk.scale, 9)];
if (!(nanos % 100)) {
nanos /= 100;
zeroes = 1;
while (zeroes < 7 && !(nanos % 10)) {
nanos /= 10;
zeroes++;
}
}
nanos = (nanos << 3) + zeroes;
}
s->lengths.u64[nz_idx] = nanos;
break;
}
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
uint32_t dict_idx = s->chunk.dict_index[row];
if (dict_idx > 0x7fff'ffffu) {
dict_idx = s->chunk.dict_index[dict_idx & 0x7fff'ffffu];
}
s->vals.u32[nz_idx] = dict_idx;
} else {
string_view value = column.element<string_view>(row);
s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data();
s->lengths.u32[nz_idx] = value.size_bytes();
}
break;
// Reusing the lengths array for the scale stream
// Note: can be written in a faster manner, given that all values are equal
case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break;
case LIST:
case MAP: {
auto const& offsets = column.child(lists_column_view::offsets_column_index);
// Compute list length from the offsets
s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) -
offsets.element<size_type>(row + column.offset());
} break;
default: break;
}
}
__syncthreads();
if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) {
// Store string data
uint32_t nz = s->buf.u32[511];
uint32_t nz_idx = (s->nnz + t) & 0x3ff;
uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0;
StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t);
if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; }
__syncthreads();
} else if (s->chunk.type_kind == BOOLEAN) {
// bool8 -> 8x bool1
uint32_t nz = s->buf.u32[511];
uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3;
if (t < n) {
uint32_t idx8 = (s->nnz & ~7) + (t << 3);
s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) |
((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) |
((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) |
((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) |
((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) |
((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) |
((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) |
((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0);
}
__syncthreads();
}
if (!t) {
uint32_t nz = s->buf.u32[511];
s->nnz += nz;
s->numvals += nz;
s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL ||
s->chunk.type_kind == LIST || s->chunk.type_kind == MAP ||
(s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2))
? nz
: 0;
s->cur_row += nrows;
}
__syncthreads();
// Encode values
if (s->numvals > 0) {
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n;
switch (s->chunk.type_kind) {
case SHORT:
case INT:
case DATE:
n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>(
s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32);
break;
case LONG:
case TIMESTAMP:
n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>(
s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64);
break;
case BYTE:
n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BOOLEAN:
n = ByteRLE<CI_DATA, 0x1ff>(s,
s->lengths.u8,
(s->nnz - s->numvals + flush) >> 3,
(s->numvals + flush) >> 3,
flush,
t) *
8;
break;
case FLOAT:
StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t);
n = s->numvals;
break;
case DOUBLE:
StoreBytes<CI_DATA, 0x1fff>(
s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t);
n = s->numvals;
break;
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>(
s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32);
} else {
n = s->numvals;
}
break;
case DECIMAL: {
if (is_value_valid) {
auto const id = column.type().id();
__uint128_t const zz_val =
id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row))
: id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row))
: zigzag(column.element<__int128_t>(row));
auto const offset =
(row == s->chunk.start_row) ? 0 : s->chunk.decimal_offsets[row - 1];
StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val);
}
n = s->numvals;
} break;
default: n = s->numvals; break;
}
__syncthreads();
if (!t) { s->numvals -= min(n, s->numvals); }
}
// Encode secondary stream values
if (s->numlengths > 0) {
uint32_t n;
switch (s->chunk.type_kind) {
case TIMESTAMP:
n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>(
s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64);
break;
case DECIMAL:
case LIST:
case MAP:
case STRING:
n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32);
break;
default: n = s->numlengths; break;
}
__syncthreads();
if (!t) { s->numlengths -= min(n, s->numlengths); }
}
}
__syncthreads();
}
__syncthreads();
if (t <= CI_PRESENT && s->stream.ids[t] >= 0) {
// Update actual compressed length
// (not needed for decimal data, whose exact size is known before encode)
if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL))
streams[col_id][group_id].lengths[t] = s->strm_pos[t];
if (!s->stream.data_ptrs[t]) {
streams[col_id][group_id].data_ptrs[t] =
static_cast<uint8_t*>(const_cast<void*>(column.head())) +
(column.offset() + s->chunk.start_row) * s->chunk.dtype_len;
}
}
}
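// --- Editor sketch (not part of the original cuDF source; `sketch_` names are hypothetical) ---
// The TIMESTAMP case in gpuEncodeOrcColumnData above packs the secondary-stream value by
// stripping trailing decimal zeros from the nanosecond count and storing the strip code in
// the low 3 bits (code 1 -> two zeros stripped, each extra unit -> one more zero, up to 7).
// E.g. 123'000'000 ns is stored as (123 << 3) | 5. A matching decoder sketch:
static inline uint64_t sketch_decode_ts_nanos(uint64_t encoded)
{
  uint64_t nanos        = encoded >> 3;
  uint32_t const zeroes = static_cast<uint32_t>(encoded & 7);
  if (zeroes != 0) {
    for (uint32_t i = 0; i < zeroes + 1; ++i) {
      nanos *= 10;
    }
  }
  return nanos;
}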
/**
* @brief Encode column dictionaries
*
* @param[in] stripes Stripe dictionaries device array
* @param[in] columns Pre-order flattened device array of ORC column views
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeStringDictionaries(stripe_dictionary const* stripes,
device_span<orc_column_device_view const> columns,
device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ typename cub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage;
orcenc_state_s* const s = &state_g;
uint32_t stripe_id = blockIdx.x;
uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
int t = threadIdx.x;
if (t == 0) s->u.dict_stripe = &stripes[stripe_id];
__syncthreads();
auto const strm_ptr = &streams[s->u.dict_stripe->column_idx][s->u.dict_stripe->start_rowgroup];
if (t == 0) {
s->chunk = chunks[s->u.dict_stripe->column_idx][s->u.dict_stripe->start_rowgroup];
s->stream = *strm_ptr;
s->strm_pos[cid] = 0;
s->numlengths = 0;
s->nrows = s->u.dict_stripe->entry_count;
s->cur_row = 0;
}
auto const string_column = columns[s->u.dict_stripe->column_idx];
auto const dict_data = s->u.dict_stripe->data;
__syncthreads();
if (s->chunk.encoding_kind != DICTIONARY_V2) {
return; // This column isn't using dictionary encoding -> bail out
}
while (s->cur_row < s->nrows || s->numlengths != 0) {
uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512));
uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0;
if (cid == CI_DICTIONARY) {
// Encoding string contents
char const* ptr = nullptr;
uint32_t count = 0;
if (t < numvals) {
auto string_val = string_column.element<string_view>(string_idx);
ptr = string_val.data();
count = string_val.size_bytes();
}
s->u.strenc.str_data[t] = ptr;
StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY],
&s->u.strenc,
(ptr) ? count : 0,
t);
if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; }
} else {
// Encoding string lengths
uint32_t count =
(t < numvals)
? static_cast<uint32_t>(string_column.element<string_view>(string_idx).size_bytes())
: 0;
uint32_t nz_idx = (s->cur_row + t) & 0x3ff;
if (t < numvals) s->lengths.u32[nz_idx] = count;
__syncthreads();
if (s->numlengths + numvals > 0) {
uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage);
__syncthreads();
if (!t) {
s->numlengths += numvals;
s->numlengths -= min(n, s->numlengths);
}
}
}
if (t == 0) { s->cur_row += numvals; }
__syncthreads();
}
if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; }
}
/**
* @brief Merge chunked column data into a single contiguous stream
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in,out] streams List of encoder chunk streams [column][rowgroup]
*/
// blockDim {compact_streams_block_size,1,1}
__global__ void __launch_bounds__(compact_streams_block_size)
gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) StripeStream ss;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
auto const t = threadIdx.x;
if (t == 0) { ss = strm_desc[stripe_id][stream_id]; }
__syncthreads();
if (ss.data_ptr == nullptr) { return; }
auto const cid = ss.stream_type;
auto dst_ptr = ss.data_ptr;
for (auto group = ss.first_chunk_id; group < ss.first_chunk_id + ss.num_chunks; ++group) {
auto const len = streams[ss.column_id][group].lengths[cid];
if (len > 0) {
auto const src_ptr = streams[ss.column_id][group].data_ptrs[cid];
for (uint32_t i = t; i < len; i += blockDim.x) {
dst_ptr[i] = src_ptr[i];
}
__syncthreads();
}
if (t == 0) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; }
dst_ptr += len;
}
}
/**
* @brief Initializes compression input/output structures
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
 * @param[in] streams List of encoder chunk streams [column][rowgroup]
* @param[out] inputs Per-block compression input buffers
* @param[out] outputs Per-block compression output buffers
* @param[out] results Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
* @param[in] max_comp_blk_size Max size of any block after compression
* @param[in] comp_block_align Required alignment for compressed blocks
*/
// blockDim {256,1,1}
__global__ void __launch_bounds__(256)
gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc,
device_2dspan<encoder_chunk_streams> streams, // const?
device_span<device_span<uint8_t const>> inputs,
device_span<device_span<uint8_t>> outputs,
device_span<compression_result> results,
device_span<uint8_t> compressed_bfr,
uint32_t comp_blk_size,
uint32_t max_comp_blk_size,
uint32_t comp_block_align)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t* volatile uncomp_base_g;
auto const padded_block_header_size = util::round_up_unsafe(block_header_size, comp_block_align);
auto const padded_comp_block_size = util::round_up_unsafe(max_comp_blk_size, comp_block_align);
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks;
uint8_t *src, *dst;
if (t == 0) {
ss = strm_desc[stripe_id][stream_id];
uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type];
}
__syncthreads();
src = uncomp_base_g;
dst = compressed_bfr.data() + ss.bfr_offset;
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1;
for (uint32_t b = t; b < num_blocks; b += 256) {
uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
inputs[ss.first_block + b] = {src + b * comp_blk_size, blk_size};
auto const dst_offset =
padded_block_header_size + b * (padded_block_header_size + padded_comp_block_size);
outputs[ss.first_block + b] = {dst + dst_offset, max_comp_blk_size};
results[ss.first_block + b] = {0, compression_status::FAILURE};
}
}
/**
 * @brief Compacts compressed blocks into a single contiguous stream and updates the 3-byte
 * block length fields
 *
 * @param[in,out] strm_desc StripeStream device array [stripe][stream]
 * @param[in] inputs Per-block compression input buffers
 * @param[in] outputs Per-block compression output buffers
 * @param[in,out] results Per-block compression status
 * @param[in] compressed_bfr Compression output buffer
 * @param[in] comp_blk_size Compression block size
 * @param[in] max_comp_blk_size Max size of any block after compression
 */
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc,
device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
device_span<uint8_t> compressed_bfr,
uint32_t comp_blk_size,
uint32_t max_comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t const* volatile comp_src_g;
__shared__ uint32_t volatile comp_len_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks, b, blk_size;
uint8_t const* src;
uint8_t* dst;
if (t == 0) ss = strm_desc[stripe_id][stream_id];
__syncthreads();
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0;
dst = compressed_bfr.data() + ss.bfr_offset;
b = 0;
do {
if (t == 0) {
auto const src_len =
min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
auto dst_len = (results[ss.first_block + b].status == compression_status::SUCCESS)
? results[ss.first_block + b].bytes_written
: src_len;
uint32_t blk_size24{};
// Only use the compressed block if it's smaller than the uncompressed
// If compression failed, dst_len == src_len, so the uncompressed block will be used
if (src_len < dst_len) {
// Copy from uncompressed source
src = inputs[ss.first_block + b].data();
results[ss.first_block + b].bytes_written = src_len;
dst_len = src_len;
blk_size24 = dst_len * 2 + 1;
} else {
// Compressed block
src = outputs[ss.first_block + b].data();
blk_size24 = dst_len * 2 + 0;
}
dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
comp_src_g = src;
comp_len_g = dst_len;
}
__syncthreads();
src = comp_src_g;
blk_size = comp_len_g;
dst += 3; // skip over length written by thread0
if (src != dst) {
for (uint32_t i = 0; i < blk_size; i += 1024) {
uint8_t v = (i + t < blk_size) ? src[i + t] : 0;
__syncthreads();
if (i + t < blk_size) { dst[i + t] = v; }
}
}
dst += blk_size;
__syncthreads();
} while (++b < num_blocks);
// Update stripe stream with the compressed size
if (t == 0) {
strm_desc[stripe_id][stream_id].stream_size =
static_cast<uint32_t>(dst - (compressed_bfr.data() + ss.bfr_offset));
}
}
void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk
dim3 dim_grid(chunks.size().first, chunks.size().second);
gpuEncodeOrcColumnData<encode_block_size>
<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, streams);
}
void EncodeStripeDictionaries(stripe_dictionary const* stripes,
device_span<orc_column_device_view const> columns,
device_2dspan<EncChunk const> chunks,
uint32_t num_string_columns,
uint32_t num_stripes,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1); // 512 threads per dictionary
dim3 dim_grid(num_string_columns * num_stripes, 2);
gpuEncodeStringDictionaries<512>
<<<dim_grid, dim_block, 0, stream.value()>>>(stripes, columns, chunks, enc_streams);
}
void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(compact_streams_block_size, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
gpuCompactOrcDataStreams<<<dim_grid, dim_block, 0, stream.value()>>>(strm_desc, enc_streams);
}
std::optional<writer_compression_statistics> CompressOrcDataStreams(
device_span<uint8_t> compressed_data,
uint32_t num_compressed_blocks,
CompressionKind compression,
uint32_t comp_blk_size,
uint32_t max_comp_blk_size,
uint32_t comp_block_align,
bool collect_statistics,
device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
device_span<compression_result> comp_res,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<device_span<uint8_t const>> comp_in(num_compressed_blocks, stream);
rmm::device_uvector<device_span<uint8_t>> comp_out(num_compressed_blocks, stream);
dim3 dim_block_init(256, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
gpuInitCompressionBlocks<<<dim_grid, dim_block_init, 0, stream.value()>>>(strm_desc,
enc_streams,
comp_in,
comp_out,
comp_res,
compressed_data,
comp_blk_size,
max_comp_blk_size,
comp_block_align);
if (compression == SNAPPY) {
try {
if (nvcomp::is_compression_disabled(nvcomp::compression_type::SNAPPY)) {
gpu_snap(comp_in, comp_out, comp_res, stream);
} else {
nvcomp::batched_compress(
nvcomp::compression_type::SNAPPY, comp_in, comp_out, comp_res, stream);
}
} catch (...) {
// There was an error in compressing so set an error status for each block
thrust::for_each(
rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
[] __device__(compression_result & stat) { stat.status = compression_status::FAILURE; });
// Since SNAPPY is the default compression (may not be explicitly requested), fall back to
// writing without compression
CUDF_LOG_WARN("ORC writer: compression failed, writing uncompressed data");
}
} else if (compression == ZLIB) {
if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::DEFLATE);
reason) {
CUDF_FAIL("Compression error: " + reason.value());
}
nvcomp::batched_compress(
nvcomp::compression_type::DEFLATE, comp_in, comp_out, comp_res, stream);
} else if (compression == ZSTD) {
if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::ZSTD);
reason) {
CUDF_FAIL("Compression error: " + reason.value());
}
nvcomp::batched_compress(nvcomp::compression_type::ZSTD, comp_in, comp_out, comp_res, stream);
} else if (compression != NONE) {
CUDF_FAIL("Unsupported compression type");
}
dim3 dim_block_compact(1024, 1);
gpuCompactCompressedBlocks<<<dim_grid, dim_block_compact, 0, stream.value()>>>(
strm_desc, comp_in, comp_out, comp_res, compressed_data, comp_blk_size, max_comp_blk_size);
if (collect_statistics) {
return cudf::io::collect_compression_statistics(comp_in, comp_res, stream);
} else {
return std::nullopt;
}
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
69f6b50c0102a6f81136db96b028e45763491c80.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include "book.h"
#include "hipsparse.h"
*/
#define threadsPerBlock 64
#define sizeSharedMemory 8
#define BlockDim 1024
#define ITER 3
template <typename T>
__global__ void spmv_pcsr_kernel1(T * d_val, T * d_vector, int * d_cols, int d_nnz, T * d_v)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int icr = blockDim.x * gridDim.x;
while (tid < d_nnz) {
d_v[tid] = d_val[tid] * d_vector[d_cols[tid]];
tid += icr;
}
}
template <typename T>
__global__ void spmv_pcsr_kernel2(T * d_v, int * d_ptr, int N, T * d_out)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
if(gid>=1024)
return;
__shared__ volatile int ptr_s[threadsPerBlock + 1];
__shared__ volatile T v_s[sizeSharedMemory];
// Load ptr into the shared memory ptr_s
ptr_s[tid] = d_ptr[gid];
// Assign thread 0 of every block to store the pointer for the last row handled by the block into the last shared memory location
if (tid == 0) {
if (gid + threadsPerBlock > N) {
ptr_s[threadsPerBlock] = d_ptr[N];
}
else {
ptr_s[threadsPerBlock] = d_ptr[gid + threadsPerBlock];
}
}
__syncthreads();
int temp = (ptr_s[threadsPerBlock] - ptr_s[0]) / threadsPerBlock + 1;
int nlen = min(temp * threadsPerBlock, 1024);
T sum = 0;
int maxlen = ptr_s[threadsPerBlock];
for (int i = ptr_s[0]; i < maxlen; i += nlen) {
int index = i + tid;
__syncthreads();
// Load d_v into the shared memory v_s
for (int j = 0; j < nlen / threadsPerBlock; j++) {
if (index < maxlen) {
v_s[tid + j * threadsPerBlock] = d_v[index];
index += threadsPerBlock;
}
}
__syncthreads();
// Sum up the elements for a row
if (!(ptr_s[tid + 1] <= i || ptr_s[tid] > i + nlen - 1)) {
int row_s = max(ptr_s[tid] - i, 0);
int row_e = min(ptr_s[tid + 1] - i, nlen);
for (int j = row_s; j < row_e; j++) {
sum += v_s[j];
}
}
}
// Write result
d_out[gid] = sum;
}
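// --- Editor sketch (not part of the original file; `sketch_` names are hypothetical, and
// hip_runtime.h is assumed to be in scope since the include block above is commented out).
// The two kernels implement CSR SpMV in two phases: kernel1 forms the element-wise
// products val[k] * x[cols[k]] into d_v, and kernel2 segments and sums d_v per row using
// the CSR row pointer d_ptr. Note this kernel2 variant bails out for gid >= 1024, so it
// assumes at most 1024 rows. A minimal launch sketch over pre-filled device buffers:
template <typename T>
void sketch_spmv_pcsr(T* d_val, T* d_vector, int* d_cols, int* d_ptr,
                      int nnz, int num_rows, T* d_v, T* d_out)
{
    int grid1 = (nnz + BlockDim - 1) / BlockDim;
    hipLaunchKernelGGL(spmv_pcsr_kernel1<T>, dim3(grid1), dim3(BlockDim), 0, 0,
                       d_val, d_vector, d_cols, nnz, d_v);
    int grid2 = (num_rows + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(spmv_pcsr_kernel2<T>, dim3(grid2), dim3(threadsPerBlock), 0, 0,
                       d_v, d_ptr, num_rows, d_out);
    (void)hipDeviceSynchronize();
}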
| 69f6b50c0102a6f81136db96b028e45763491c80.cu | /*
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "book.h"
#include "cusparse.h"
*/
#define threadsPerBlock 64
#define sizeSharedMemory 8
#define BlockDim 1024
#define ITER 3
template <typename T>
__global__ void spmv_pcsr_kernel1(T * d_val, T * d_vector, int * d_cols, int d_nnz, T * d_v)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int icr = blockDim.x * gridDim.x;
while (tid < d_nnz) {
d_v[tid] = d_val[tid] * d_vector[d_cols[tid]];
tid += icr;
}
}
template <typename T>
__global__ void spmv_pcsr_kernel2(T * d_v, int * d_ptr, int N, T * d_out)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
if(gid>=1024)
return;
__shared__ volatile int ptr_s[threadsPerBlock + 1];
__shared__ volatile T v_s[sizeSharedMemory];
// Load ptr into the shared memory ptr_s
ptr_s[tid] = d_ptr[gid];
// Assign thread 0 of every block to store the pointer for the last row handled by the block into the last shared memory location
if (tid == 0) {
if (gid + threadsPerBlock > N) {
ptr_s[threadsPerBlock] = d_ptr[N];
}
else {
ptr_s[threadsPerBlock] = d_ptr[gid + threadsPerBlock];
}
}
__syncthreads();
int temp = (ptr_s[threadsPerBlock] - ptr_s[0]) / threadsPerBlock + 1;
int nlen = min(temp * threadsPerBlock, 1024);
T sum = 0;
int maxlen = ptr_s[threadsPerBlock];
for (int i = ptr_s[0]; i < maxlen; i += nlen) {
int index = i + tid;
__syncthreads();
// Load d_v into the shared memory v_s
for (int j = 0; j < nlen / threadsPerBlock; j++) {
if (index < maxlen) {
v_s[tid + j * threadsPerBlock] = d_v[index];
index += threadsPerBlock;
}
}
__syncthreads();
// Sum up the elements for a row
if (!(ptr_s[tid + 1] <= i || ptr_s[tid] > i + nlen - 1)) {
int row_s = max(ptr_s[tid] - i, 0);
int row_e = min(ptr_s[tid + 1] - i, nlen);
for (int j = row_s; j < row_e; j++) {
sum += v_s[j];
}
}
}
// Write result
d_out[gid] = sum;
}
|
9bafc6bac4f9189894ebdd844d4ac3946ce1143a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
using namespace cv::cuda;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
//// Non Local Means Denoising
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm2(const float& v) { return v*v; }
__device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; }
__device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
__device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; }
template<typename T, typename B>
__global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
const int i = blockDim.y * blockIdx.y + threadIdx.y;
const int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= dst.cols || i >= dst.rows)
return;
int bsize = search_radius + block_radius;
int search_window = 2 * search_radius + 1;
float minus_search_window2_inv = -1.f/(search_window * search_window);
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0.f;
if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows)
{
for(float y = -search_radius; y <= search_radius; ++y)
for(float x = -search_radius; x <= search_radius; ++x)
{
float dist2 = 0;
for(float ty = -block_radius; ty <= block_radius; ++ty)
for(float tx = -block_radius; tx <= block_radius; ++tx)
{
value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx));
value_type av = saturate_cast<value_type>(src(i + ty, j + tx));
dist2 += norm2(av - bv);
}
float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
/*if (i == 255 && j == 255)
printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/
sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x));
sum2 += w;
}
}
else
{
for(float y = -search_radius; y <= search_radius; ++y)
for(float x = -search_radius; x <= search_radius; ++x)
{
float dist2 = 0;
for(float ty = -block_radius; ty <= block_radius; ++ty)
for(float tx = -block_radius; tx <= block_radius; ++tx)
{
value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src));
value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src));
dist2 += norm2(av - bv);
}
float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src));
sum2 += w;
}
}
dst(i, j) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
int block_window = 2 * block_radius + 1;
float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn);
float noise_mult = minus_h2_inv/(block_window * block_window);
cudaSafeCall( hipFuncSetCacheConfig (nlm_kernel<T, B<T> >, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( nlm_kernel), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template<typename T>
void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, hipStream_t stream)
{
typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream);
static func_t funcs[] =
{
nlm_caller<T, BrdConstant>,
nlm_caller<T, BrdReplicate>,
nlm_caller<T, BrdReflect>,
nlm_caller<T, BrdWrap>,
nlm_caller<T, BrdReflect101>
};
funcs[borderMode](src, dst, search_radius, block_radius, h, stream);
}
template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t);
template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t);
template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t);
}
}}}
//////////////////////////////////////////////////////////////////////////////////
//// Non Local Means Denoising (fast approximate version)
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
template <int cn> struct Unroll;
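        // Unroll<cn> packs the scalar weight sum and the cn-channel colour sum into a single
        // thrust tuple (shared-memory slots, value references and plus<float> functors) so
        // that convolve_window below can fold them all in one block-wide reduce<CTA_SIZE> call.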
template <> struct Unroll<1>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2)
{
return thrust::tie(val1, val2);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op);
}
};
template <> struct Unroll<2>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2)
{
return thrust::tie(val1, val2.x, val2.y);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op);
}
};
template <> struct Unroll<3>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2)
{
return thrust::tie(val1, val2.x, val2.y, val2.z);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op, op);
}
};
template <> struct Unroll<4>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2)
{
return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op, op, op);
}
};
__device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); }
__device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); }
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); }
template <class T> struct FastNonLocalMeans
{
enum
{
CTA_SIZE = 128,
TILE_COLS = 128,
TILE_ROWS = 32,
STRIDE = CTA_SIZE
};
struct plus
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; }
};
int search_radius;
int block_radius;
int search_window;
int block_window;
float minus_h2_inv;
FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2),
search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {}
PtrStep<T> src;
mutable PtrStepi buffer;
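            // Descriptive note: operator() below sweeps each tile row left to right while keeping
            // one running block-distance sum per search-window offset. initSums_BruteForce builds
            // those sums from scratch at the left edge of a tile; shiftRight_FirstRow and
            // shiftRight_UpSums then update them incrementally using col_sums (per-offset column
            // sums of the current block) and up_col_sums (column sums carried over from the row above).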
__device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
dist_sums[index] = 0;
for(int tx = 0; tx < block_window; ++tx)
col_sums(tx, index) = 0;
int y = index / search_window;
int x = index - y * search_window;
int ay = i;
int ax = j;
int by = i + y - search_radius;
int bx = j + x - search_radius;
#if 1
for (int tx = -block_radius; tx <= block_radius; ++tx)
{
int col_sum = 0;
for (int ty = -block_radius; ty <= block_radius; ++ty)
{
int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx));
dist_sums[index] += dist;
col_sum += dist;
}
col_sums(tx + block_radius, index) = col_sum;
}
#else
for (int ty = -block_radius; ty <= block_radius; ++ty)
for (int tx = -block_radius; tx <= block_radius; ++tx)
{
int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx));
dist_sums[index] += dist;
col_sums(tx + block_radius, index) += dist;
}
#endif
up_col_sums(j, index) = col_sums(block_window - 1, index);
}
}
__device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
int ay = i;
int ax = j + block_radius;
int by = i + y - search_radius;
int bx = j + x - search_radius + block_radius;
int col_sum = 0;
for (int ty = -block_radius; ty <= block_radius; ++ty)
col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx));
dist_sums[index] += col_sum - col_sums(first, index);
col_sums(first, index) = col_sum;
up_col_sums(j, index) = col_sum;
}
}
__device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
int ay = i;
int ax = j + block_radius;
T a_up = src(ay - block_radius - 1, ax);
T a_down = src(ay + block_radius, ax);
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
int by = i + y - search_radius;
int bx = j + x - search_radius + block_radius;
T b_up = src(by - block_radius - 1, bx);
T b_down = src(by + block_radius, bx);
int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up);
dist_sums[index] += col_sum - col_sums(first, index);
col_sums(first, index) = col_sum;
up_col_sums(j, index) = col_sum;
}
}
__device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type;
float weights_sum = 0;
sum_type sum = VecTraits<sum_type>::all(0);
float bw2_inv = 1.f/(block_window * block_window);
int sx = j - search_radius;
int sy = i - search_radius;
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
float avg_dist = dist_sums[index] * bw2_inv;
float weight = __expf(avg_dist * minus_h2_inv);
weights_sum += weight;
sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x));
}
__shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)];
reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer),
Unroll<VecTraits<T>::cn>::tie(weights_sum, sum),
threadIdx.x,
Unroll<VecTraits<T>::cn>::op());
if (threadIdx.x == 0)
dst = saturate_cast<T>(sum / weights_sum);
}
__device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const
{
int tbx = blockIdx.x * TILE_COLS;
int tby = blockIdx.y * TILE_ROWS;
int tex = ::min(tbx + TILE_COLS, dst.cols);
int tey = ::min(tby + TILE_ROWS, dst.rows);
PtrStepi col_sums;
col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window;
col_sums.step = buffer.step;
PtrStepi up_col_sums;
up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window;
up_col_sums.step = buffer.step;
extern __shared__ int dist_sums[]; //search_window * search_window
int first = 0;
for (int i = tby; i < tey; ++i)
for (int j = tbx; j < tex; ++j)
{
__syncthreads();
if (j == tbx)
{
initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums);
first = 0;
}
else
{
if (i == tby)
shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums);
else
shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums);
first = (first + 1) % block_window;
}
__syncthreads();
convolve_window(i, j, dist_sums, dst(i, j));
}
}
};
template<typename T>
__global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); }
void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows)
{
typedef FastNonLocalMeans<uchar> FNLM;
dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS));
buffer_cols = search_window * search_window * grid.y;
buffer_rows = src.cols + block_window * grid.x;
}
template<typename T>
void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer,
int search_window, int block_window, float h, hipStream_t stream)
{
typedef FastNonLocalMeans<T> FNLM;
FNLM fnlm(search_window, block_window, h);
fnlm.src = (PtrStepSz<T>)src;
fnlm.buffer = buffer;
dim3 block(FNLM::CTA_SIZE, 1);
dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS));
int smem = search_window * search_window * sizeof(int);
hipLaunchKernelGGL(( fast_nlm_kernel), dim3(grid), dim3(block), smem, 0, fnlm, (PtrStepSz<T>)dst);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t);
template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t);
template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t);
__global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < lab.cols && y < lab.rows)
{
uchar3 p = lab(y, x);
ab(y,x) = make_uchar2(p.y, p.z);
l(y,x) = p.x;
}
}
void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, hipStream_t stream)
{
dim3 b(32, 8);
dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y));
hipLaunchKernelGGL(( fnlm_split_kernel), dim3(g), dim3(b), 0, 0, lab, l, ab);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < lab.cols && y < lab.rows)
{
uchar2 p = ab(y, x);
lab(y, x) = make_uchar3(l(y, x), p.x, p.y);
}
}
void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, hipStream_t stream)
{
dim3 b(32, 8);
dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y));
hipLaunchKernelGGL(( fnlm_merge_kernel), dim3(g), dim3(b), 0, 0, l, ab, lab);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
}}}
| 9bafc6bac4f9189894ebdd844d4ac3946ce1143a.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
using namespace cv::cuda;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
//// Non Local Means Denoising
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm2(const float& v) { return v*v; }
__device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; }
__device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
__device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; }
template<typename T, typename B>
__global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
const int i = blockDim.y * blockIdx.y + threadIdx.y;
const int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= dst.cols || i >= dst.rows)
return;
int bsize = search_radius + block_radius;
int search_window = 2 * search_radius + 1;
float minus_search_window2_inv = -1.f/(search_window * search_window);
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0.f;
if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows)
{
for(float y = -search_radius; y <= search_radius; ++y)
for(float x = -search_radius; x <= search_radius; ++x)
{
float dist2 = 0;
for(float ty = -block_radius; ty <= block_radius; ++ty)
for(float tx = -block_radius; tx <= block_radius; ++tx)
{
value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx));
value_type av = saturate_cast<value_type>(src(i + ty, j + tx));
dist2 += norm2(av - bv);
}
float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
/*if (i == 255 && j == 255)
printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/
sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x));
sum2 += w;
}
}
else
{
for(float y = -search_radius; y <= search_radius; ++y)
for(float x = -search_radius; x <= search_radius; ++x)
{
float dist2 = 0;
for(float ty = -block_radius; ty <= block_radius; ++ty)
for(float tx = -block_radius; tx <= block_radius; ++tx)
{
value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src));
value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src));
dist2 += norm2(av - bv);
}
float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src));
sum2 += w;
}
}
dst(i, j) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
int block_window = 2 * block_radius + 1;
float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn);
float noise_mult = minus_h2_inv/(block_window * block_window);
cudaSafeCall( cudaFuncSetCacheConfig (nlm_kernel<T, B<T> >, cudaFuncCachePreferL1) );
nlm_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template<typename T>
void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream)
{
typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream);
static func_t funcs[] =
{
nlm_caller<T, BrdConstant>,
nlm_caller<T, BrdReplicate>,
nlm_caller<T, BrdReflect>,
nlm_caller<T, BrdWrap>,
nlm_caller<T, BrdReflect101>
};
funcs[borderMode](src, dst, search_radius, block_radius, h, stream);
}
template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
}
}}}
//////////////////////////////////////////////////////////////////////////////////
//// Non Local Means Denoising (fast approximate version)
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
template <int cn> struct Unroll;
template <> struct Unroll<1>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2)
{
return thrust::tie(val1, val2);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op);
}
};
template <> struct Unroll<2>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2)
{
return thrust::tie(val1, val2.x, val2.y);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op);
}
};
template <> struct Unroll<3>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2)
{
return thrust::tie(val1, val2.x, val2.y, val2.z);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op, op);
}
};
template <> struct Unroll<4>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2)
{
return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op, op, op);
}
};
__device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); }
__device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); }
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); }
template <class T> struct FastNonLocalMeans
{
enum
{
CTA_SIZE = 128,
TILE_COLS = 128,
TILE_ROWS = 32,
STRIDE = CTA_SIZE
};
struct plus
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; }
};
int search_radius;
int block_radius;
int search_window;
int block_window;
float minus_h2_inv;
FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2),
search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {}
PtrStep<T> src;
mutable PtrStepi buffer;
__device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
dist_sums[index] = 0;
for(int tx = 0; tx < block_window; ++tx)
col_sums(tx, index) = 0;
int y = index / search_window;
int x = index - y * search_window;
int ay = i;
int ax = j;
int by = i + y - search_radius;
int bx = j + x - search_radius;
#if 1
for (int tx = -block_radius; tx <= block_radius; ++tx)
{
int col_sum = 0;
for (int ty = -block_radius; ty <= block_radius; ++ty)
{
int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx));
dist_sums[index] += dist;
col_sum += dist;
}
col_sums(tx + block_radius, index) = col_sum;
}
#else
for (int ty = -block_radius; ty <= block_radius; ++ty)
for (int tx = -block_radius; tx <= block_radius; ++tx)
{
int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx));
dist_sums[index] += dist;
col_sums(tx + block_radius, index) += dist;
}
#endif
up_col_sums(j, index) = col_sums(block_window - 1, index);
}
}
__device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
int ay = i;
int ax = j + block_radius;
int by = i + y - search_radius;
int bx = j + x - search_radius + block_radius;
int col_sum = 0;
for (int ty = -block_radius; ty <= block_radius; ++ty)
col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx));
dist_sums[index] += col_sum - col_sums(first, index);
col_sums(first, index) = col_sum;
up_col_sums(j, index) = col_sum;
}
}
__device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
int ay = i;
int ax = j + block_radius;
T a_up = src(ay - block_radius - 1, ax);
T a_down = src(ay + block_radius, ax);
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
int by = i + y - search_radius;
int bx = j + x - search_radius + block_radius;
T b_up = src(by - block_radius - 1, bx);
T b_down = src(by + block_radius, bx);
int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up);
dist_sums[index] += col_sum - col_sums(first, index);
col_sums(first, index) = col_sum;
up_col_sums(j, index) = col_sum;
}
}
__device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type;
float weights_sum = 0;
sum_type sum = VecTraits<sum_type>::all(0);
float bw2_inv = 1.f/(block_window * block_window);
int sx = j - search_radius;
int sy = i - search_radius;
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
float avg_dist = dist_sums[index] * bw2_inv;
float weight = __expf(avg_dist * minus_h2_inv);
weights_sum += weight;
sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x));
}
__shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)];
reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer),
Unroll<VecTraits<T>::cn>::tie(weights_sum, sum),
threadIdx.x,
Unroll<VecTraits<T>::cn>::op());
if (threadIdx.x == 0)
dst = saturate_cast<T>(sum / weights_sum);
}
__device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const
{
int tbx = blockIdx.x * TILE_COLS;
int tby = blockIdx.y * TILE_ROWS;
int tex = ::min(tbx + TILE_COLS, dst.cols);
int tey = ::min(tby + TILE_ROWS, dst.rows);
PtrStepi col_sums;
col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window;
col_sums.step = buffer.step;
PtrStepi up_col_sums;
up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window;
up_col_sums.step = buffer.step;
extern __shared__ int dist_sums[]; //search_window * search_window
int first = 0;
for (int i = tby; i < tey; ++i)
for (int j = tbx; j < tex; ++j)
{
__syncthreads();
if (j == tbx)
{
initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums);
first = 0;
}
else
{
if (i == tby)
shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums);
else
shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums);
first = (first + 1) % block_window;
}
__syncthreads();
convolve_window(i, j, dist_sums, dst(i, j));
}
}
};
template<typename T>
__global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); }
void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows)
{
typedef FastNonLocalMeans<uchar> FNLM;
dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS));
buffer_cols = search_window * search_window * grid.y;
buffer_rows = src.cols + block_window * grid.x;
}
template<typename T>
void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer,
int search_window, int block_window, float h, cudaStream_t stream)
{
typedef FastNonLocalMeans<T> FNLM;
FNLM fnlm(search_window, block_window, h);
fnlm.src = (PtrStepSz<T>)src;
fnlm.buffer = buffer;
dim3 block(FNLM::CTA_SIZE, 1);
dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS));
int smem = search_window * search_window * sizeof(int);
fast_nlm_kernel<<<grid, block, smem>>>(fnlm, (PtrStepSz<T>)dst);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t);
template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t);
template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t);
__global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < lab.cols && y < lab.rows)
{
uchar3 p = lab(y, x);
ab(y,x) = make_uchar2(p.y, p.z);
l(y,x) = p.x;
}
}
void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, cudaStream_t stream)
{
dim3 b(32, 8);
dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y));
fnlm_split_kernel<<<g, b>>>(lab, l, ab);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < lab.cols && y < lab.rows)
{
uchar2 p = ab(y, x);
lab(y, x) = make_uchar3(l(y, x), p.x, p.y);
}
}
void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, cudaStream_t stream)
{
dim3 b(32, 8);
dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y));
fnlm_merge_kernel<<<g, b>>>(l, ab, lab);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}}}
|
22020347c5f670ab029c21c8e7dbcdb015945e9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by quan on 4/16/17.
//
#include <fluid_kernels.h>
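// Note: IX(i, j, k) is the 3-D flattening macro supplied by fluid_kernels.h (not shown here).
// A common definition for an N*N*N grid would be something like
//   #define IX(x, y, z) ((x) + (y) * N + (z) * N * N)
// but that exact layout is an assumption; see the header for the real definition.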
__global__ void advect_kernel(double *d, double *d0, double *velocX, double *velocY,
double *velocZ, double dt, int N, int k)
{
double Ndouble = (double) N;
double dtx = dt * (N - 2);
double dty = dt * (N - 2);
double dtz = dt * (N - 2);
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
double idouble = (double) i;
double jdouble = (double) j;
double kdouble = (double) k;
double s0, s1, t0, t1, u0, u1;
double tmp1, tmp2, tmp3, x, y, z;
double i0, i1, j0, j1, k0, k1;
tmp1 = dtx * velocX[IX(i, j, k)];
tmp2 = dty * velocY[IX(i, j, k)];
tmp3 = dtz * velocZ[IX(i, j, k)];
x = idouble - tmp1;
y = jdouble - tmp2;
z = kdouble - tmp3;
if(x < 0.5f) x = 0.5f;
if(x > Ndouble + 0.5f) x = Ndouble + 0.5f;
i0 = floor(x);
i1 = i0 + 1.0f;
if(y < 0.5f) y = 0.5f;
if(y > Ndouble + 0.5f) y = Ndouble + 0.5f;
j0 = floor(y);
j1 = j0 + 1.0f;
if(z < 0.5f) z = 0.5f;
if(z > Ndouble + 0.5f) z = Ndouble + 0.5f;
k0 = floor(z);
k1 = k0 + 1.0f;
s1 = x - i0;
s0 = 1.0f - s1;
t1 = y - j0;
t0 = 1.0f - t1;
u1 = z - k0;
u0 = 1.0f - u1;
int i0i = (int) i0;
int i1i = (int) i1;
int j0i = (int) j0;
int j1i = (int) j1;
int k0i = (int) k0;
int k1i = (int) k1;
d[IX(i, j, k)] =
s0 * ( t0 * (u0 * d0[IX(i0i, j0i, k0i)]
+u1 * d0[IX(i0i, j0i, k1i)])
+( t1 * (u0 * d0[IX(i0i, j1i, k0i)]
+u1 * d0[IX(i0i, j1i, k1i)])))
+s1 * ( t0 * (u0 * d0[IX(i1i, j0i, k0i)]
+u1 * d0[IX(i1i, j0i, k1i)])
+( t1 * (u0 * d0[IX(i1i, j1i, k0i)]
+u1 * d0[IX(i1i, j1i, k1i)])));
}
__global__ void set_bnd_kernel1(int b, double *x, int N)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
x[IX(i, j, 0 )] = b == 3 ? -x[IX(i, j, 1 )] : x[IX(i, j, 1 )];
x[IX(i, j, N-1)] = b == 3 ? -x[IX(i, j, N-2)] : x[IX(i, j, N-2)];
x[IX(i, 0 , j)] = b == 2 ? -x[IX(i, 1 , j)] : x[IX(i, 1 , j)];
x[IX(i, N-1, j)] = b == 2 ? -x[IX(i, N-2, j)] : x[IX(i, N-2, j)];
x[IX(0 , i, j)] = b == 1 ? -x[IX(1 , i, j)] : x[IX(1 , i, j)];
x[IX(N-1, i, j)] = b == 1 ? -x[IX(N-2, i, j)] : x[IX(N-2, i, j)];
}
__global__ void set_bnd_kernel2(double *x, int N)
{
x[IX(0, 0, 0)] = 0.33f * (x[IX(1, 0, 0)]
+ x[IX(0, 1, 0)]
+ x[IX(0, 0, 1)]);
x[IX(0, N-1, 0)] = 0.33f * (x[IX(1, N-1, 0)]
+ x[IX(0, N-2, 0)]
+ x[IX(0, N-1, 1)]);
x[IX(0, 0, N-1)] = 0.33f * (x[IX(1, 0, N-1)]
+ x[IX(0, 1, N-1)]
+ x[IX(0, 0, N)]);
x[IX(0, N-1, N-1)] = 0.33f * (x[IX(1, N-1, N-1)]
+ x[IX(0, N-2, N-1)]
+ x[IX(0, N-1, N-2)]);
x[IX(N-1, 0, 0)] = 0.33f * (x[IX(N-2, 0, 0)]
+ x[IX(N-1, 1, 0)]
+ x[IX(N-1, 0, 1)]);
x[IX(N-1, N-1, 0)] = 0.33f * (x[IX(N-2, N-1, 0)]
+ x[IX(N-1, N-2, 0)]
+ x[IX(N-1, N-1, 1)]);
x[IX(N-1, 0, N-1)] = 0.33f * (x[IX(N-2, 0, N-1)]
+ x[IX(N-1, 1, N-1)]
+ x[IX(N-1, 0, N-2)]);
x[IX(N-1, N-1, N-1)] = 0.33f * (x[IX(N-2, N-1, N-1)]
+ x[IX(N-1, N-2, N-1)]
+ x[IX(N-1, N-1, N-2)]);
}
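// The two projection kernels below implement the pressure-projection step of a Stam-style
// fluid solver: project_kernel1 stores the (scaled) velocity divergence in div and clears p,
// an iterative relaxation (presumably lin_solve_kernel, driven from host code not shown here)
// then solves the Poisson equation for p, and project_kernel2 subtracts the pressure gradient
// so the velocity field becomes (approximately) divergence free.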
__global__ void project_kernel1(double *velocX, double *velocY, double *velocZ,
double *p, double *div, int N, double N_recip, int k)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
div[IX(i, j, k)] = -0.5f*(
velocX[IX(i+1, j , k )]
-velocX[IX(i-1, j , k )]
+velocY[IX(i , j+1, k )]
-velocY[IX(i , j-1, k )]
+velocZ[IX(i , j , k+1)]
-velocZ[IX(i , j , k-1)]
) * N_recip;
p[IX(i, j, k)] = 0;
}
__global__ void project_kernel2(double *velocX, double *velocY, double *velocZ,
double *p, int N, int k)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
velocX[IX(i, j, k)] -= 0.5f * ( p[IX(i+1, j, k)]
-p[IX(i-1, j, k)]) * N;
velocY[IX(i, j, k)] -= 0.5f * ( p[IX(i, j+1, k)]
-p[IX(i, j-1, k)]) * N;
velocZ[IX(i, j, k)] -= 0.5f * ( p[IX(i, j, k+1)]
-p[IX(i, j, k-1)]) * N;
}
__global__ void lin_solve_kernel(double *x_next, double *x, double *x0, double a,
double cRecip, int N, int m)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
x_next[IX(i, j, m)] = (x0[IX(i, j, m)]
+ a * (x[IX(i+1, j , m )]
+ x[IX(i-1, j , m )]
+ x[IX(i , j+1, m )]
+ x[IX(i , j-1, m )]
+ x[IX(i , j , m+1)]
+ x[IX(i , j , m-1)]))
* cRecip;
}
__global__ void set_values_kernel(double *x_next, double *x, int m, int N)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
x[IX(i, j, m)] = x_next[IX(i, j, m)];
}
| 22020347c5f670ab029c21c8e7dbcdb015945e9f.cu | //
// Created by quan on 4/16/17.
//
#include <fluid_kernels.h>
__global__ void advect_kernel(double *d, double *d0, double *velocX, double *velocY,
double *velocZ, double dt, int N, int k)
{
double Ndouble = (double) N;
double dtx = dt * (N - 2);
double dty = dt * (N - 2);
double dtz = dt * (N - 2);
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
double idouble = (double) i;
double jdouble = (double) j;
double kdouble = (double) k;
double s0, s1, t0, t1, u0, u1;
double tmp1, tmp2, tmp3, x, y, z;
double i0, i1, j0, j1, k0, k1;
tmp1 = dtx * velocX[IX(i, j, k)];
tmp2 = dty * velocY[IX(i, j, k)];
tmp3 = dtz * velocZ[IX(i, j, k)];
x = idouble - tmp1;
y = jdouble - tmp2;
z = kdouble - tmp3;
if(x < 0.5f) x = 0.5f;
if(x > Ndouble + 0.5f) x = Ndouble + 0.5f;
i0 = floor(x);
i1 = i0 + 1.0f;
if(y < 0.5f) y = 0.5f;
if(y > Ndouble + 0.5f) y = Ndouble + 0.5f;
j0 = floor(y);
j1 = j0 + 1.0f;
if(z < 0.5f) z = 0.5f;
if(z > Ndouble + 0.5f) z = Ndouble + 0.5f;
k0 = floor(z);
k1 = k0 + 1.0f;
s1 = x - i0;
s0 = 1.0f - s1;
t1 = y - j0;
t0 = 1.0f - t1;
u1 = z - k0;
u0 = 1.0f - u1;
int i0i = (int) i0;
int i1i = (int) i1;
int j0i = (int) j0;
int j1i = (int) j1;
int k0i = (int) k0;
int k1i = (int) k1;
d[IX(i, j, k)] =
s0 * ( t0 * (u0 * d0[IX(i0i, j0i, k0i)]
+u1 * d0[IX(i0i, j0i, k1i)])
+( t1 * (u0 * d0[IX(i0i, j1i, k0i)]
+u1 * d0[IX(i0i, j1i, k1i)])))
+s1 * ( t0 * (u0 * d0[IX(i1i, j0i, k0i)]
+u1 * d0[IX(i1i, j0i, k1i)])
+( t1 * (u0 * d0[IX(i1i, j1i, k0i)]
+u1 * d0[IX(i1i, j1i, k1i)])));
}
__global__ void set_bnd_kernel1(int b, double *x, int N)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
x[IX(i, j, 0 )] = b == 3 ? -x[IX(i, j, 1 )] : x[IX(i, j, 1 )];
x[IX(i, j, N-1)] = b == 3 ? -x[IX(i, j, N-2)] : x[IX(i, j, N-2)];
x[IX(i, 0 , j)] = b == 2 ? -x[IX(i, 1 , j)] : x[IX(i, 1 , j)];
x[IX(i, N-1, j)] = b == 2 ? -x[IX(i, N-2, j)] : x[IX(i, N-2, j)];
x[IX(0 , i, j)] = b == 1 ? -x[IX(1 , i, j)] : x[IX(1 , i, j)];
x[IX(N-1, i, j)] = b == 1 ? -x[IX(N-2, i, j)] : x[IX(N-2, i, j)];
}
__global__ void set_bnd_kernel2(double *x, int N)
{
x[IX(0, 0, 0)] = 0.33f * (x[IX(1, 0, 0)]
+ x[IX(0, 1, 0)]
+ x[IX(0, 0, 1)]);
x[IX(0, N-1, 0)] = 0.33f * (x[IX(1, N-1, 0)]
+ x[IX(0, N-2, 0)]
+ x[IX(0, N-1, 1)]);
x[IX(0, 0, N-1)] = 0.33f * (x[IX(1, 0, N-1)]
+ x[IX(0, 1, N-1)]
+ x[IX(0, 0, N)]);
x[IX(0, N-1, N-1)] = 0.33f * (x[IX(1, N-1, N-1)]
+ x[IX(0, N-2, N-1)]
+ x[IX(0, N-1, N-2)]);
x[IX(N-1, 0, 0)] = 0.33f * (x[IX(N-2, 0, 0)]
+ x[IX(N-1, 1, 0)]
+ x[IX(N-1, 0, 1)]);
x[IX(N-1, N-1, 0)] = 0.33f * (x[IX(N-2, N-1, 0)]
+ x[IX(N-1, N-2, 0)]
+ x[IX(N-1, N-1, 1)]);
x[IX(N-1, 0, N-1)] = 0.33f * (x[IX(N-2, 0, N-1)]
+ x[IX(N-1, 1, N-1)]
+ x[IX(N-1, 0, N-2)]);
x[IX(N-1, N-1, N-1)] = 0.33f * (x[IX(N-2, N-1, N-1)]
+ x[IX(N-1, N-2, N-1)]
+ x[IX(N-1, N-1, N-2)]);
}
__global__ void project_kernel1(double *velocX, double *velocY, double *velocZ,
double *p, double *div, int N, double N_recip, int k)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
div[IX(i, j, k)] = -0.5f*(
velocX[IX(i+1, j , k )]
-velocX[IX(i-1, j , k )]
+velocY[IX(i , j+1, k )]
-velocY[IX(i , j-1, k )]
+velocZ[IX(i , j , k+1)]
-velocZ[IX(i , j , k-1)]
) * N_recip;
p[IX(i, j, k)] = 0;
}
__global__ void project_kernel2(double *velocX, double *velocY, double *velocZ,
double *p, int N, int k)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
velocX[IX(i, j, k)] -= 0.5f * ( p[IX(i+1, j, k)]
-p[IX(i-1, j, k)]) * N;
velocY[IX(i, j, k)] -= 0.5f * ( p[IX(i, j+1, k)]
-p[IX(i, j-1, k)]) * N;
velocZ[IX(i, j, k)] -= 0.5f * ( p[IX(i, j, k+1)]
-p[IX(i, j, k-1)]) * N;
}
__global__ void lin_solve_kernel(double *x_next, double *x, double *x0, double a,
double cRecip, int N, int m)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
x_next[IX(i, j, m)] = (x0[IX(i, j, m)]
+ a * (x[IX(i+1, j , m )]
+ x[IX(i-1, j , m )]
+ x[IX(i , j+1, m )]
+ x[IX(i , j-1, m )]
+ x[IX(i , j , m+1)]
+ x[IX(i , j , m-1)]))
* cRecip;
}
__global__ void set_values_kernel(double *x_next, double *x, int m, int N)
{
int j = blockIdx.x + 1;
int i = threadIdx.x + 1;
x[IX(i, j, m)] = x_next[IX(i, j, m)];
}
|
27bdf7da0360a6da376a0bc0e91548de3a66b246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GPUNet.cpp
*
* Created on: Jan 5, 2014
* Author: trevor
*
* GPU Neural Network
* Maintains network state and invokes functions on the GPU
*
*/
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <boost/lexical_cast.hpp>
#include <thrust/device_vector.h>
#include "hiprand/hiprand_kernel.h"
#include "GPUNet.h"
#include "NetIO.h"
#include "NetTrainer.h"
/*
* ------------ CUDA ------------
*/
/**
* Get a random number within a given float range
* \param min float
* \param max float
* \param i int
* \param *global hiprandState_t
*/
__device__ float get_random_range(float min, float max, int i, hiprandState_t *global) {
hiprandState_t local = global[i];
float r = hiprand_uniform(&local);
global[i] = local;
return min + r * (max - min);
}
/*
* Get the weight from i in layer1 to j layer2 given the array of weights between them.
* n_layer is the number of nodes in the layer containing i.
*/
__device__ __inline__ float get_weight(float* weights, int n_layer, int i, int j) {
//return weights[(n_layer+1)*j + i];
return weights[n_layer*i + j];
}
__device__ __inline__ void set_weight(float* weights, int n_layer, int i, int j, float v) {
//weights[(n_layer+1)*j + i] = v;
weights[n_layer*i + j] = v;
}
/**
* Compute the sigmoid value of a given float
* \param x the value to compute the sigmoid of
*/
__device__ __inline__ float sigmoid(float x) {
return 1.0 / (1.0 + exp(-x));
}
/**
 * Clamp the output to 0 if below 0.1 or to 1 if above 0.9; returns -1 otherwise.
 * \param f the value to clamp
*/
__device__ int clamp(float f) {
if (f < .1) {
return 0;
} else if (f > .9) {
return 1;
} else {
return -1;
}
}
/*
*
* ------------- Initialization kernels ---------------
*
*/
/**
 * Initialize random seeds in CUDA; initializes one seed per thread in the block (blocksize seeds)
*/
__global__ void curand_setup(hiprandState_t *state) {
unsigned int seed = (unsigned int)clock64();
int id = threadIdx.x;
hiprand_init(seed, id, 0, &state[id]);
}
/**
* initialize nodes to 0 or 1 if bias
* generic
*/
__global__ void init_nodes_layer_v2(int n, float *nodes) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < n) {
if (i == n-1)
nodes[i] = 1;
else
nodes[i] = 0;
}
}
/**
* set all output nodes to 0
*/
__global__ void init_nodes_output_v2(int n, float *output) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < n) {
output[i] = 0;
}
}
__global__ void init_weights_v2(int n1, int n2, float *weights, hiprandState_t *state) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// r is the range for random values
if (i < (n1+1)*n2) {
float r = 1.0 / sqrt((float)blockDim.x-1);
weights[i] = get_random_range(-r, r, threadIdx.x, state);
}
}
__global__ void init_deltas_v2(unsigned int n1, unsigned int n2, float *deltas) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < (n1+1)*n2) {
deltas[i] = 0;
}
}
/* --------------- Referencing and simple set function ---------------
* set bias
*
*/
//used when copying patterns to device
__global__ void set_bias(int n_input, float *d_inp) {
d_inp[n_input] = 1;
}
/*
* -------------- Error calculation ---------------
* output_correct
* mse_sum
*
*/
__device__ int d_num_correct = 0;
__device__ float d_acc = 0;
__device__ float d_mse_sum = 0;
__device__ float d_mse = 0; //current mse
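// These error accumulators live in device global memory. calc_acc / calc_mse fold the running
// sums into d_acc / d_mse and reset them for the next pass; the host presumably reads the
// results back with something like hipMemcpyFromSymbol (the host-side driver appears later in
// this file, so that readback mechanism is an assumption here).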
__global__ void output_correct_v2(float *output, float *d_set, int t, int n_output) {
float *target = &(d_set[t]);
int n = 0;
for (int i = 0; i < n_output; ++i) {
if (clamp(output[i]) == clamp(target[i])) {
++n;
}
}
d_num_correct += (n == n_output);
}
__global__ void calc_acc(int n_patterns) {
d_acc = ((float)d_num_correct/n_patterns * 100);
d_num_correct = 0;
}
/**
 * single threaded; accumulates the squared error for one pattern into d_mse_sum
*/
__global__ void mse_sum_v2(float *output, float *d_set, int t, int n_output) {
float sum = 0;
float *target = &(d_set[t]);
for (int i = 0; i < n_output; ++i) {
sum += pow(output[i] - target[i], 2);
}
d_mse_sum += sum;
}
/**
* single threaded
*/
__global__ void calc_mse(int n_output, int n_patterns) {
d_mse = d_mse_sum / (n_output * n_patterns);
d_mse_sum = 0;
}
/*
* ---- feed forward kernels -----------
*
* method 1 calculates each node in the next layer with a single thread computing for each output node
* method 2 has a thread for each term in the linear combination to compute the output
* then the activation is computed after syncing threads.
*/
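/*
 * Launch sketch for the two approaches (an illustrative assumption; the real call sites
 * appear later in this file):
 *   method 1: one thread per layer-2 node
 *     hipLaunchKernelGGL(( feed_forward_layer_v1_2), dim3((n_hidden + 127) / 128), dim3(128), 0, 0,
 *                        n_input, n_hidden, d_input, d_hidden, d_ih_weights);
 *   method 2: one block per layer-2 node, one thread per layer-1 term, reduced in shared memory
 *             (assumes the block size exceeds the input-layer size)
 *     hipLaunchKernelGGL(( feed_forward_layer_v2<256>), dim3(n_hidden), dim3(256),
 *                        256 * sizeof(float), 0, n_input, n_hidden, d_input, d_hidden, d_ih_weights);
 */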
/*
* to measure bandwidth:
 * (bytes read + bytes written) / (time secs * 10^9) = GB/s
*
* bytes read = 4* ((n_layer1+1)*2),
* bytes written = 4* (n_layer2)
* total/thread = 4*((n_layer1+1)*2 + n_layer2)
* threads l1 -> l2 = n_hidden
* threads l2 -> l3 = n_output
*
* total_l1->l2 = n_hidden*4*((n_layer1+1)*2 + n_layer2)
* total_l2->l3 = n_output*4*((n_layer2+1)*2 + n_layer3)
*
* total = total_l1->l2 + total_l2->l3;
*/
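/*
 * Host-side sketch of the bandwidth estimate described above. It simply mirrors the byte
 * counts from the comment; the elapsed time would come from timing the two feed-forward
 * launches, which is an assumed usage rather than code that exists in this class.
 */
static inline double feed_forward_bandwidth_gb(int n_input, int n_hidden, int n_output, double secs) {
	double total_l1_l2 = (double)n_hidden * 4.0 * ((n_input + 1) * 2 + n_hidden);  // l1 -> l2
	double total_l2_l3 = (double)n_output * 4.0 * ((n_hidden + 1) * 2 + n_output); // l2 -> l3
	return (total_l1_l2 + total_l2_l3) / (secs * 1e9); // GB per second
}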
__global__ void feed_forward_layer_v1(int n_layer1, int n_layer2, float* layer1, float* layer2, float* weights) {
int n = threadIdx.x; // node to compute;
float r = 0;
for (int i = 0; i <= n_layer1; ++i) { //include bias
r += layer1[i] * get_weight(weights, n_layer1, i, n);
}
layer2[n] = sigmoid(r);
}
/*
* Generic version, called with pow of 2 threads
*/
__global__ void feed_forward_layer_v1_2(int n_layer1, int n_layer2, float* layer1, float* layer2, float* weights) {
unsigned int n = blockIdx.x * blockDim.x+threadIdx.x; // node to compute;
if (n < n_layer2) {
float r = 0;
for (int i = 0; i <= n_layer1; ++i) { //include bias
//r += layer1[i] * weights[(n_layer1+1)*n + i];
//r += layer1[i] * get_weight(weights, n_layer1, i, n);
//r += layer1[i] * get_weight(weights, n_layer2, i, n);
r += layer1[i] * weights[(n_layer2)*i + n];
//printf("l2: n=%d, r=%f, input[%d]=%f, weight[%d,%d]=%f, t = %f\n", n, r, i, layer1[i],i,n,weights[(n_layer1+1)*n+i], (layer1[i] * weights[(n_layer1+1)*n + i]) );
}
layer2[n] = sigmoid(r);
}
}
__global__ void feed_forward_layer_v1_2_flat(int n_layer1, int n_layer2, float* d_set, int ind, float* layer2, float* weights) {
unsigned int n = blockIdx.x * blockDim.x+threadIdx.x; // node to compute;
if (n < n_layer2) {
//printf("n=%d, n_layer2=%d\n",n, n_layer2);
float *layer1 = &(d_set[ind]);
float r = 0;
for (int i = 0; i <= n_layer1; ++i) { //include bias
//r += layer1[i] * weights[(n_layer1+1)*n + i];
//r += layer1[i] * get_weight(weights, n_layer1, i, n);
//r += layer1[i] * get_weight(weights, n_layer2, i, n);
r += layer1[i] * weights[(n_layer2)*i + n];
//printf("l1: n=%d, r=%f, input[%d]=%f, weight[%d,%d]=%f, t = %f\n", n, r, i, layer1[i],i,n,weights[(n_layer1+1)*n+i], (layer1[i] * weights[(n_layer1+1)*n + i]) );
}
//printf("n = %d, sigmoid(%f)=%f\n",n, r,sigmoid(r));
layer2[n] = sigmoid(r);
}
}
/*
* calc each term of linear combination in separate thread,
* store in shared memory. So reduction in same kernel.
* Works only if num inputs is less than reasonable blocksize, probably 1024 max.
* Reduction code adapted from: NVIDIA presentation
* http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
*
*/
template <unsigned int blockSize>
__global__ void feed_forward_layer_v2(int n_layer1, int n_layer2, float* layer1, float* layer2, float* weights) {
extern __shared__ float terms[];
unsigned int n = blockIdx.x; // node to compute;
unsigned int tid = threadIdx.x;
terms[tid] = 0;
if (n < n_layer2 && tid <= n_layer1)
//terms[tid] = layer1[tid] * get_weight(weights, n_layer1, tid, n);
//terms[tid] = layer1[tid] * weights[(n_layer1+1)*n + tid];
//terms[tid] = layer1[tid] * get_weight(weights, n_layer2, tid, n);
terms[tid] = layer1[tid] * weights[(n_layer2)*tid + n];
__syncthreads();
// if (terms[tid] != 0)
// printf("l2: terms[%d]=%f\n", tid, terms[tid]);
if (blockSize >= 1024) { if (tid < 512) { terms[tid] += terms[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { terms[tid] += terms[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) {if (tid < 128) { terms[tid] += terms[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) {if (tid < 64) { terms[tid] += terms[tid + 64]; } __syncthreads(); }
if (tid < 32) { if (blockSize >= 64) terms[tid] += terms[tid + 32];
if (blockSize >= 32) terms[tid] += terms[tid + 16];
if (blockSize >= 16) terms[tid] += terms[tid + 8];
if (blockSize >= 8) terms[tid] += terms[tid + 4];
if (blockSize >= 4) terms[tid] += terms[tid + 2];
if (blockSize >= 2) terms[tid] += terms[tid + 1];
}
if (tid == 0)
layer2[n] = sigmoid(terms[0]);
//__syncthreads();
//printf("terms[%d]=%f\n", tid, terms[tid]);
}
template <unsigned int blockSize>
__global__ void feed_forward_layer_v2_flat(int n_layer1, int n_layer2, float* d_set, int ind, float* layer2, float* weights) {
extern __shared__ float terms[];
unsigned int n = blockIdx.x; // node to compute;
unsigned int tid = threadIdx.x;
terms[tid] = 0;
if (n < n_layer2 && tid <= n_layer1) {
float *layer1 = &(d_set[ind]);
//terms[tid] = layer1[tid] * get_weight(weights, n_layer1, tid, n);
//terms[tid] = layer1[tid] * weights[(n_layer1+1)*n + tid];
//terms[tid] = layer1[tid] * get_weight(weights, n_layer2, tid, n);
terms[tid] = layer1[tid] * weights[(n_layer2)*tid + n];
}
__syncthreads();
// if (terms[tid] != 0)
// printf("l1: terms[%d]=%f\n", tid, terms[tid]);
if (blockSize >= 1024) { if (tid < 512) { terms[tid] += terms[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { terms[tid] += terms[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) {if (tid < 128) { terms[tid] += terms[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) {if (tid < 64) { terms[tid] += terms[tid + 64]; } __syncthreads(); }
if (tid < 32) { if (blockSize >= 64) terms[tid] += terms[tid + 32];
if (blockSize >= 32) terms[tid] += terms[tid + 16];
if (blockSize >= 16) terms[tid] += terms[tid + 8];
if (blockSize >= 8) terms[tid] += terms[tid + 4];
if (blockSize >= 4) terms[tid] += terms[tid + 2];
if (blockSize >= 2) terms[tid] += terms[tid + 1];
}
if (tid == 0)
layer2[n] = sigmoid(terms[0]);
//__syncthreads();
//printf("terms[%d]=%f\n", tid, terms[tid]);
}
__global__ void clamp_outputs(float *output, int n) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < n) {
output[i] = clamp(output[i]);
}
}
/*
*
*
* ------------ backprop kernels ---------
* It is slightly faster if I manually inline the calc_output_gradient and calc_hidden_gradient functions.
* But VERY slightly. About 10 ms cumulatively over 1000 iterations. So insignificant I'm not
* going to mess with it.
*
*/
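/*
 * Typical per-pattern ordering of these kernels (an assumption based on the kernel names;
 * the host-side driver appears later in this file):
 *   1. output_error_gradients_v2        - gradients at the output layer
 *   2. update_hidden_output_deltas_v2   - accumulate hidden->output weight deltas
 *   3. hidden_error_gradients_v2 / _v3  - back-propagate the error to the hidden layer
 *   4. update_input_hidden_deltas_v2    - accumulate input->hidden weight deltas
 *   5. update_weights_v2                - apply deltas (or update_weights_batch_v2 once per batch)
 */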
/**
* Compute the output gradient given specific output and target values
* \param output float
* \param target float
*/
__device__ __inline__ float calc_output_gradient(float output, float target) {
return output * (1 - output) * (target - output);
}
/*
* called generically, pow of 2 threads
*/
__global__ void output_error_gradients_v2(float* output, float* d_set, int t, float* output_err_gradients, int no) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < no) {
float *target = &(d_set[t]);
output_err_gradients[i] = calc_output_gradient(output[i], target[i]);
//printf("out_err_grad[%d] = %f, output = %f, target = %f\n", i, output_err_gradients[i], output[i], target[i]);
}
}
/*
* called generically with power of 2 threads
*/
__global__ void update_hidden_output_deltas_v2(int nh, int no, float l_rate, float momentum,
float* hidden, float* output_err_gradients, float* delta_ho) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (nh+1)*no) { // if in range
//int j = x % (nh+1); //hidden node
//int k = x / (nh+1);
int j = x / no;
int k = x % no;
//delta_ho[nh*k + j] = l_rate * hidden[j]hidden_err_grad * output_err_gradients[k] + momentum * delta_ho[nh*k + j];
//NOTE: likely to be more hidden nodes than output nodes so more advantageous to keep j coalesced
delta_ho[x] = l_rate * hidden[j] * output_err_gradients[k] + momentum * delta_ho[x];
//printf("x=%d, delta_ho(%d, %d) = %f, l_rate = %f, hidden[%d] = %f, out_err_gradients[%d] = %f, momentum = %f\n",
// x, j, k, delta_ho[x], l_rate, j, hidden[j], k, output_err_gradients[k], momentum);
}
}
__device__ __inline__ float calc_hidden_gradient(int j, int nh, int no, float* hidden, float* d_ho_weights, float* output_err_gradients) {
//get sum of hidden->output weights * output error gradients
float s = 0;
for (int k = 0; k < no; ++k)
//s += get_weight(d_ho_weights, nh, j, k) * output_err_gradients[k];
s += get_weight(d_ho_weights, no, j, k) * output_err_gradients[k];
//s += d_ho_weights[(nh+1)*k + j] * output_err_gradients[k];
//return error gradient
return hidden[j] * (1 - hidden[j]) * s;
}
/*
* called generically, pow of 2 threads
*/
__global__ void hidden_error_gradients_v2(int nh, int no, float* hidden, float* d_ho_weights, float* hidden_err_gradients, float* output_err_gradients) {
unsigned int j = blockIdx.x * blockDim.x+threadIdx.x;
if (j < nh) { //NOTE: another bug, had (j < (nh+1)*no), only nh nodes need calculated
hidden_err_gradients[j] = calc_hidden_gradient(j, nh, no, hidden, d_ho_weights, output_err_gradients);
//printf("hidden_err_grad[%d] = %f\n", j, hidden_err_gradients[j]);
}
}
/*
* num blocks = num hidden nodes
* num threads per block = 128, 256 etc
*
* This can be used as long as the number of output nodes is less than 128 or say 256.
* If the output is a single node the other way is likely faster.
*/
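/*
 * Launch sketch (an illustrative assumption, not the actual call site): one block per hidden
 * node with a power-of-two block size, e.g.
 *   hipLaunchKernelGGL(( hidden_error_gradients_v3<128>), dim3(nh), dim3(128),
 *                      128 * sizeof(float), 0, nh, no, d_hidden, d_ho_weights,
 *                      d_hid_err_gradients, d_out_err_gradients);
 */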
template <unsigned int blockSize>
__global__ void hidden_error_gradients_v3(int nh, int no, float* hidden, float* d_ho_weights, float* hidden_err_gradients, float* output_err_gradients) {
extern __shared__ float terms[]; // the number of terms will be equal to the number of output nodes
unsigned int j = blockIdx.x; //hidden node gradient to compute
unsigned int tid = threadIdx.x; //
terms[tid] = 0;
if (j < nh && tid < no) { //no bias on output so not <=
//terms[tid] = get_weight(d_ho_weights, nh, j, tid) * output_err_gradients[tid];
terms[tid] = get_weight(d_ho_weights, no, j, tid) * output_err_gradients[tid];
//terms[tid] = d_ho_weights[(nh+1)*tid + j] * output_err_gradients[tid];
}
__syncthreads();
if (blockSize >= 256) {if (tid < 128) { terms[tid] += terms[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) {if (tid < 64) { terms[tid] += terms[tid + 64]; } __syncthreads(); }
if (tid < 32) { if (blockSize >= 64) terms[tid] += terms[tid + 32];
if (blockSize >= 32) terms[tid] += terms[tid + 16];
if (blockSize >= 16) terms[tid] += terms[tid + 8];
if (blockSize >= 8) terms[tid] += terms[tid + 4];
if (blockSize >= 4) terms[tid] += terms[tid + 2];
if (blockSize >= 2) terms[tid] += terms[tid + 1];
}
if (tid == 0)
hidden_err_gradients[j] = hidden[j] * (1 - hidden[j]) * terms[0];
}
/*
* called with any number of blocks / threads
* normally, 128 or other power of 2
*/
//TODO: perhaps there is a way to store the hidden_err_gradient[j] in shared memory
__global__ void update_input_hidden_deltas_v2(int ni, int nh, float l_rate, float momentum,
float* d_set, int i, float* hidden_err_gradients, float* delta_ih) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (ni+1)*nh) {
float *input = &(d_set[i]);
//int i = x % (ni+1); //input node, NOTE: same bug as before
//int j = x / (ni+1);
int i = x / nh;
int j = x % nh;
//delta_ih[ni*j + i] = l_rate * input[i] * hidden_err_gradients[j] + momentum * delta_ih[ni*j + i];
//NOTE: usually more input nodes than hidden nodes so keep input access coalesced
delta_ih[x] = l_rate * input[i] * hidden_err_gradients[j] + momentum * delta_ih[x];
//printf("x=%d, delta_ih(%d, %d) = %f, l_rate = %f, input[%d] = %f, hidden_err_gradients[%d] = %f, momentum = %f\n",
// x, i, j, delta_ih[x], l_rate, i, input[i], j, hidden_err_gradients[j], momentum);
}
}
/*
* called generically with power of 2 threads
*/
__global__ void update_weights_v2(int n1, int n2, float *d_weights, float *deltas) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (n1+1)*n2) {
//Indexing is irrelevant here
d_weights[x] += deltas[x];
}
}
__constant__ __device__ float delta_min = -0.01;
__constant__ __device__ float delta_max = 0.01;
/*
* splitting kernels to save stochastic update a few comparisons
*/
__global__ void update_weights_batch_v2(int n1, int n2, float *d_weights, float *deltas) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (n1+1)*n2) {
//Indexing is irrelevant here
if (deltas[x] > delta_max) //using batch
d_weights[x] += delta_max;
else if (deltas[x] < delta_min)
d_weights[x] += delta_min;
else
d_weights[x] += deltas[x];
deltas[x] = 0;
}
}
/*
*
* ------- RProp Kernels -----------
*
*/
/*
* called generically, pow of 2 threads
*/
__global__ void output_error_gradients_rprop(float* output, float* target, float* output_err_gradients, float* output_err_gradients_tmp, int no) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < no) {
output_err_gradients_tmp[i] = output_err_gradients[i];
output_err_gradients[i] = calc_output_gradient(output[i], target[i]);
//printf("out_err_grad[%d] = %f, output = %f, target = %f\n", i, output_err_gradients[i], output[i], target[i]);
}
}
__global__ void update_hidden_output_deltas_rprop(int nh, int no, float step_p, float step_m, float d_max, float d_min,
float* hidden, float* output_err_gradients, float* output_err_gradients_tmp, float* delta_ho) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (nh+1)*no) { // if in range
		int k = x % no; //output node (same decomposition as update_hidden_output_deltas_v2)
		float r = output_err_gradients[k] * output_err_gradients_tmp[k]; //did this output's gradient change sign?
		if (r > 0) {
			delta_ho[x] = min(delta_ho[x] * step_p, d_max);
		} else if (r < 0) {
			delta_ho[x] = max(delta_ho[x] * step_m, d_min);
} else {
//TODO: need something here for start when delta = 0
}
}
}
__global__ void update_weights_rprop(int n1, int n2, float *d_weights, float* gradients, float *deltas) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (n1+1)*n2) {
		int j = x % n2; //layer 2 node (x / n2 is the layer 1 node; with the row-major layout n2*i + j == x)
		int sign = (gradients[j] > 0) - (gradients[j] < 0);
		d_weights[x] -= sign * deltas[x];
}
}
/*
*
* --------- Debugging ------------
*
*/
__global__ void print_gpu_net(int n_input, int n_hidden, int n_output,
float *hidden, float *output, float *ih_weights, float *ho_weights) {
//for (int i = 0; i <= n_input; ++i) {
// printf("input %d: %f, ", i, input[i]);
//}
//printf("\n");
for (int i = 0; i <= n_input; ++i) {
for (int j = 0; j < n_hidden; ++j) {
printf("ih weight (%d,%d,%d): %f, ", i, j, (n_input+1)*j + i, get_weight(ih_weights, n_input, i, j));
}
}
printf("\n");
for (int i = 0; i <= n_hidden; ++i) {
printf("hidden %d: %f, ", i, hidden[i]);
}
printf("\n");
for (int i = 0; i <= n_hidden; ++i) {
for (int j = 0; j < n_output; ++j) {
printf("ho weight (%d,%d,%d): %f, ", i, j, (n_hidden+1)*j + i, get_weight(ho_weights, n_hidden, i, j));
}
}
printf("\n");
for (int i = 0; i < n_output; ++i) {
printf("output %d: %f, ", i, output[i]);
}
printf("\n");
}
__global__ void print_target(int n_output, float *target) {
for (int i = 0; i < n_output; ++i) {
printf("target[%d] = %f\n", i, target[i]);
}
}
__global__ void print_input(int n_input, float *input) {
for (int i = 0; i < n_input+1; i++) {
printf("input[%d] = %f\n", i, input[i]);
}
}
/*
* ---------- Constructors -------------
*/
GPUNet::GPUNet() {
GPUNet::init_vars();
}
GPUNet::GPUNet(unsigned int ni, unsigned int no, float hidden_pct, GPUNetSettings::NetworkStructure net_type=GPUNetSettings::STANDARD) {
GPUNet::init_vars();
GPUNet::init_structure(ni, no, hidden_pct, net_type);
GPUNet::init_nio();
GPUNet::set_bsizes();
}
GPUNet::~GPUNet() {
hipFree(d_input);
hipFree(d_hidden);
hipFree(d_output);
hipFree(d_target);
hipFree(d_ih_weights);
hipFree(d_ho_weights);
hipFree(d_ih_deltas);
hipFree(d_ho_deltas);
hipFree(d_hid_err_gradients);
hipFree(d_out_err_gradients);
/*
//I'm getting a bad resource handle error at this line
CUDA_CHECK_RETURN(hipStreamDestroy(bprop_stream));
CUDA_CHECK_RETURN(hipStreamDestroy(err_calc_stream));
CUDA_CHECK_RETURN(hipStreamDestroy(weight_update_stream1));
CUDA_CHECK_RETURN(hipStreamDestroy(weight_update_stream2));
CUDA_CHECK_RETURN(hipStreamDestroy(train_stream1));
CUDA_CHECK_RETURN(hipStreamDestroy(train_stream2));
CUDA_CHECK_RETURN(hipStreamDestroy(copy_stream));
CUDA_CHECK_RETURN(hipEventDestroy(event1));
CUDA_CHECK_RETURN(hipEventDestroy(event2));*/
delete[] h_output;
delete[] h_ih_weights;
delete[] h_ho_weights;
delete[] gpu_mem;
delete nio;
}
bool GPUNet::load_netfile(std::string net_file) {
std::cout << "Initializing from net file: " << net_file << std::endl;
GPUNet::init_nio();
bool loaded = GPUNet::read_net(net_file);
GPUNet::set_bsizes();
return loaded;
}
void GPUNet::init(unsigned int ni, unsigned int no, float hidden_pct, GPUNetSettings::NetworkStructure net_type) {
GPUNet::init_structure(ni, no, hidden_pct, net_type);
GPUNet::init_nio();
GPUNet::set_bsizes();
}
/*
* -------------- public ---------------
*/
void GPUNet::init_nio() {
nio = new NetIO();
nio->set_gnet(this);
}
void GPUNet::init_structure(unsigned int ni, unsigned int no, float hidden_pct, GPUNetSettings::NetworkStructure net_type) {
if (n_input != 0) { // constructor initializing nodes has been called, error out
std::cerr << "Network has already been initialized" << std::endl;
} else if (ni != 0) { // if not empty constructor
n_input = ni;
n_output = no;
GPUNet::net_type = net_type;
if (net_type == GPUNetSettings::STANDARD) {
n_hidden = ceil(hidden_pct*ni);
} else if (net_type == GPUNetSettings::GPU_ARCH_OPT) {
			//round hidden_pct*ni up to the next multiple of 128 (integer arithmetic so the division truncates)
			n_hidden = (((unsigned int)ceil(hidden_pct*ni)) + 127) / 128 * 128;
} else {
std::cerr << "Invalid network type: " << net_type << std::endl;
exit(1);
}
}
}
void GPUNet::init_vars() {
max_epochs = GPUNetSettings::GPU_MAX_EPOCHS;
l_rate = GPUNetSettings::GPU_LEARNING_RATE;
momentum = GPUNetSettings::GPU_MOMENTUM;
desired_acc = GPUNetSettings::GPU_DESIRED_ACCURACY;
batching = GPUNetSettings::GPU_USE_BATCH;
save_freq = GPUNetSettings::GPU_SAVE_FREQUENCY;
base_file_path = GPUNetSettings::GPU_BASE_FILE_NAME;
CUDA_CHECK_RETURN(hipGetDeviceCount(&n_gpus));
epoch = 0;
trainingSetAccuracy = 0;
validationSetAccuracy = 0;
generalizationSetAccuracy = 0;
trainingSetMSE = 0;
validationSetMSE = 0;
generalizationSetMSE = 0;
start = 0;
finish = 0;
n_input = 0;
n_hidden = 0;
n_output = 0;
gpu_opt_bprop_bsize = 0;
gpu_opt_ff_ih_bsize = 0;
gpu_opt_ff_ho_bsize = 0;
/*
* device
*/
d_input = NULL;
d_hidden = NULL;
d_output = NULL;
d_target = NULL;
d_ih_weights = NULL;
d_ho_weights = NULL;
d_ih_deltas = NULL;
d_ho_deltas = NULL;
d_hid_err_gradients = NULL;
d_out_err_gradients = NULL;
/*
* host validation
*/
h_output = NULL;
h_ih_weights = NULL;
h_ho_weights = NULL;
//init gpu mem to 0 for each gpu
gpu_mem = NULL;
}
void GPUNet::set_bsizes() {
std::cout << "Finding ideal block sizes: ";
//get first power of 2 larger than n_output
gpu_opt_bprop_bsize = pow2roundup(n_output);
std::cout << "bprop bsize=" << gpu_opt_bprop_bsize << ", ";
gpu_opt_ff_ih_bsize = pow2roundup(n_input+1);
gpu_opt_ff_ho_bsize = pow2roundup(n_hidden+1);
std::cout << "ff ih bsize=" << gpu_opt_ff_ih_bsize << ", ";
std::cout << "ff ho bsize=" << gpu_opt_ff_ho_bsize << std::endl;
}
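/*
 * set_bsizes() above relies on pow2roundup, which is defined elsewhere in this
 * project. For reference, a minimal sketch of what such a helper usually looks
 * like is given below as a comment only -- an assumption about its behavior
 * (round up to the next power of two), not the project's actual definition.
 *
 *   inline unsigned int pow2roundup(unsigned int x) {
 *       if (x <= 1) return 1;
 *       --x;
 *       x |= x >> 1; x |= x >> 2; x |= x >> 4;
 *       x |= x >> 8; x |= x >> 16;
 *       return x + 1;
 *   }
 */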
void GPUNet::alloc_host_mem() {
h_output = new float[n_output];
h_ih_weights = new float[(n_input+1)*n_hidden];
h_ho_weights = new float[(n_hidden+1)*n_output];
gpu_mem = new size_t[n_gpus];
memset(gpu_mem, 0, n_gpus*sizeof(size_t));
}
/*
* allocate memory on device for
* input, hidden, output, target
* ih_weights, ho_weights
* ih_deltas, ho_deltas
* hid_err_gradients
* out_err_gradients
*/
void GPUNet::alloc_dev_mem() {
//nodes
CUDA_CHECK_RETURN(hipMalloc((void**)&d_input, (n_input+1)*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_hidden, (n_hidden+1)*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_output, (n_output)*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_target, (n_output)*sizeof(float)));
add_gpu_mem((n_input+n_hidden+(2*n_output)+2)*sizeof(float));
//weights
CUDA_CHECK_RETURN(hipMalloc((void**)&d_ih_weights, ((n_input+1)*n_hidden)*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_ho_weights, ((n_hidden+1)*n_output)*sizeof(float)));
add_gpu_mem(((n_input+1)*n_hidden + (n_hidden+1)*n_output)*sizeof(float));
//create delta arrays, include bias
CUDA_CHECK_RETURN(hipMalloc((void**)&d_ih_deltas, ((n_input+1)*n_hidden)*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_ho_deltas, ((n_hidden+1)*n_output)*sizeof(float)));
add_gpu_mem(((n_input+1)*n_hidden + (n_hidden+1)*n_output)*sizeof(float));
//error gradients
CUDA_CHECK_RETURN(hipMalloc((void**)&d_hid_err_gradients, (n_hidden+1)*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_out_err_gradients, (n_output+1)*sizeof(float)));
add_gpu_mem((n_hidden + n_output + 2)*sizeof(float));
CUDA_CHECK_RETURN(hipStreamCreate(&bprop_stream));
CUDA_CHECK_RETURN(hipStreamCreate(&err_calc_stream));
CUDA_CHECK_RETURN(hipStreamCreate(&weight_update_stream1));
CUDA_CHECK_RETURN(hipStreamCreate(&weight_update_stream2));
	CUDA_CHECK_RETURN(hipStreamCreate(&copy_stream));
CUDA_CHECK_RETURN(hipEventCreate(&event1));
CUDA_CHECK_RETURN(hipEventCreate(&event2));
}
/*
* Note: assumes sizes of networks are the same
* This is for testing purposes so that
* I can have identical networks.
*/
void GPUNet::init_from_net(Net &net, NetData &d) {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//copy first pattern to input neurons so it is copied to device, instead of zeros
//for (int i = 0; i < net.n_input; ++i) {
//net.inputNeurons[i] = d.get_training_dataset()->training_set[0]->input[i];
//}
// so hidden and output initialized to 0
CUDA_CHECK_RETURN(hipMemcpy(d_input, net.inputNeurons, (net.n_input)*sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_hidden, net.hiddenNeurons, (net.n_hidden)*sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_output, net.outputNeurons, (net.n_output)*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_bias), dim3(1),dim3(1), 0, 0, n_input, d_input);
hipLaunchKernelGGL(( set_bias), dim3(1),dim3(1), 0, 0, n_hidden, d_hidden);
CUDA_CHECK_RETURN(hipMemcpy(d_ih_weights, net.wInputHidden, (net.n_input+1)*(net.n_hidden)*sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_ho_weights, net.wHiddenOutput, (net.n_hidden+1)*(net.n_output)*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( init_deltas_v2), dim3(((n_input+1)*n_hidden+threads-1)/threads), dim3(threads), 0, 0, n_input+1, n_hidden, d_ih_deltas);
hipLaunchKernelGGL(( init_deltas_v2), dim3(((n_hidden+1)*n_output+threads-1)/threads), dim3(threads), 0, 0, n_hidden+1, n_output, d_ho_deltas);
std::cout << "Data copied to device" << std::endl;
}
void GPUNet::init_net() {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//init nodes to all 0
hipLaunchKernelGGL(( init_nodes_layer_v2), dim3((n_input+1+threads-1)/threads), dim3(threads), 0, 0, n_input+1, d_input);
hipLaunchKernelGGL(( init_nodes_layer_v2), dim3((n_hidden+1+threads-1)/threads), dim3(threads), 0, 0, n_hidden+1, d_hidden);
hipLaunchKernelGGL(( init_nodes_output_v2), dim3((n_output+threads-1)/threads), dim3(threads), 0, 0, n_output, d_output);
hipLaunchKernelGGL(( set_bias), dim3(1),dim3(1), 0, 0, n_input, d_input);
hipLaunchKernelGGL(( set_bias), dim3(1),dim3(1), 0, 0, n_hidden, d_hidden);
//init weights to random vals
hiprandState_t *state;
CUDA_CHECK_RETURN(hipMalloc(&state, threads*sizeof(hiprandState_t)));
hipLaunchKernelGGL(( curand_setup), dim3(1), dim3(threads), 0, 0, state);
hipLaunchKernelGGL(( init_weights_v2), dim3(((n_input+1)*n_hidden+threads-1)/threads), dim3(threads), 0, 0, n_input+1, n_hidden, d_ih_weights, state);
hipLaunchKernelGGL(( init_weights_v2), dim3(((n_hidden+1)*n_output+threads-1)/threads), dim3(threads), 0, 0, n_hidden+1, n_output, d_ho_weights, state);
CUDA_CHECK_RETURN(hipFree(state));
//init deltas to 0
hipLaunchKernelGGL(( init_deltas_v2), dim3(((n_input+1)*n_hidden+threads-1)/threads), dim3(threads), 0, 0, n_input+1, n_hidden, d_ih_deltas);
hipLaunchKernelGGL(( init_deltas_v2), dim3(((n_hidden+1)*n_output+threads-1)/threads), dim3(threads), 0, 0, n_hidden+1, n_output, d_ho_deltas);
CUDA_CHECK_RETURN(hipPeekAtLastError());
}
void GPUNet::set_learning_rate(float lr) {
l_rate = lr;
}
void GPUNet::set_momentum(float m) {
momentum = m;
}
void GPUNet::set_training_params(float lr, float m, bool b) {
l_rate = lr;
momentum = m;
batching = b;
}
void GPUNet::set_max_epochs(int me) {
max_epochs = me;
}
void GPUNet::set_save_frequency(int f) {
save_freq = f;
}
void GPUNet::set_desired_accuracy(float acc) {
desired_acc = acc;
}
void GPUNet::set_stopping_conds(int me, float acc) {
max_epochs = me;
desired_acc = acc;
}
void GPUNet::set_base_file_name(std::string f) {
base_file_path = f;
}
/*
* to keep it simple, run in 1 thread
*/
void GPUNet::print_net() {
CUDA_CHECK_RETURN(hipDeviceSynchronize());
hipLaunchKernelGGL(( print_gpu_net), dim3(1), dim3(1), 0, 0, n_input, n_hidden, n_output,
d_hidden, d_output, d_ih_weights, d_ho_weights);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
}
/*
* transfer weights back to host
* write important data (num_epochs, layers, nodes/layer, l_rate, momentum, max_epochs, desired_acc, current mse, current acc)
*
*/
bool GPUNet::write_net(std::string fname) {
//need to copy mse and acc back to host
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "current acc=" << trainingSetAccuracy << ", current mse=" << trainingSetMSE << std::endl;
if (!nio->write_net(fname)) {
std::cerr << "Write failed" << std::endl;
return false;
}
return true;
}
bool GPUNet::read_net(std::string fname) {
if (!nio->read_net(fname)) {
std::cerr << "Read failed" << std::endl;
return false;
}
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//init nodes to 0
hipLaunchKernelGGL(( init_nodes_layer_v2), dim3((n_input+1+threads-1)/threads), dim3(threads), 0, 0, n_input+1, d_input);
hipLaunchKernelGGL(( init_nodes_layer_v2), dim3((n_hidden+1+threads-1)/threads), dim3(threads), 0, 0, n_hidden+1, d_hidden);
hipLaunchKernelGGL(( init_nodes_output_v2), dim3((n_output+threads-1)/threads), dim3(threads), 0, 0, n_output, d_output);
hipLaunchKernelGGL(( set_bias), dim3(1),dim3(1), 0, 0, n_input, d_input);
hipLaunchKernelGGL(( set_bias), dim3(1),dim3(1), 0, 0, n_hidden, d_hidden);
//init deltas to 0
hipLaunchKernelGGL(( init_deltas_v2), dim3(((n_input+1)*n_hidden+threads-1)/threads), dim3(threads), 0, 0, n_input+1, n_hidden, d_ih_deltas);
hipLaunchKernelGGL(( init_deltas_v2), dim3(((n_hidden+1)*n_output+threads-1)/threads), dim3(threads), 0, 0, n_hidden+1, n_output, d_ho_deltas);
return true;
}
void GPUNet::run_test_set(TrainingDataSet *tset) {
std::cout << std::endl << "Running test set: " << std::endl;
calc_dataset_parameters(tset);
float* d_test_set;
//TODO: this assumes that the validation set always fits in GPU memory. Fine for now.
CUDA_CHECK_RETURN(hipMalloc((void**)&d_test_set, tset->n_validation*tset->fpp*sizeof(float)));
CUDA_CHECK_RETURN(hipMemcpy(d_test_set, tset->validation_set, tset->n_validation*tset->fpp*sizeof(float), hipMemcpyHostToDevice));
for (int i = 0; i < tset->n_validation; ++i) {
		//wait for the error-calculation kernels to finish before the next feed-forward overwrites d_output
		CUDA_CHECK_RETURN(hipStreamSynchronize(err_calc_stream));
		//Within err_calc_stream the error kernels are already serialized, so they never need to be synchronized
		//with each other; the synchronize above only guards the cross-stream dependency on d_output.
//called with index of start position of target
feed_forward_v1_2(d_test_set, i*tset->fpp);
hipLaunchKernelGGL(( mse_sum_v2), dim3(1), dim3(1), 0, err_calc_stream, d_output, d_test_set, i*tset->fpp+n_input+1, n_output);
hipLaunchKernelGGL(( output_correct_v2), dim3(1), dim3(1), 0, err_calc_stream, d_output, d_test_set, i*tset->fpp+n_input+1, n_output);
}
hipLaunchKernelGGL(( calc_mse), dim3(1), dim3(1), 0, err_calc_stream, n_output, tset->n_validation);
hipLaunchKernelGGL(( calc_acc), dim3(1), dim3(1), 0, err_calc_stream, tset->n_validation);
copy_error_to_host(&validationSetMSE, &validationSetAccuracy);
std::cout << "Test set MSE = " << validationSetMSE << std::endl;
std::cout << "Test set ACC = " << validationSetAccuracy << std::endl;
//free training set
CUDA_CHECK_RETURN(hipFree(d_test_set));
}
/*
* run the input through the network
*/
float* GPUNet::evaluate(float* input) {
//copy to device
//feed forward
//copy back output
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
float *h_out = new float[n_output];
CUDA_CHECK_RETURN(hipMemcpy(d_input, input, (n_input)*sizeof(float), hipMemcpyHostToDevice));
feed_forward_v1_2(d_input, 0);
hipLaunchKernelGGL(( clamp_outputs), dim3((n_output+threads-1)/threads), dim3(threads), 0, 0, d_output, n_output);
CUDA_CHECK_RETURN(hipMemcpy(h_out, d_output, n_output*sizeof(float), hipMemcpyDeviceToHost));
return h_out;
}
int GPUNet::get_num_input() {
return n_input;
}
int GPUNet::get_num_hidden() {
return n_hidden;
}
int GPUNet::get_num_output() {
return n_output;
}
void GPUNet::calc_dataset_parameters(TrainingDataSet *tset) {
std::cout << "Determining data set statistics" << std::endl;
// calc num patterns copyable
// num patterns = integer div of available memory / mem for single pattern
int bytes_per_pattern = sizeof(float)*((n_input+1)+(n_output));
int cur_dev = get_current_device();
std::cout << " bytes per pattern = "<<bytes_per_pattern<<std::endl;
std::cout << " total dev mem = "<< total_dev_mem(cur_dev)<<std::endl;
std::cout << " current mem usage = "<< current_mem_usage(cur_dev)<<std::endl;
	long long available_mem = (long long)total_dev_mem(cur_dev) - (long long)current_mem_usage(cur_dev);
std::cout << " available mem = "<<available_mem<<std::endl;
std::cout << " tset.size = "<<tset->n_patterns<<std::endl;
n_copyable_patterns = available_mem / bytes_per_pattern;
//ensure n_copyable_patterns is even and can be split into 2 buffers
if (n_copyable_patterns % 2 == 1) {
--n_copyable_patterns;
}
	if (n_copyable_patterns <= 0) {
		std::cerr << "Not enough device memory available to hold even a single pattern" << std::endl;
	}
if (n_copyable_patterns > tset->n_patterns) {
n_copyable_patterns = tset->n_patterns;
}
// calc num sections
// num_sections = ceil ( n_patterns / n_copyable_patterns)
n_sections = (tset->n_patterns + n_copyable_patterns - 1) / n_copyable_patterns;
std::cout << " n_copyable_patterns = "<<n_copyable_patterns<<", n_sections = "<<n_sections<<std::endl<<std::endl;
}
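/*
 * Worked example of the arithmetic above (illustrative numbers only, not
 * measured values): with n_input = 960 and n_output = 1 a pattern needs
 * bytes_per_pattern = 4*((960+1)+1) = 3848 bytes. If 2 GB (2147483648 bytes)
 * of device memory were free, n_copyable_patterns = 2147483648/3848 = 558077,
 * rounded down to 558076 so it can be split into two equal buffers, and a set
 * of 1000000 patterns would be processed in n_sections = ceil(1000000/558076)
 * = 2 sections per epoch.
 */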
void GPUNet::train_net_sectioned_overlap(TrainingDataSet *tset) {
calc_dataset_parameters(tset);
std::cout << std::endl << "Neural Network Training Starting: " << std::endl
<< "----------------------------------------------------" << std::endl
<< "LR: " << l_rate << ", Momentum: " << momentum << ", Max Epochs: " << max_epochs << std::endl
<< n_input << " Input Neurons, " << n_hidden << " Hidden Neurons, " << n_output << " Output Neurons" << std::endl
<< "----------------------------------------------------" << std::endl << std::endl;
int buffer_size = n_copyable_patterns / 2;
float* d_training_buffer1;
float* d_training_buffer2;
//by default allocate array as large as possible
CUDA_CHECK_RETURN(hipMalloc((void**)&d_training_buffer1, buffer_size*tset->fpp*sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_training_buffer2, buffer_size*tset->fpp*sizeof(float)));
CUDA_CHECK_RETURN(hipMemcpyAsync(d_training_buffer1, tset->training_set, buffer_size*tset->fpp*sizeof(float), hipMemcpyHostToDevice, copy_stream));
int p_start = buffer_size;
while (epoch < max_epochs) {
std::cout << "Epoch: " << epoch << ", ";
//on even epochs, process data in buffer1
//copy data to buffer 2
if (p_start > tset->n_training) p_start = 0;
int p_end = p_start+buffer_size;
if (p_end > tset->n_training) p_end = tset->n_training;
//ensure finished copying before processing that buffer
CUDA_CHECK_RETURN(hipStreamSynchronize(copy_stream));
if (epoch % 2 == 0) {
			CUDA_CHECK_RETURN(hipMemcpyAsync(d_training_buffer2, &(tset->training_set[p_start*tset->fpp]), (p_end-p_start)*tset->fpp*sizeof(float), hipMemcpyHostToDevice, copy_stream));
run_training_epoch_dev(d_training_buffer1, buffer_size, tset->fpp);
} else {
			CUDA_CHECK_RETURN(hipMemcpyAsync(d_training_buffer1, &(tset->training_set[p_start*tset->fpp]), (p_end-p_start)*tset->fpp*sizeof(float), hipMemcpyHostToDevice, copy_stream));
run_training_epoch_dev(d_training_buffer2, buffer_size, tset->fpp);
}
p_start += buffer_size;
++epoch;
if (epoch % save_freq == 0) {
std::string fname = "nets/face_" + boost::lexical_cast<std::string>(epoch) + ".net";
std::cout << "Writing intermediary net " << fname << std::endl;
write_net(fname);
}
}
	//report the training set accuracy and MSE
std::cout << std::endl << "Training complete. Elapsed epochs: " << epoch << std::endl;
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "MSE = " << trainingSetMSE << std::endl;
std::cout << "ACC = " << trainingSetAccuracy << std::endl;
}
__global__ void printtset(float* set, int n) {
for (int i = 0; i < n; i++) {
printf("%f ", set[i]);
}
printf("\n");
}
void GPUNet::train_net_sectioned(TrainingDataSet *tset) {
calc_dataset_parameters(tset);
std::cout << std::endl << "Neural network training starting: " << std::endl
<< "----------------------------------------------------" << std::endl
<< "LR: " << l_rate << ", momentum: " << momentum << ", max epochs: " << max_epochs << std::endl
<< n_input << " input, " << n_hidden << " hidden, " << n_output << " output " << std::endl
<< "----------------------------------------------------" << std::endl << std::endl;
float* d_training_set;
//by default allocate array as large as possible
CUDA_CHECK_RETURN(hipMalloc((void**)&d_training_set, n_copyable_patterns*tset->fpp*sizeof(float)));
if (n_sections == 1) { // no section copying necessary
CUDA_CHECK_RETURN(hipMemcpy(d_training_set, tset->training_set, tset->n_training*tset->fpp*sizeof(float), hipMemcpyHostToDevice));
while (epoch < max_epochs) {
std::cout << "Epoch: " << epoch << ", ";
run_training_epoch_dev(d_training_set, tset->n_training, tset->fpp);
++epoch;
//copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
//std::cout << "current mse = " << trainingSetMSE << ", current acc = " << trainingSetAccuracy << std::endl;
if (epoch % save_freq == 0) {
std::string fname = base_file_path + "_" + boost::lexical_cast<std::string>(epoch) + ".net";
std::cout << "Writing intermediary net " << fname << std::endl;
write_net(fname);
if (trainingSetAccuracy > desired_acc)
break; //just run test set
}
}
} else {
while (epoch < max_epochs) {
std::cout << "Epoch: " << epoch << std::endl;
//copy a section and run partial epoch
for (int i = 0; i < n_sections; ++i) {
//copy patterns from [n_sections*n_patterns_copyable, (n_sections+1)*n_patterns_copyable)
int p_start = i * n_copyable_patterns;
int p_end = p_start + n_copyable_patterns;
if (p_end > tset->n_training) p_end = tset->n_training;
std::cout << "copying section="<<i<<", pstart="<< p_start << ", pend="<<p_end << std::endl;
				CUDA_CHECK_RETURN(hipMemcpy(d_training_set, &(tset->training_set[p_start*tset->fpp]), (p_end-p_start)*tset->fpp*sizeof(float), hipMemcpyHostToDevice));
std::cout << "data copied" << std::endl;
run_training_epoch_dev(d_training_set, p_end-p_start, tset->fpp);
}
//once training set is complete increment epoch
++epoch;
}
}
	//report the training set accuracy and MSE
std::cout << std::endl << "Training complete. Elapsed epochs: " << epoch << std::endl;
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "MSE = " << trainingSetMSE << std::endl;
std::cout << "ACC = " << trainingSetAccuracy << std::endl;
//free training set
CUDA_CHECK_RETURN(hipFree(d_training_set));
run_test_set(tset);
}
void GPUNet::copy_error_to_host(float* mse, float* acc) {
CUDA_CHECK_RETURN(hipStreamSynchronize(err_calc_stream)); //make sure error calculation has completed.
CUDA_CHECK_RETURN(hipMemcpyFromSymbol(mse, d_mse, sizeof(float), 0, hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpyFromSymbol(acc, d_acc, sizeof(float), 0, hipMemcpyDeviceToHost));
}
void GPUNet::run_training_epoch_dev(float *set, int n_features, int fpp) {
int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
start = clock();
if (batching) {
//if doing batch make sure weights have been updated after last epoch
CUDA_CHECK_RETURN(hipStreamSynchronize(weight_update_stream1));
CUDA_CHECK_RETURN(hipStreamSynchronize(weight_update_stream2));
}
for (int i = 0; i < n_features; ++i) {
//called with index of start position of target
if (!batching) {
//need to wait for weight update, otherwise no sync because gradients and deltas in same stream
CUDA_CHECK_RETURN(hipStreamSynchronize(weight_update_stream1));
CUDA_CHECK_RETURN(hipStreamSynchronize(weight_update_stream2));
}
feed_forward_v2(set, i*fpp);
backprop_v3(set, i*fpp, i*fpp+n_input+1);
}
if (batching) { //update weights here and reset deltas
CUDA_CHECK_RETURN(hipEventRecord(event1, bprop_stream));
CUDA_CHECK_RETURN(hipStreamWaitEvent(weight_update_stream1, event1, 0));
CUDA_CHECK_RETURN(hipEventRecord(event2, bprop_stream));
CUDA_CHECK_RETURN(hipStreamWaitEvent(weight_update_stream2, event2, 0));
hipLaunchKernelGGL(( update_weights_batch_v2), dim3(((n_output*(n_hidden+1))+n_threads-1)/n_threads), dim3(n_threads), 0, weight_update_stream1, n_hidden, n_output, d_ho_weights, d_ho_deltas);
hipLaunchKernelGGL(( update_weights_batch_v2), dim3(((n_hidden*(n_input+1))+n_threads-1)/n_threads), dim3(n_threads), 0, weight_update_stream2, n_input, n_hidden, d_ih_weights, d_ih_deltas);
}
hipLaunchKernelGGL(( calc_mse), dim3(1), dim3(1), 0, err_calc_stream, n_output, n_features);
hipLaunchKernelGGL(( calc_acc), dim3(1), dim3(1), 0, err_calc_stream, n_features);
finish = clock();
std::cout << "time: " << ((double)finish-start)/CLOCKS_PER_SEC << std::endl;
}
void GPUNet::backprop_v2(float* d_set, int i, int t) {
int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//maintain mse state
hipLaunchKernelGGL(( mse_sum_v2), dim3(1), dim3(1), 0, err_calc_stream, d_output, d_set, t, n_output);
hipLaunchKernelGGL(( output_correct_v2), dim3(1), dim3(1), 0, err_calc_stream, d_output, d_set, t, n_output);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
//float mse_sum = 0;
//CUDA_CHECK_RETURN(hipMemcpyFromSymbol(&mse_sum, d_mse_sum, sizeof(float), 0, hipMemcpyDeviceToHost));
//std::cout << "Current mse_sum = " << mse_sum << std::endl;
hipLaunchKernelGGL(( output_error_gradients_v2), dim3((n_output+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, d_output, d_set, t, d_out_err_gradients, n_output);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
hipLaunchKernelGGL(( update_hidden_output_deltas_v2), dim3(((n_output*(n_hidden+1))+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_hidden, n_output, l_rate, momentum, d_hidden, d_out_err_gradients, d_ho_deltas);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
hipLaunchKernelGGL(( hidden_error_gradients_v2), dim3((n_hidden+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
	if (!batching) { //stochastic mode: apply the deltas now (batch mode defers weight updates to the end of the epoch)
CUDA_CHECK_RETURN(hipEventRecord(event1, bprop_stream));
CUDA_CHECK_RETURN(hipStreamWaitEvent(weight_update_stream1, event1, 0));
hipLaunchKernelGGL(( update_weights_v2), dim3(((n_output*(n_hidden+1))+n_threads-1)/n_threads), dim3(n_threads), 0, weight_update_stream1, n_hidden, n_output, d_ho_weights, d_ho_deltas);
}
hipLaunchKernelGGL(( update_input_hidden_deltas_v2), dim3(((n_hidden*(n_input+1))+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_input, n_hidden, l_rate, momentum,
d_set, i, d_hid_err_gradients, d_ih_deltas);
	if (!batching) { //stochastic mode: apply the deltas now (batch mode defers weight updates to the end of the epoch)
CUDA_CHECK_RETURN(hipEventRecord(event2, bprop_stream));
CUDA_CHECK_RETURN(hipStreamWaitEvent(weight_update_stream2, event2, 0));
hipLaunchKernelGGL(( update_weights_v2), dim3(((n_hidden*(n_input+1))+n_threads-1)/n_threads), dim3(n_threads), 0, weight_update_stream2, n_input, n_hidden, d_ih_weights, d_ih_deltas);
}
}
void GPUNet::backprop_v3(float* d_set, int i, int t) {
int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//maintain mse state
hipLaunchKernelGGL(( mse_sum_v2), dim3(1), dim3(1), 0, err_calc_stream, d_output, d_set, t, n_output);
hipLaunchKernelGGL(( output_correct_v2), dim3(1), dim3(1), 0, err_calc_stream, d_output, d_set, t, n_output);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
//float mse_sum = 0;
//CUDA_CHECK_RETURN(hipMemcpyFromSymbol(&mse_sum, d_mse_sum, sizeof(float), 0, hipMemcpyDeviceToHost));
//std::cout << "Current mse_sum = " << mse_sum << std::endl;
hipLaunchKernelGGL(( output_error_gradients_v2), dim3((n_output+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, d_output, d_set, t, d_out_err_gradients, n_output);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
hipLaunchKernelGGL(( update_hidden_output_deltas_v2), dim3(((n_output*(n_hidden+1))+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_hidden, n_output, l_rate, momentum, d_hidden, d_out_err_gradients, d_ho_deltas);
//CUDA_CHECK_RETURN(hipDeviceSynchronize());
if (gpu_opt_bprop_bsize <= 1) {
hipLaunchKernelGGL(( hidden_error_gradients_v2), dim3((n_hidden+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 32) {
hipLaunchKernelGGL(( hidden_error_gradients_v3<32>), dim3(n_hidden), dim3(32), 32*sizeof(float), bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 64) {
hipLaunchKernelGGL(( hidden_error_gradients_v3<64>), dim3(n_hidden), dim3(64), 64*sizeof(float), bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 128) {
hipLaunchKernelGGL(( hidden_error_gradients_v3<128>), dim3(n_hidden), dim3(128), 128*sizeof(float), bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 256) {
hipLaunchKernelGGL(( hidden_error_gradients_v3<256>), dim3(n_hidden), dim3(256), 256*sizeof(float), bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else {
hipLaunchKernelGGL(( hidden_error_gradients_v2), dim3((n_hidden+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
}
	if (!batching) { //stochastic mode: apply the deltas now (batch mode defers weight updates to the end of the epoch)
CUDA_CHECK_RETURN(hipEventRecord(event1, bprop_stream));
CUDA_CHECK_RETURN(hipStreamWaitEvent(weight_update_stream1, event1, 0));
hipLaunchKernelGGL(( update_weights_v2), dim3(((n_output*(n_hidden+1))+n_threads-1)/n_threads), dim3(n_threads), 0, weight_update_stream1, n_hidden, n_output, d_ho_weights, d_ho_deltas);
}
hipLaunchKernelGGL(( update_input_hidden_deltas_v2), dim3(((n_hidden*(n_input+1))+n_threads-1)/n_threads), dim3(n_threads), 0, bprop_stream, n_input, n_hidden, l_rate, momentum,
d_set, i, d_hid_err_gradients, d_ih_deltas);
	if (!batching) { //stochastic mode: apply the deltas now (batch mode defers weight updates to the end of the epoch)
CUDA_CHECK_RETURN(hipEventRecord(event2, bprop_stream));
CUDA_CHECK_RETURN(hipStreamWaitEvent(weight_update_stream2, event2, 0));
hipLaunchKernelGGL(( update_weights_v2), dim3(((n_hidden*(n_input+1))+n_threads-1)/n_threads), dim3(n_threads), 0, weight_update_stream2, n_input, n_hidden, d_ih_weights, d_ih_deltas);
}
}
void GPUNet::rprop(float *d_inp, float *d_tar) {
//int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//calc hidden out gradients
//
}
void GPUNet::feed_forward_v1_2(float* d_set, int i) {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
hipLaunchKernelGGL(( feed_forward_layer_v1_2_flat), dim3((n_hidden+threads-1)/threads), dim3(threads), 0, 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
hipLaunchKernelGGL(( feed_forward_layer_v1_2), dim3((n_output+threads-1)/threads), dim3(threads), 0, 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
}
/*
* TODO: what if the first layer cannot be done using the reduction but the second layer can
*/
/*void GPUNet::feed_forward_v2(float* d_set, int i) {
switch (gpu_opt_ff_ih_bsize) {
case 1:
feed_forward_layer_v2_flat<1><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<1><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 2:
feed_forward_layer_v2_flat<2><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<2><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 4:
feed_forward_layer_v2_flat<4><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<4><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 8:
feed_forward_layer_v2_flat<8><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<8><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 16:
feed_forward_layer_v2_flat<16><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<16><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 32:
feed_forward_layer_v2_flat<32><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<32><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 64:
feed_forward_layer_v2_flat<64><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<64><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 128:
feed_forward_layer_v2_flat<128><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<128><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 256:
feed_forward_layer_v2_flat<256><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<256><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 512:
feed_forward_layer_v2_flat<512><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<512><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 1024:
feed_forward_layer_v2_flat<1024><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<1024><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
}
}*/
void GPUNet::feed_forward_v2(float* d_set, int i) {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
if (gpu_opt_ff_ih_bsize <= 32) {
hipLaunchKernelGGL(( feed_forward_layer_v2_flat<32>), dim3(n_hidden), dim3(32), 32*sizeof(float), 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 64) {
hipLaunchKernelGGL(( feed_forward_layer_v2_flat<64>), dim3(n_hidden), dim3(64), 64*sizeof(float), 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 128) {
hipLaunchKernelGGL(( feed_forward_layer_v2_flat<128>), dim3(n_hidden), dim3(128), 128*sizeof(float), 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 256) {
hipLaunchKernelGGL(( feed_forward_layer_v2_flat<256>), dim3(n_hidden), dim3(256), 256*sizeof(float), 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 512) {
hipLaunchKernelGGL(( feed_forward_layer_v2_flat<512>), dim3(n_hidden), dim3(512), 512*sizeof(float), 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 1024) {
hipLaunchKernelGGL(( feed_forward_layer_v2_flat<1024>), dim3(n_hidden), dim3(1024), 1024*sizeof(float), 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else {
hipLaunchKernelGGL(( feed_forward_layer_v1_2_flat), dim3((n_hidden+threads-1)/threads), dim3(threads), 0, 0, n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
}
if (gpu_opt_ff_ho_bsize <= 32) {
hipLaunchKernelGGL(( feed_forward_layer_v2<32>), dim3(n_output), dim3(32), 32*sizeof(float), 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 64) {
hipLaunchKernelGGL(( feed_forward_layer_v2<64>), dim3(n_output), dim3(64), 64*sizeof(float), 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 128) {
hipLaunchKernelGGL(( feed_forward_layer_v2<128>), dim3(n_output), dim3(128), 128*sizeof(float), 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 256) {
hipLaunchKernelGGL(( feed_forward_layer_v2<256>), dim3(n_output), dim3(256), 256*sizeof(float), 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 512) {
hipLaunchKernelGGL(( feed_forward_layer_v2<512>), dim3(n_output), dim3(512), 512*sizeof(float), 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 1024) {
hipLaunchKernelGGL(( feed_forward_layer_v2<1024>), dim3(n_output), dim3(1024), 1024*sizeof(float), 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else {
hipLaunchKernelGGL(( feed_forward_layer_v1_2), dim3((n_output+threads-1)/threads), dim3(threads), 0, 0, n_hidden, n_output, d_hidden, d_output, d_ho_weights);
}
}
bool GPUNet::validate_output(float* desired_output) {
//copy output back to host
CUDA_CHECK_RETURN(hipMemcpy(h_output, d_output, n_output*sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < n_output; ++i) {
//std::cout << "actual = " << desired_output[i] << ", calc = " << h_output[i] << std::endl;
		if (fabs(desired_output[i] - h_output[i]) > .005)
return false;
}
return true;
}
bool GPUNet::validate_weights(float *desired_ih_weights, float *desired_ho_weights) {
//copy inp hid weights to host
CUDA_CHECK_RETURN(hipMemcpy(h_ih_weights, d_ih_weights, (n_input+1)*n_hidden*sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpy(h_ho_weights, d_ho_weights, (n_hidden+1)*n_output*sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < (n_input+1)*n_hidden; ++i) {
		if (fabs(desired_ih_weights[i] - h_ih_weights[i]) > .001)
			return false;
	}
	for (int i = 0; i < (n_hidden+1)*n_output; ++i) {
		if (fabs(desired_ho_weights[i] - h_ho_weights[i]) > .001)
return false;
}
return true;
}
void GPUNet::test_feed_forward(Net &net, NetData &d) {
std::cout << "Testing feed forward functions" << std::endl;
//std::cout << "feed forward CPU" << std::endl;
//net.print_network();
net.feed_forward(&(d.get_training_dataset()->training_set[0]));
//std::cout << "feed forward CPU time: " << ((float)(finish-start)) / CLOCKS_PER_SEC << "s\n\n";
//net.print_network();
TrainingDataSet *tset = d.get_training_dataset();
float *d_training_set;
GPUNet::copy_to_device(tset->training_set, tset->n_training, tset->fpp, &d_training_set);
std::cout << "Testing feedforward v1.2" << std::endl;
feed_forward_v1_2(d_training_set, 0);
std::cout << "Validates: " << validate_output(net.outputNeurons) << std::endl << std::endl;
//net.print_network();
//print_net();
CUDA_CHECK_RETURN(hipMemset(d_output, 0, n_output*sizeof(float)));
//std::cout << "Testing method 1.3" << std::endl;
//feed_forward_v1_3(&(d_training_set[0]));
//std::cout << "Validates: " << validate_output(net.outputNeurons) << "\n";
//net.print_network();
//print_net();
//CUDA_CHECK_RETURN(hipMemset(d_output, 0, n_output*sizeof(float)));
std::cout << "Testing feedforward v2" << std::endl;
feed_forward_v2(d_training_set, 0);
std::cout << "Validates: " << validate_output(net.outputNeurons) << std::endl << std::endl;
CUDA_CHECK_RETURN(hipMemset(d_output, 0, n_output*sizeof(float)));
/*std::cout << "Testing method 2.2" << std::endl;
feed_forward_v2_2();
std::cout << "Validates: " << validates(net.outputNeurons) << "\n";
CUDA_CHECK_RETURN(hipMemset(d_output, 0, n_output*sizeof(float)));*/
CUDA_CHECK_RETURN(hipFree(d_training_set));
}
void GPUNet::test_backprop(Net &net, NetData &d) {
std::cout << "Testing backpropagation functions" << std::endl;
NetTrainer nt(&net);
//std::cout << "CPU net 0" << std::endl;
//net.print_network();
net.feed_forward(&(d.get_training_dataset()->training_set[0]));
//std::cout << "CPU net 1" << std::endl;
//net.print_network();
nt.backprop(&(d.get_training_dataset()->training_set[0+n_input+1]));
//std::cout << "CPU net 2" << std::endl;
//net.print_network();
int i = 0, t = n_input+1;
TrainingDataSet *tset = d.get_training_dataset();
float *d_training_set;
GPUNet::copy_to_device(tset->training_set, tset->n_training, tset->fpp, &d_training_set);
// std::cout << "Testing backprop v2" << std::endl;
//// std::cout << std::endl << "GPU net 0" << std::endl;
//// print_net();
//// std::cout << std::endl;
//
// feed_forward_v1_2(d_training_set, i);
//// std::cout << "GPU net 1" << std::endl;
//// print_net();
//// std::cout << std::endl;
//
//// std::cout << "GPU net 2" << std::endl;
// backprop_v2(d_training_set, i, t);
// CUDA_CHECK_RETURN(hipDeviceSynchronize());
//// print_net();
//// std::cout << std::endl;
// std::cout << "Validates: " << validate_weights(net.wInputHidden, net.wHiddenOutput) << std::endl << std::endl;
std::cout << "Testing backprop v3" << std::endl;
//std::cout << std::endl << "GPU net 0" << std::endl;
//print_net();
//std::cout << std::endl;
//int i = 0, t = n_input+1;
feed_forward_v1_2(d_training_set, i);
//std::cout << "GPU net 1" << std::endl;
//print_net();
//std::cout << std::endl;
//std::cout << "GPU net 2" << std::endl;
backprop_v3(d_training_set, i, t);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
net.print_network();
print_net();
std::cout << std::endl;
std::cout << "Validates: " << validate_weights(net.wInputHidden, net.wHiddenOutput) << std::endl << std::endl;
// net.feed_forward(&(tset->training_set[tset->fpp]));
// nt.backprop(&(tset->training_set[tset->fpp+n_input+1]));
// i = tset->fpp, t = i+n_input+1;
// feed_forward_v1_2(d_training_set, i);
// backprop_v2(d_training_set, i, t);
// std::cout << "Validates: " << validate_weights(net.wInputHidden, net.wHiddenOutput) << std::endl;
}
void GPUNet::run_parallel(Net &net, NetData &d) {
std::cout << "Running in parallel" <<std::endl;
TrainingDataSet *tset = d.get_training_dataset();
float *d_training_set;
GPUNet::copy_to_device(tset->training_set, tset->n_training, tset->fpp, &d_training_set);
NetTrainer nt(&net);
// std::cout << "CPU network" << std::endl;
// net.print_network();
// std::cout << "GPU network" << std::endl;
// print_net();
// std::cout << std::endl;
int e = 0;
std::string r = "";
while (true) {
std::cout << "Epoch " << e++ << std::endl;
//for (int i = 0; i < d.get_training_dataset()->n_training; ++i) {
//int inp = i*tset->fpp;
//int tar = inp+n_input+1;
//net.feed_forward(&(d.get_training_dataset()->training_set[inp]));
//nt.backprop(&(d.get_training_dataset()->training_set[tar]));
nt.run_training_epoch(d.get_training_dataset());
run_training_epoch_dev(d_training_set, tset->n_training, tset->fpp);
//feed_forward_v1_2(d_training_set, inp);
//backprop_v2(d_training_set, inp, tar);
//std::cout << "CPU network" << std::endl;
//net.print_network();
//std::cout << "GPU network" << std::endl;
//print_net();
bool v = validate_weights(net.wInputHidden, net.wHiddenOutput);
std::cout << "Validates: " << v << std::endl;
if (!v) {
// std::cout << "CPU network" << std::endl;
// net.print_network();
// std::cout << "GPU network" << std::endl;
// print_net();
}
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "GPU error: " << trainingSetMSE << ", " << trainingSetAccuracy << std::endl;
std::cout << "CPU error: " << nt.trainingSetMSE << ", " << nt.trainingSetAccuracy<< std::endl;
std::getline(std::cin, r);
if (r == "exit") {
return;
}
//}
}
}
size_t GPUNet::current_mem_usage(int dev) {
return gpu_mem[dev];
}
/*
* ------------ private ------------
*/
void GPUNet::add_gpu_mem(int bytes) {
gpu_mem[get_current_device()] += bytes;
}
int GPUNet::get_current_device() {
int device;
hipGetDevice(&device);
return device;
}
size_t GPUNet::dataset_size(TrainingDataSet *tset) {
size_t tset_size = 0;
int fv_size = (n_input + n_output) * sizeof(float);
tset_size += fv_size * tset->n_training;
tset_size += fv_size * tset->n_generalization;
tset_size += fv_size * tset->n_validation;
return tset_size;
}
size_t GPUNet::total_dev_mem(int dev) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props, dev);
	return props.totalGlobalMem - 1213382500; // - 206688900; //reserve ~1.2 GB of headroom
}
void GPUNet::copy_to_device(float* set, int n_patterns, int fpp, float **d_set) {
CUDA_CHECK_RETURN(hipMalloc((void**)d_set, n_patterns*fpp*sizeof(float)));
CUDA_CHECK_RETURN(hipMemcpy(*d_set, set, n_patterns*fpp*sizeof(float), hipMemcpyHostToDevice));
}
| 27bdf7da0360a6da376a0bc0e91548de3a66b246.cu | /*
* GPUNet.cpp
*
* Created on: Jan 5, 2014
* Author: trevor
*
* GPU Neural Network
* Maintains network state and invokes functions on the GPU
*
*/
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <boost/lexical_cast.hpp>
#include <thrust/device_vector.h>
#include "curand_kernel.h"
#include "GPUNet.h"
#include "NetIO.h"
#include "NetTrainer.h"
/*
* ------------ CUDA ------------
*/
/**
* Get a random number within a given float range
* \param min float
* \param max float
* \param i int
* \param *global curandState
*/
__device__ float get_random_range(float min, float max, int i, curandState *global) {
curandState local = global[i];
float r = curand_uniform(&local);
global[i] = local;
return min + r * (max - min);
}
/*
 * Get the weight from node i in layer1 to node j in layer2 given the flat array of weights between them.
 * n_layer is the stride of the array, i.e. the size of the destination layer (the compute kernels pass n_layer2 here).
*/
__device__ __inline__ float get_weight(float* weights, int n_layer, int i, int j) {
//return weights[(n_layer+1)*j + i];
return weights[n_layer*i + j];
}
__device__ __inline__ void set_weight(float* weights, int n_layer, int i, int j, float v) {
//weights[(n_layer+1)*j + i] = v;
weights[n_layer*i + j] = v;
}
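/*
 * Illustrative note on the layout used by get_weight/set_weight (example
 * values added here, not from the original source): the weights leaving
 * source node i are contiguous, occupying weights[n_layer*i + 0] through
 * weights[n_layer*i + (n_layer-1)], where n_layer is the destination layer
 * size. For example, with a destination layer of 3 nodes,
 * get_weight(w, 3, 2, 1) reads w[3*2 + 1] = w[7].
 */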
/**
* Compute the sigmoid value of a given float
* \param x the value to compute the sigmoid of
*/
__device__ __inline__ float sigmoid(float x) {
return 1.0 / (1.0 + exp(-x));
}
/**
* Clamp the output to 0 or 1 if within .1
*\param f the value to clamp
*/
__device__ int clamp(float f) {
if (f < .1) {
return 0;
} else if (f > .9) {
return 1;
} else {
return -1;
}
}
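/*
 * Examples (added for clarity): clamp(0.93f) == 1, clamp(0.07f) == 0, and
 * clamp(0.5f) == -1, the "ambiguous" value that output_correct_v2 below can
 * never match against a clamped 0/1 target.
 */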
/*
*
* ------------- Initialization kernels ---------------
*
*/
/**
 * Initialize random seeds in CUDA; will initialize blocksize seeds
*/
__global__ void curand_setup(curandState *state) {
unsigned int seed = (unsigned int)clock64();
int id = threadIdx.x;
curand_init(seed, id, 0, &state[id]);
}
/**
* initialize nodes to 0 or 1 if bias
* generic
*/
__global__ void init_nodes_layer_v2(int n, float *nodes) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < n) {
if (i == n-1)
nodes[i] = 1;
else
nodes[i] = 0;
}
}
/**
* set all output nodes to 0
*/
__global__ void init_nodes_output_v2(int n, float *output) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < n) {
output[i] = 0;
}
}
__global__ void init_weights_v2(int n1, int n2, float *weights, curandState *state) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// r is the range for random values
if (i < (n1+1)*n2) {
float r = 1.0 / sqrt((float)blockDim.x-1);
weights[i] = get_random_range(-r, r, threadIdx.x, state);
}
}
__global__ void init_deltas_v2(unsigned int n1, unsigned int n2, float *deltas) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < (n1+1)*n2) {
deltas[i] = 0;
}
}
/* --------------- Referencing and simple set function ---------------
* set bias
*
*/
//used when copying patterns to device
__global__ void set_bias(int n_input, float *d_inp) {
d_inp[n_input] = 1;
}
/*
* -------------- Error calculation ---------------
* output_correct
* mse_sum
*
*/
__device__ int d_num_correct = 0;
__device__ float d_acc = 0;
__device__ float d_mse_sum = 0;
__device__ float d_mse = 0; //current mse
__global__ void output_correct_v2(float *output, float *d_set, int t, int n_output) {
float *target = &(d_set[t]);
int n = 0;
for (int i = 0; i < n_output; ++i) {
if (clamp(output[i]) == clamp(target[i])) {
++n;
}
}
d_num_correct += (n == n_output);
}
__global__ void calc_acc(int n_patterns) {
d_acc = ((float)d_num_correct/n_patterns * 100);
d_num_correct = 0;
}
/*
 * single threaded
 */
__global__ void mse_sum_v2(float *output, float *d_set, int t, int n_output) {
float sum = 0;
float *target = &(d_set[t]);
for (int i = 0; i < n_output; ++i) {
sum += pow(output[i] - target[i], 2);
}
d_mse_sum += sum;
}
/**
* single threaded
*/
__global__ void calc_mse(int n_output, int n_patterns) {
d_mse = d_mse_sum / (n_output * n_patterns);
d_mse_sum = 0;
}
/*
* ---- feed forward kernels -----------
*
* method 1 calculates each node in the next layer with a single thread computing for each output node
* method 2 has a thread for each term in the linear combination to compute the output
* then the activation is computed after syncing threads.
*/
/*
* to measure bandwidth:
 * (bytes read + bytes written) / (time secs * 10^9) = GB/s
*
* bytes read = 4* ((n_layer1+1)*2),
* bytes written = 4* (n_layer2)
* total/thread = 4*((n_layer1+1)*2 + n_layer2)
* threads l1 -> l2 = n_hidden
* threads l2 -> l3 = n_output
*
* total_l1->l2 = n_hidden*4*((n_layer1+1)*2 + n_layer2)
* total_l2->l3 = n_output*4*((n_layer2+1)*2 + n_layer3)
*
* total = total_l1->l2 + total_l2->l3;
*/
__global__ void feed_forward_layer_v1(int n_layer1, int n_layer2, float* layer1, float* layer2, float* weights) {
int n = threadIdx.x; // node to compute;
float r = 0;
for (int i = 0; i <= n_layer1; ++i) { //include bias
r += layer1[i] * get_weight(weights, n_layer1, i, n);
}
layer2[n] = sigmoid(r);
}
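/*
 * Hedged helper for the bandwidth estimate derived in the comment above
 * feed_forward_layer_v1 (added for illustration; the name and its placement
 * here are assumptions, not part of the original interface). It evaluates the
 * two totals from that comment and converts a measured time to GB/s.
 */
static inline float feed_forward_bandwidth_gb(int n_input, int n_hidden, int n_output, float secs) {
	//total_l1->l2 = n_hidden * 4 * ((n_layer1+1)*2 + n_layer2), with n_layer2 = n_hidden
	float total_l1_l2 = (float)n_hidden * 4.0f * ((n_input + 1) * 2 + n_hidden);
	//total_l2->l3 = n_output * 4 * ((n_layer2+1)*2 + n_layer3), with n_layer3 = n_output
	float total_l2_l3 = (float)n_output * 4.0f * ((n_hidden + 1) * 2 + n_output);
	return (total_l1_l2 + total_l2_l3) / (secs * 1e9f);
}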
/*
* Generic version, called with pow of 2 threads
*/
__global__ void feed_forward_layer_v1_2(int n_layer1, int n_layer2, float* layer1, float* layer2, float* weights) {
unsigned int n = blockIdx.x * blockDim.x+threadIdx.x; // node to compute;
if (n < n_layer2) {
float r = 0;
for (int i = 0; i <= n_layer1; ++i) { //include bias
//r += layer1[i] * weights[(n_layer1+1)*n + i];
//r += layer1[i] * get_weight(weights, n_layer1, i, n);
//r += layer1[i] * get_weight(weights, n_layer2, i, n);
r += layer1[i] * weights[(n_layer2)*i + n];
//printf("l2: n=%d, r=%f, input[%d]=%f, weight[%d,%d]=%f, t = %f\n", n, r, i, layer1[i],i,n,weights[(n_layer1+1)*n+i], (layer1[i] * weights[(n_layer1+1)*n + i]) );
}
layer2[n] = sigmoid(r);
}
}
__global__ void feed_forward_layer_v1_2_flat(int n_layer1, int n_layer2, float* d_set, int ind, float* layer2, float* weights) {
unsigned int n = blockIdx.x * blockDim.x+threadIdx.x; // node to compute;
if (n < n_layer2) {
//printf("n=%d, n_layer2=%d\n",n, n_layer2);
float *layer1 = &(d_set[ind]);
float r = 0;
for (int i = 0; i <= n_layer1; ++i) { //include bias
//r += layer1[i] * weights[(n_layer1+1)*n + i];
//r += layer1[i] * get_weight(weights, n_layer1, i, n);
//r += layer1[i] * get_weight(weights, n_layer2, i, n);
r += layer1[i] * weights[(n_layer2)*i + n];
//printf("l1: n=%d, r=%f, input[%d]=%f, weight[%d,%d]=%f, t = %f\n", n, r, i, layer1[i],i,n,weights[(n_layer1+1)*n+i], (layer1[i] * weights[(n_layer1+1)*n + i]) );
}
//printf("n = %d, sigmoid(%f)=%f\n",n, r,sigmoid(r));
layer2[n] = sigmoid(r);
}
}
/*
* calc each term of linear combination in separate thread,
* store in shared memory. So reduction in same kernel.
* Works only if num inputs is less than reasonable blocksize, probably 1024 max.
* Reduction code adapted from: NVIDIA presentation
* http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
*
*/
template <unsigned int blockSize>
__global__ void feed_forward_layer_v2(int n_layer1, int n_layer2, float* layer1, float* layer2, float* weights) {
extern __shared__ float terms[];
unsigned int n = blockIdx.x; // node to compute;
unsigned int tid = threadIdx.x;
terms[tid] = 0;
if (n < n_layer2 && tid <= n_layer1)
//terms[tid] = layer1[tid] * get_weight(weights, n_layer1, tid, n);
//terms[tid] = layer1[tid] * weights[(n_layer1+1)*n + tid];
//terms[tid] = layer1[tid] * get_weight(weights, n_layer2, tid, n);
terms[tid] = layer1[tid] * weights[(n_layer2)*tid + n];
__syncthreads();
// if (terms[tid] != 0)
// printf("l2: terms[%d]=%f\n", tid, terms[tid]);
if (blockSize >= 1024) { if (tid < 512) { terms[tid] += terms[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { terms[tid] += terms[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) {if (tid < 128) { terms[tid] += terms[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) {if (tid < 64) { terms[tid] += terms[tid + 64]; } __syncthreads(); }
if (tid < 32) { if (blockSize >= 64) terms[tid] += terms[tid + 32];
if (blockSize >= 32) terms[tid] += terms[tid + 16];
if (blockSize >= 16) terms[tid] += terms[tid + 8];
if (blockSize >= 8) terms[tid] += terms[tid + 4];
if (blockSize >= 4) terms[tid] += terms[tid + 2];
if (blockSize >= 2) terms[tid] += terms[tid + 1];
}
if (tid == 0)
layer2[n] = sigmoid(terms[0]);
//__syncthreads();
//printf("terms[%d]=%f\n", tid, terms[tid]);
}
template <unsigned int blockSize>
__global__ void feed_forward_layer_v2_flat(int n_layer1, int n_layer2, float* d_set, int ind, float* layer2, float* weights) {
extern __shared__ float terms[];
unsigned int n = blockIdx.x; // node to compute;
unsigned int tid = threadIdx.x;
terms[tid] = 0;
if (n < n_layer2 && tid <= n_layer1) {
float *layer1 = &(d_set[ind]);
//terms[tid] = layer1[tid] * get_weight(weights, n_layer1, tid, n);
//terms[tid] = layer1[tid] * weights[(n_layer1+1)*n + tid];
//terms[tid] = layer1[tid] * get_weight(weights, n_layer2, tid, n);
terms[tid] = layer1[tid] * weights[(n_layer2)*tid + n];
}
__syncthreads();
// if (terms[tid] != 0)
// printf("l1: terms[%d]=%f\n", tid, terms[tid]);
if (blockSize >= 1024) { if (tid < 512) { terms[tid] += terms[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { terms[tid] += terms[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) {if (tid < 128) { terms[tid] += terms[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) {if (tid < 64) { terms[tid] += terms[tid + 64]; } __syncthreads(); }
if (tid < 32) { if (blockSize >= 64) terms[tid] += terms[tid + 32];
if (blockSize >= 32) terms[tid] += terms[tid + 16];
if (blockSize >= 16) terms[tid] += terms[tid + 8];
if (blockSize >= 8) terms[tid] += terms[tid + 4];
if (blockSize >= 4) terms[tid] += terms[tid + 2];
if (blockSize >= 2) terms[tid] += terms[tid + 1];
}
if (tid == 0)
layer2[n] = sigmoid(terms[0]);
//__syncthreads();
//printf("terms[%d]=%f\n", tid, terms[tid]);
}
__global__ void clamp_outputs(float *output, int n) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < n) {
output[i] = clamp(output[i]);
}
}
/*
*
*
* ------------ backprop kernels ---------
* It is slightly faster if I manually inline the calc_output_gradient and calc_hidden_gradient functions.
* But VERY slightly. About 10 ms cumulatively over 1000 iterations. So insignificant I'm not
* going to mess with it.
*
*/
/**
* Compute the output gradient given specific output and target values
* \param output float
* \param target float
*/
__device__ __inline__ float calc_output_gradient(float output, float target) {
return output * (1 - output) * (target - output);
}
/*
* called generically, pow of 2 threads
*/
__global__ void output_error_gradients_v2(float* output, float* d_set, int t, float* output_err_gradients, int no) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < no) {
float *target = &(d_set[t]);
output_err_gradients[i] = calc_output_gradient(output[i], target[i]);
//printf("out_err_grad[%d] = %f, output = %f, target = %f\n", i, output_err_gradients[i], output[i], target[i]);
}
}
/*
* called generically with power of 2 threads
*/
__global__ void update_hidden_output_deltas_v2(int nh, int no, float l_rate, float momentum,
float* hidden, float* output_err_gradients, float* delta_ho) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (nh+1)*no) { // if in range
//int j = x % (nh+1); //hidden node
//int k = x / (nh+1);
int j = x / no;
int k = x % no;
//delta_ho[nh*k + j] = l_rate * hidden[j]hidden_err_grad * output_err_gradients[k] + momentum * delta_ho[nh*k + j];
//NOTE: likely to be more hidden nodes than output nodes so more advantageous to keep j coalesced
delta_ho[x] = l_rate * hidden[j] * output_err_gradients[k] + momentum * delta_ho[x];
//printf("x=%d, delta_ho(%d, %d) = %f, l_rate = %f, hidden[%d] = %f, out_err_gradients[%d] = %f, momentum = %f\n",
// x, j, k, delta_ho[x], l_rate, j, hidden[j], k, output_err_gradients[k], momentum);
}
}
__device__ __inline__ float calc_hidden_gradient(int j, int nh, int no, float* hidden, float* d_ho_weights, float* output_err_gradients) {
//get sum of hidden->output weights * output error gradients
float s = 0;
for (int k = 0; k < no; ++k)
//s += get_weight(d_ho_weights, nh, j, k) * output_err_gradients[k];
s += get_weight(d_ho_weights, no, j, k) * output_err_gradients[k];
//s += d_ho_weights[(nh+1)*k + j] * output_err_gradients[k];
//return error gradient
return hidden[j] * (1 - hidden[j]) * s;
}
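/*
 * Hidden-layer delta: back-propagate the output deltas through the hidden->output weights,
 * i.e. delta_j = h_j * (1 - h_j) * sum_k( w_jk * delta_k ).
 */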
/*
* called generically, pow of 2 threads
*/
__global__ void hidden_error_gradients_v2(int nh, int no, float* hidden, float* d_ho_weights, float* hidden_err_gradients, float* output_err_gradients) {
unsigned int j = blockIdx.x * blockDim.x+threadIdx.x;
if (j < nh) { //NOTE: another bug, had (j < (nh+1)*no), only nh nodes need calculated
hidden_err_gradients[j] = calc_hidden_gradient(j, nh, no, hidden, d_ho_weights, output_err_gradients);
//printf("hidden_err_grad[%d] = %f\n", j, hidden_err_gradients[j]);
}
}
/*
* num blocks = num hidden nodes
* num threads per block = 128, 256 etc
*
* This can be used as long as the number of output nodes is less than 128 or say 256.
* If the output is a single node the other way is likely faster.
*/
template <unsigned int blockSize>
__global__ void hidden_error_gradients_v3(int nh, int no, float* hidden, float* d_ho_weights, float* hidden_err_gradients, float* output_err_gradients) {
extern __shared__ float terms[]; // the number of terms will be equal to the number of output nodes
unsigned int j = blockIdx.x; //hidden node gradient to compute
unsigned int tid = threadIdx.x; //
terms[tid] = 0;
if (j < nh && tid < no) { //no bias on output so not <=
//terms[tid] = get_weight(d_ho_weights, nh, j, tid) * output_err_gradients[tid];
terms[tid] = get_weight(d_ho_weights, no, j, tid) * output_err_gradients[tid];
//terms[tid] = d_ho_weights[(nh+1)*tid + j] * output_err_gradients[tid];
}
__syncthreads();
if (blockSize >= 256) {if (tid < 128) { terms[tid] += terms[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) {if (tid < 64) { terms[tid] += terms[tid + 64]; } __syncthreads(); }
if (tid < 32) { if (blockSize >= 64) terms[tid] += terms[tid + 32];
if (blockSize >= 32) terms[tid] += terms[tid + 16];
if (blockSize >= 16) terms[tid] += terms[tid + 8];
if (blockSize >= 8) terms[tid] += terms[tid + 4];
if (blockSize >= 4) terms[tid] += terms[tid + 2];
if (blockSize >= 2) terms[tid] += terms[tid + 1];
}
if (tid == 0)
hidden_err_gradients[j] = hidden[j] * (1 - hidden[j]) * terms[0];
}
/*
* called with any number of blocks / threads
* normally, 128 or other power of 2
*/
//TODO: perhaps there is a way to store the hidden_err_gradient[j] in shared memory
__global__ void update_input_hidden_deltas_v2(int ni, int nh, float l_rate, float momentum,
float* d_set, int i, float* hidden_err_gradients, float* delta_ih) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (ni+1)*nh) {
float *input = &(d_set[i]);
//int i = x % (ni+1); //input node, NOTE: same bug as before
//int j = x / (ni+1);
int i = x / nh;
int j = x % nh;
//delta_ih[ni*j + i] = l_rate * input[i] * hidden_err_gradients[j] + momentum * delta_ih[ni*j + i];
//NOTE: usually more input nodes than hidden nodes so keep input access coalesced
delta_ih[x] = l_rate * input[i] * hidden_err_gradients[j] + momentum * delta_ih[x];
//printf("x=%d, delta_ih(%d, %d) = %f, l_rate = %f, input[%d] = %f, hidden_err_gradients[%d] = %f, momentum = %f\n",
// x, i, j, delta_ih[x], l_rate, i, input[i], j, hidden_err_gradients[j], momentum);
}
}
/*
* called generically with power of 2 threads
*/
__global__ void update_weights_v2(int n1, int n2, float *d_weights, float *deltas) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (n1+1)*n2) {
//Indexing is irrelevant here
d_weights[x] += deltas[x];
}
}
__constant__ __device__ float delta_min = -0.01;
__constant__ __device__ float delta_max = 0.01;
/*
* splitting kernels to save stochastic update a few comparisons
*/
__global__ void update_weights_batch_v2(int n1, int n2, float *d_weights, float *deltas) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (n1+1)*n2) {
//Indexing is irrelevant here
if (deltas[x] > delta_max) //using batch
d_weights[x] += delta_max;
else if (deltas[x] < delta_min)
d_weights[x] += delta_min;
else
d_weights[x] += deltas[x];
deltas[x] = 0;
}
}
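/*
 * In batch mode the accumulated deltas are clamped to [delta_min, delta_max] before being applied, and then
 * reset to zero ready for the next epoch's accumulation.
 */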
/*
*
* ------- RProp Kernels -----------
*
*/
/*
* called generically, pow of 2 threads
*/
__global__ void output_error_gradients_rprop(float* output, float* target, float* output_err_gradients, float* output_err_gradients_tmp, int no) {
unsigned int i = blockIdx.x * blockDim.x+threadIdx.x;
if (i < no) {
output_err_gradients_tmp[i] = output_err_gradients[i];
output_err_gradients[i] = calc_output_gradient(output[i], target[i]);
//printf("out_err_grad[%d] = %f, output = %f, target = %f\n", i, output_err_gradients[i], output[i], target[i]);
}
}
__global__ void update_hidden_output_deltas_rprop(int nh, int no, float step_p, float step_m, float d_max, float d_min,
float* hidden, float* output_err_gradients, float* output_err_gradients_tmp, float* delta_ho) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (nh+1)*no) { // if in range
		int j = x % (nh+1); //hidden node
		int k = x / (nh+1); //output node
		//gradients are stored per output node, so index by k; the sign of the product detects a gradient sign change
		float r = output_err_gradients[k] * output_err_gradients_tmp[k];
		if (r > 0) {
			delta_ho[(nh+1)*k + j] = min(delta_ho[(nh+1)*k + j] * step_p, d_max);
		} else if (r < 0) {
			delta_ho[(nh+1)*k + j] = max(delta_ho[(nh+1)*k + j] * step_m, d_min);
} else {
//TODO: need something here for start when delta = 0
}
}
}
__global__ void update_weights_rprop(int n1, int n2, float *d_weights, float* gradients, float *deltas) {
unsigned int x = blockIdx.x * blockDim.x+threadIdx.x;
if (x < (n1+1)*n2) {
		int j = x % n2; //layer 2 node (weights are stored flat as w[n2*l1_node + l2_node])
		int sign = (gradients[j] > 0) - (gradients[j] < 0);
		//weights and deltas share the same flat layout, so step each weight directly by its own delta
		d_weights[x] = d_weights[x] - sign*deltas[x];
}
}
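/*
 * NOTE: the RProp kernels above are an unfinished prototype -- GPUNet::rprop() further down is still a stub
 * and nothing launches them yet.
 */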
/*
*
* --------- Debugging ------------
*
*/
__global__ void print_gpu_net(int n_input, int n_hidden, int n_output,
float *hidden, float *output, float *ih_weights, float *ho_weights) {
//for (int i = 0; i <= n_input; ++i) {
// printf("input %d: %f, ", i, input[i]);
//}
//printf("\n");
for (int i = 0; i <= n_input; ++i) {
for (int j = 0; j < n_hidden; ++j) {
printf("ih weight (%d,%d,%d): %f, ", i, j, (n_input+1)*j + i, get_weight(ih_weights, n_input, i, j));
}
}
printf("\n");
for (int i = 0; i <= n_hidden; ++i) {
printf("hidden %d: %f, ", i, hidden[i]);
}
printf("\n");
for (int i = 0; i <= n_hidden; ++i) {
for (int j = 0; j < n_output; ++j) {
printf("ho weight (%d,%d,%d): %f, ", i, j, (n_hidden+1)*j + i, get_weight(ho_weights, n_hidden, i, j));
}
}
printf("\n");
for (int i = 0; i < n_output; ++i) {
printf("output %d: %f, ", i, output[i]);
}
printf("\n");
}
__global__ void print_target(int n_output, float *target) {
for (int i = 0; i < n_output; ++i) {
printf("target[%d] = %f\n", i, target[i]);
}
}
__global__ void print_input(int n_input, float *input) {
for (int i = 0; i < n_input+1; i++) {
printf("input[%d] = %f\n", i, input[i]);
}
}
/*
* ---------- Constructors -------------
*/
GPUNet::GPUNet() {
GPUNet::init_vars();
}
GPUNet::GPUNet(unsigned int ni, unsigned int no, float hidden_pct, GPUNetSettings::NetworkStructure net_type=GPUNetSettings::STANDARD) {
GPUNet::init_vars();
GPUNet::init_structure(ni, no, hidden_pct, net_type);
GPUNet::init_nio();
GPUNet::set_bsizes();
}
GPUNet::~GPUNet() {
cudaFree(d_input);
cudaFree(d_hidden);
cudaFree(d_output);
cudaFree(d_target);
cudaFree(d_ih_weights);
cudaFree(d_ho_weights);
cudaFree(d_ih_deltas);
cudaFree(d_ho_deltas);
cudaFree(d_hid_err_gradients);
cudaFree(d_out_err_gradients);
/*
//I'm getting a bad resource handle error at this line
CUDA_CHECK_RETURN(cudaStreamDestroy(bprop_stream));
CUDA_CHECK_RETURN(cudaStreamDestroy(err_calc_stream));
CUDA_CHECK_RETURN(cudaStreamDestroy(weight_update_stream1));
CUDA_CHECK_RETURN(cudaStreamDestroy(weight_update_stream2));
CUDA_CHECK_RETURN(cudaStreamDestroy(train_stream1));
CUDA_CHECK_RETURN(cudaStreamDestroy(train_stream2));
CUDA_CHECK_RETURN(cudaStreamDestroy(copy_stream));
CUDA_CHECK_RETURN(cudaEventDestroy(event1));
CUDA_CHECK_RETURN(cudaEventDestroy(event2));*/
delete[] h_output;
delete[] h_ih_weights;
delete[] h_ho_weights;
delete[] gpu_mem;
delete nio;
}
bool GPUNet::load_netfile(std::string net_file) {
std::cout << "Initializing from net file: " << net_file << std::endl;
GPUNet::init_nio();
bool loaded = GPUNet::read_net(net_file);
GPUNet::set_bsizes();
return loaded;
}
void GPUNet::init(unsigned int ni, unsigned int no, float hidden_pct, GPUNetSettings::NetworkStructure net_type) {
GPUNet::init_structure(ni, no, hidden_pct, net_type);
GPUNet::init_nio();
GPUNet::set_bsizes();
}
/*
* -------------- public ---------------
*/
void GPUNet::init_nio() {
nio = new NetIO();
nio->set_gnet(this);
}
void GPUNet::init_structure(unsigned int ni, unsigned int no, float hidden_pct, GPUNetSettings::NetworkStructure net_type) {
if (n_input != 0) { // constructor initializing nodes has been called, error out
std::cerr << "Network has already been initialized" << std::endl;
} else if (ni != 0) { // if not empty constructor
n_input = ni;
n_output = no;
GPUNet::net_type = net_type;
if (net_type == GPUNetSettings::STANDARD) {
n_hidden = ceil(hidden_pct*ni);
} else if (net_type == GPUNetSettings::GPU_ARCH_OPT) {
			//round hidden_pct*ni up to the next multiple of 128 so the hidden layer maps neatly onto thread blocks
			n_hidden = ((int)ceil(hidden_pct*ni) + 127) / 128 * 128;
} else {
std::cerr << "Invalid network type: " << net_type << std::endl;
exit(1);
}
}
}
void GPUNet::init_vars() {
max_epochs = GPUNetSettings::GPU_MAX_EPOCHS;
l_rate = GPUNetSettings::GPU_LEARNING_RATE;
momentum = GPUNetSettings::GPU_MOMENTUM;
desired_acc = GPUNetSettings::GPU_DESIRED_ACCURACY;
batching = GPUNetSettings::GPU_USE_BATCH;
save_freq = GPUNetSettings::GPU_SAVE_FREQUENCY;
base_file_path = GPUNetSettings::GPU_BASE_FILE_NAME;
CUDA_CHECK_RETURN(cudaGetDeviceCount(&n_gpus));
epoch = 0;
trainingSetAccuracy = 0;
validationSetAccuracy = 0;
generalizationSetAccuracy = 0;
trainingSetMSE = 0;
validationSetMSE = 0;
generalizationSetMSE = 0;
start = 0;
finish = 0;
n_input = 0;
n_hidden = 0;
n_output = 0;
gpu_opt_bprop_bsize = 0;
gpu_opt_ff_ih_bsize = 0;
gpu_opt_ff_ho_bsize = 0;
/*
* device
*/
d_input = NULL;
d_hidden = NULL;
d_output = NULL;
d_target = NULL;
d_ih_weights = NULL;
d_ho_weights = NULL;
d_ih_deltas = NULL;
d_ho_deltas = NULL;
d_hid_err_gradients = NULL;
d_out_err_gradients = NULL;
/*
* host validation
*/
h_output = NULL;
h_ih_weights = NULL;
h_ho_weights = NULL;
//init gpu mem to 0 for each gpu
gpu_mem = NULL;
}
void GPUNet::set_bsizes() {
std::cout << "Finding ideal block sizes: ";
//get first power of 2 larger than n_output
gpu_opt_bprop_bsize = pow2roundup(n_output);
std::cout << "bprop bsize=" << gpu_opt_bprop_bsize << ", ";
gpu_opt_ff_ih_bsize = pow2roundup(n_input+1);
gpu_opt_ff_ho_bsize = pow2roundup(n_hidden+1);
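	// Illustrative example (assuming pow2roundup(x) returns the smallest power of two >= x): a 100-67-3
	// network gives bprop bsize = 4, ff ih bsize = 128 and ff ho bsize = 128, so backprop_v3 takes the <32>
	// reduction path and both feed-forward layers take the <128> path.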
std::cout << "ff ih bsize=" << gpu_opt_ff_ih_bsize << ", ";
std::cout << "ff ho bsize=" << gpu_opt_ff_ho_bsize << std::endl;
}
void GPUNet::alloc_host_mem() {
h_output = new float[n_output];
h_ih_weights = new float[(n_input+1)*n_hidden];
h_ho_weights = new float[(n_hidden+1)*n_output];
gpu_mem = new size_t[n_gpus];
memset(gpu_mem, 0, n_gpus*sizeof(size_t));
}
/*
* allocate memory on device for
* input, hidden, output, target
* ih_weights, ho_weights
* ih_deltas, ho_deltas
* hid_err_gradients
* out_err_gradients
*/
void GPUNet::alloc_dev_mem() {
//nodes
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_input, (n_input+1)*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_hidden, (n_hidden+1)*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_output, (n_output)*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_target, (n_output)*sizeof(float)));
add_gpu_mem((n_input+n_hidden+(2*n_output)+2)*sizeof(float));
//weights
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_ih_weights, ((n_input+1)*n_hidden)*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_ho_weights, ((n_hidden+1)*n_output)*sizeof(float)));
add_gpu_mem(((n_input+1)*n_hidden + (n_hidden+1)*n_output)*sizeof(float));
//create delta arrays, include bias
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_ih_deltas, ((n_input+1)*n_hidden)*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_ho_deltas, ((n_hidden+1)*n_output)*sizeof(float)));
add_gpu_mem(((n_input+1)*n_hidden + (n_hidden+1)*n_output)*sizeof(float));
//error gradients
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_hid_err_gradients, (n_hidden+1)*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_out_err_gradients, (n_output+1)*sizeof(float)));
add_gpu_mem((n_hidden + n_output + 2)*sizeof(float));
CUDA_CHECK_RETURN(cudaStreamCreate(&bprop_stream));
CUDA_CHECK_RETURN(cudaStreamCreate(&err_calc_stream));
CUDA_CHECK_RETURN(cudaStreamCreate(&weight_update_stream1));
CUDA_CHECK_RETURN(cudaStreamCreate(&weight_update_stream2));
CUDA_CHECK_RETURN(cudaStreamCreate(©_stream));
CUDA_CHECK_RETURN(cudaEventCreate(&event1));
CUDA_CHECK_RETURN(cudaEventCreate(&event2));
}
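/*
 * The add_gpu_mem calls above record every allocation so calc_dataset_parameters can later work out how much
 * of the device is left for staging training patterns. The dominant cost is the weight and delta matrices:
 * 2*((n_input+1)*n_hidden + (n_hidden+1)*n_output) floats in total.
 */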
/*
* Note: assumes sizes of networks are the same
* This is for testing purposes so that
* I can have identical networks.
*/
void GPUNet::init_from_net(Net &net, NetData &d) {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//copy first pattern to input neurons so it is copied to device, instead of zeros
//for (int i = 0; i < net.n_input; ++i) {
//net.inputNeurons[i] = d.get_training_dataset()->training_set[0]->input[i];
//}
// so hidden and output initialized to 0
CUDA_CHECK_RETURN(cudaMemcpy(d_input, net.inputNeurons, (net.n_input)*sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_hidden, net.hiddenNeurons, (net.n_hidden)*sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_output, net.outputNeurons, (net.n_output)*sizeof(float), cudaMemcpyHostToDevice));
set_bias<<<1,1>>>(n_input, d_input);
set_bias<<<1,1>>>(n_hidden, d_hidden);
CUDA_CHECK_RETURN(cudaMemcpy(d_ih_weights, net.wInputHidden, (net.n_input+1)*(net.n_hidden)*sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_ho_weights, net.wHiddenOutput, (net.n_hidden+1)*(net.n_output)*sizeof(float), cudaMemcpyHostToDevice));
init_deltas_v2<<<((n_input+1)*n_hidden+threads-1)/threads, threads>>>(n_input+1, n_hidden, d_ih_deltas);
init_deltas_v2<<<((n_hidden+1)*n_output+threads-1)/threads, threads>>>(n_hidden+1, n_output, d_ho_deltas);
std::cout << "Data copied to device" << std::endl;
}
void GPUNet::init_net() {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//init nodes to all 0
init_nodes_layer_v2<<<(n_input+1+threads-1)/threads, threads>>>(n_input+1, d_input);
init_nodes_layer_v2<<<(n_hidden+1+threads-1)/threads, threads>>>(n_hidden+1, d_hidden);
init_nodes_output_v2<<<(n_output+threads-1)/threads, threads>>>(n_output, d_output);
set_bias<<<1,1>>>(n_input, d_input);
set_bias<<<1,1>>>(n_hidden, d_hidden);
//init weights to random vals
curandState *state;
CUDA_CHECK_RETURN(cudaMalloc(&state, threads*sizeof(curandState)));
curand_setup<<<1, threads>>>(state);
init_weights_v2<<<((n_input+1)*n_hidden+threads-1)/threads, threads>>>(n_input+1, n_hidden, d_ih_weights, state);
init_weights_v2<<<((n_hidden+1)*n_output+threads-1)/threads, threads>>>(n_hidden+1, n_output, d_ho_weights, state);
CUDA_CHECK_RETURN(cudaFree(state));
//init deltas to 0
init_deltas_v2<<<((n_input+1)*n_hidden+threads-1)/threads, threads>>>(n_input+1, n_hidden, d_ih_deltas);
init_deltas_v2<<<((n_hidden+1)*n_output+threads-1)/threads, threads>>>(n_hidden+1, n_output, d_ho_deltas);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
}
void GPUNet::set_learning_rate(float lr) {
l_rate = lr;
}
void GPUNet::set_momentum(float m) {
momentum = m;
}
void GPUNet::set_training_params(float lr, float m, bool b) {
l_rate = lr;
momentum = m;
batching = b;
}
void GPUNet::set_max_epochs(int me) {
max_epochs = me;
}
void GPUNet::set_save_frequency(int f) {
save_freq = f;
}
void GPUNet::set_desired_accuracy(float acc) {
desired_acc = acc;
}
void GPUNet::set_stopping_conds(int me, float acc) {
max_epochs = me;
desired_acc = acc;
}
void GPUNet::set_base_file_name(std::string f) {
base_file_path = f;
}
/*
* to keep it simple, run in 1 thread
*/
void GPUNet::print_net() {
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
print_gpu_net<<<1, 1>>>(n_input, n_hidden, n_output,
d_hidden, d_output, d_ih_weights, d_ho_weights);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
}
/*
* transfer weights back to host
* write important data (num_epochs, layers, nodes/layer, l_rate, momentum, max_epochs, desired_acc, current mse, current acc)
*
*/
bool GPUNet::write_net(std::string fname) {
//need to copy mse and acc back to host
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "current acc=" << trainingSetAccuracy << ", current mse=" << trainingSetMSE << std::endl;
if (!nio->write_net(fname)) {
std::cerr << "Write failed" << std::endl;
return false;
}
return true;
}
bool GPUNet::read_net(std::string fname) {
if (!nio->read_net(fname)) {
std::cerr << "Read failed" << std::endl;
return false;
}
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//init nodes to 0
init_nodes_layer_v2<<<(n_input+1+threads-1)/threads, threads>>>(n_input+1, d_input);
init_nodes_layer_v2<<<(n_hidden+1+threads-1)/threads, threads>>>(n_hidden+1, d_hidden);
init_nodes_output_v2<<<(n_output+threads-1)/threads, threads>>>(n_output, d_output);
set_bias<<<1,1>>>(n_input, d_input);
set_bias<<<1,1>>>(n_hidden, d_hidden);
//init deltas to 0
init_deltas_v2<<<((n_input+1)*n_hidden+threads-1)/threads, threads>>>(n_input+1, n_hidden, d_ih_deltas);
init_deltas_v2<<<((n_hidden+1)*n_output+threads-1)/threads, threads>>>(n_hidden+1, n_output, d_ho_deltas);
return true;
}
void GPUNet::run_test_set(TrainingDataSet *tset) {
std::cout << std::endl << "Running test set: " << std::endl;
calc_dataset_parameters(tset);
float* d_test_set;
//TODO: this assumes that the validation set always fits in GPU memory. Fine for now.
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_test_set, tset->n_validation*tset->fpp*sizeof(float)));
CUDA_CHECK_RETURN(cudaMemcpy(d_test_set, tset->validation_set, tset->n_validation*tset->fpp*sizeof(float), cudaMemcpyHostToDevice));
for (int i = 0; i < tset->n_validation; ++i) {
//wait for error calculation to finish before doing next feed forward iteration
CUDA_CHECK_RETURN(cudaStreamSynchronize(err_calc_stream));
//Because all error calculations are done in the same stream the next one cannot begin before the previous one finishes
//even if it is called before it is finished. So no need to synchronize.
//called with index of start position of target
feed_forward_v1_2(d_test_set, i*tset->fpp);
mse_sum_v2<<<1, 1, 0, err_calc_stream>>>(d_output, d_test_set, i*tset->fpp+n_input+1, n_output);
output_correct_v2<<<1, 1, 0, err_calc_stream>>>(d_output, d_test_set, i*tset->fpp+n_input+1, n_output);
}
calc_mse<<<1, 1, 0, err_calc_stream>>>(n_output, tset->n_validation);
calc_acc<<<1, 1, 0, err_calc_stream>>>(tset->n_validation);
copy_error_to_host(&validationSetMSE, &validationSetAccuracy);
std::cout << "Test set MSE = " << validationSetMSE << std::endl;
std::cout << "Test set ACC = " << validationSetAccuracy << std::endl;
//free training set
CUDA_CHECK_RETURN(cudaFree(d_test_set));
}
/*
* run the input through the network
*/
float* GPUNet::evaluate(float* input) {
//copy to device
//feed forward
//copy back output
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
float *h_out = new float[n_output];
CUDA_CHECK_RETURN(cudaMemcpy(d_input, input, (n_input)*sizeof(float), cudaMemcpyHostToDevice));
feed_forward_v1_2(d_input, 0);
clamp_outputs<<<(n_output+threads-1)/threads, threads>>>(d_output, n_output);
CUDA_CHECK_RETURN(cudaMemcpy(h_out, d_output, n_output*sizeof(float), cudaMemcpyDeviceToHost));
return h_out;
}
int GPUNet::get_num_input() {
return n_input;
}
int GPUNet::get_num_hidden() {
return n_hidden;
}
int GPUNet::get_num_output() {
return n_output;
}
void GPUNet::calc_dataset_parameters(TrainingDataSet *tset) {
std::cout << "Determining data set statistics" << std::endl;
// calc num patterns copyable
// num patterns = integer div of available memory / mem for single pattern
int bytes_per_pattern = sizeof(float)*((n_input+1)+(n_output));
int cur_dev = get_current_device();
std::cout << " bytes per pattern = "<<bytes_per_pattern<<std::endl;
std::cout << " total dev mem = "<< total_dev_mem(cur_dev)<<std::endl;
std::cout << " current mem usage = "<< current_mem_usage(cur_dev)<<std::endl;
	long long available_mem = (long long)total_dev_mem(cur_dev) - (long long)current_mem_usage(cur_dev);
std::cout << " available mem = "<<available_mem<<std::endl;
std::cout << " tset.size = "<<tset->n_patterns<<std::endl;
n_copyable_patterns = available_mem / bytes_per_pattern;
//ensure n_copyable_patterns is even and can be split into 2 buffers
if (n_copyable_patterns % 2 == 1) {
--n_copyable_patterns;
}
	if (n_copyable_patterns <= 0) {
		std::cerr << "Error: not enough device memory available to copy any training patterns" << std::endl;
		exit(1);
	}
if (n_copyable_patterns > tset->n_patterns) {
n_copyable_patterns = tset->n_patterns;
}
// calc num sections
// num_sections = ceil ( n_patterns / n_copyable_patterns)
n_sections = (tset->n_patterns + n_copyable_patterns - 1) / n_copyable_patterns;
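	// e.g. n_patterns = 10000 with n_copyable_patterns = 3000 gives n_sections = 4
	// (three full sections plus a final partial section of 1000 patterns)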
std::cout << " n_copyable_patterns = "<<n_copyable_patterns<<", n_sections = "<<n_sections<<std::endl<<std::endl;
}
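/*
 * Overlapped training: the copyable patterns are split across two device buffers. While one buffer is being
 * trained on, the next chunk is copied asynchronously into the other buffer on copy_stream, and the roles of
 * the buffers swap on alternate epochs.
 */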
void GPUNet::train_net_sectioned_overlap(TrainingDataSet *tset) {
calc_dataset_parameters(tset);
std::cout << std::endl << "Neural Network Training Starting: " << std::endl
<< "----------------------------------------------------" << std::endl
<< "LR: " << l_rate << ", Momentum: " << momentum << ", Max Epochs: " << max_epochs << std::endl
<< n_input << " Input Neurons, " << n_hidden << " Hidden Neurons, " << n_output << " Output Neurons" << std::endl
<< "----------------------------------------------------" << std::endl << std::endl;
int buffer_size = n_copyable_patterns / 2;
float* d_training_buffer1;
float* d_training_buffer2;
//by default allocate array as large as possible
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_training_buffer1, buffer_size*tset->fpp*sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_training_buffer2, buffer_size*tset->fpp*sizeof(float)));
CUDA_CHECK_RETURN(cudaMemcpyAsync(d_training_buffer1, tset->training_set, buffer_size*tset->fpp*sizeof(float), cudaMemcpyHostToDevice, copy_stream));
int p_start = buffer_size;
while (epoch < max_epochs) {
std::cout << "Epoch: " << epoch << ", ";
//on even epochs, process data in buffer1
//copy data to buffer 2
		if (p_start >= tset->n_training) p_start = 0;
int p_end = p_start+buffer_size;
if (p_end > tset->n_training) p_end = tset->n_training;
//ensure finished copying before processing that buffer
CUDA_CHECK_RETURN(cudaStreamSynchronize(copy_stream));
if (epoch % 2 == 0) {
			CUDA_CHECK_RETURN(cudaMemcpyAsync(d_training_buffer2, &(tset->training_set[p_start*tset->fpp]), (p_end-p_start)*tset->fpp*sizeof(float), cudaMemcpyHostToDevice, copy_stream));
run_training_epoch_dev(d_training_buffer1, buffer_size, tset->fpp);
} else {
			CUDA_CHECK_RETURN(cudaMemcpyAsync(d_training_buffer1, &(tset->training_set[p_start*tset->fpp]), (p_end-p_start)*tset->fpp*sizeof(float), cudaMemcpyHostToDevice, copy_stream));
run_training_epoch_dev(d_training_buffer2, buffer_size, tset->fpp);
}
p_start += buffer_size;
++epoch;
if (epoch % save_freq == 0) {
std::string fname = "nets/face_" + boost::lexical_cast<std::string>(epoch) + ".net";
std::cout << "Writing intermediary net " << fname << std::endl;
write_net(fname);
}
}
//out validation accuracy and MSE
std::cout << std::endl << "Training complete. Elapsed epochs: " << epoch << std::endl;
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "MSE = " << trainingSetMSE << std::endl;
std::cout << "ACC = " << trainingSetAccuracy << std::endl;
}
__global__ void printtset(float* set, int n) {
for (int i = 0; i < n; i++) {
printf("%f ", set[i]);
}
printf("\n");
}
void GPUNet::train_net_sectioned(TrainingDataSet *tset) {
calc_dataset_parameters(tset);
std::cout << std::endl << "Neural network training starting: " << std::endl
<< "----------------------------------------------------" << std::endl
<< "LR: " << l_rate << ", momentum: " << momentum << ", max epochs: " << max_epochs << std::endl
<< n_input << " input, " << n_hidden << " hidden, " << n_output << " output " << std::endl
<< "----------------------------------------------------" << std::endl << std::endl;
float* d_training_set;
//by default allocate array as large as possible
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_training_set, n_copyable_patterns*tset->fpp*sizeof(float)));
if (n_sections == 1) { // no section copying necessary
CUDA_CHECK_RETURN(cudaMemcpy(d_training_set, tset->training_set, tset->n_training*tset->fpp*sizeof(float), cudaMemcpyHostToDevice));
while (epoch < max_epochs) {
std::cout << "Epoch: " << epoch << ", ";
run_training_epoch_dev(d_training_set, tset->n_training, tset->fpp);
++epoch;
//copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
//std::cout << "current mse = " << trainingSetMSE << ", current acc = " << trainingSetAccuracy << std::endl;
if (epoch % save_freq == 0) {
std::string fname = base_file_path + "_" + boost::lexical_cast<std::string>(epoch) + ".net";
std::cout << "Writing intermediary net " << fname << std::endl;
write_net(fname);
if (trainingSetAccuracy > desired_acc)
break; //just run test set
}
}
} else {
while (epoch < max_epochs) {
std::cout << "Epoch: " << epoch << std::endl;
//copy a section and run partial epoch
for (int i = 0; i < n_sections; ++i) {
//copy patterns from [n_sections*n_patterns_copyable, (n_sections+1)*n_patterns_copyable)
int p_start = i * n_copyable_patterns;
int p_end = p_start + n_copyable_patterns;
if (p_end > tset->n_training) p_end = tset->n_training;
std::cout << "copying section="<<i<<", pstart="<< p_start << ", pend="<<p_end << std::endl;
				CUDA_CHECK_RETURN(cudaMemcpy(d_training_set, &(tset->training_set[p_start*tset->fpp]), (p_end-p_start)*tset->fpp*sizeof(float), cudaMemcpyHostToDevice));
std::cout << "data copied" << std::endl;
run_training_epoch_dev(d_training_set, p_end-p_start, tset->fpp);
}
//once training set is complete increment epoch
++epoch;
}
}
//out validation accuracy and MSE
std::cout << std::endl << "Training complete. Elapsed epochs: " << epoch << std::endl;
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "MSE = " << trainingSetMSE << std::endl;
std::cout << "ACC = " << trainingSetAccuracy << std::endl;
//free training set
CUDA_CHECK_RETURN(cudaFree(d_training_set));
run_test_set(tset);
}
void GPUNet::copy_error_to_host(float* mse, float* acc) {
CUDA_CHECK_RETURN(cudaStreamSynchronize(err_calc_stream)); //make sure error calculation has completed.
CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(mse, d_mse, sizeof(float), 0, cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(acc, d_acc, sizeof(float), 0, cudaMemcpyDeviceToHost));
}
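/*
 * run_training_epoch_dev drives one pass over the patterns currently resident on the device. Per-pattern
 * error kernels run on err_calc_stream, gradient/delta kernels on bprop_stream, and weight updates on
 * weight_update_stream1/2, gated on events recorded in bprop_stream once the corresponding deltas are ready.
 * In stochastic mode the next pattern waits on both weight-update streams; in batch mode the deltas
 * accumulate and a single clamped update is issued at the end of the epoch.
 */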
void GPUNet::run_training_epoch_dev(float *set, int n_features, int fpp) {
int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
start = clock();
if (batching) {
//if doing batch make sure weights have been updated after last epoch
CUDA_CHECK_RETURN(cudaStreamSynchronize(weight_update_stream1));
CUDA_CHECK_RETURN(cudaStreamSynchronize(weight_update_stream2));
}
for (int i = 0; i < n_features; ++i) {
//called with index of start position of target
if (!batching) {
//need to wait for weight update, otherwise no sync because gradients and deltas in same stream
CUDA_CHECK_RETURN(cudaStreamSynchronize(weight_update_stream1));
CUDA_CHECK_RETURN(cudaStreamSynchronize(weight_update_stream2));
}
feed_forward_v2(set, i*fpp);
backprop_v3(set, i*fpp, i*fpp+n_input+1);
}
if (batching) { //update weights here and reset deltas
CUDA_CHECK_RETURN(cudaEventRecord(event1, bprop_stream));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(weight_update_stream1, event1, 0));
CUDA_CHECK_RETURN(cudaEventRecord(event2, bprop_stream));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(weight_update_stream2, event2, 0));
update_weights_batch_v2<<<((n_output*(n_hidden+1))+n_threads-1)/n_threads, n_threads, 0, weight_update_stream1>>>(n_hidden, n_output, d_ho_weights, d_ho_deltas);
update_weights_batch_v2<<<((n_hidden*(n_input+1))+n_threads-1)/n_threads, n_threads, 0, weight_update_stream2>>>(n_input, n_hidden, d_ih_weights, d_ih_deltas);
}
calc_mse<<<1, 1, 0, err_calc_stream>>>(n_output, n_features);
calc_acc<<<1, 1, 0, err_calc_stream>>>(n_features);
finish = clock();
std::cout << "time: " << ((double)finish-start)/CLOCKS_PER_SEC << std::endl;
}
void GPUNet::backprop_v2(float* d_set, int i, int t) {
int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//maintain mse state
mse_sum_v2<<<1, 1, 0, err_calc_stream>>>(d_output, d_set, t, n_output);
output_correct_v2<<<1, 1, 0, err_calc_stream>>>(d_output, d_set, t, n_output);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//float mse_sum = 0;
//CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(&mse_sum, d_mse_sum, sizeof(float), 0, cudaMemcpyDeviceToHost));
//std::cout << "Current mse_sum = " << mse_sum << std::endl;
output_error_gradients_v2<<<(n_output+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(d_output, d_set, t, d_out_err_gradients, n_output);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
update_hidden_output_deltas_v2<<<((n_output*(n_hidden+1))+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_hidden, n_output, l_rate, momentum, d_hidden, d_out_err_gradients, d_ho_deltas);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
hidden_error_gradients_v2<<<(n_hidden+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
if (!batching) { // don't update weights here
CUDA_CHECK_RETURN(cudaEventRecord(event1, bprop_stream));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(weight_update_stream1, event1, 0));
update_weights_v2<<<((n_output*(n_hidden+1))+n_threads-1)/n_threads, n_threads, 0, weight_update_stream1>>>(n_hidden, n_output, d_ho_weights, d_ho_deltas);
}
update_input_hidden_deltas_v2<<<((n_hidden*(n_input+1))+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_input, n_hidden, l_rate, momentum,
d_set, i, d_hid_err_gradients, d_ih_deltas);
if (!batching) { // don't update weights here
CUDA_CHECK_RETURN(cudaEventRecord(event2, bprop_stream));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(weight_update_stream2, event2, 0));
update_weights_v2<<<((n_hidden*(n_input+1))+n_threads-1)/n_threads, n_threads, 0, weight_update_stream2>>>(n_input, n_hidden, d_ih_weights, d_ih_deltas);
}
}
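/*
 * backprop_v3 is identical to backprop_v2 except that the hidden error gradients are computed with the
 * shared-memory reduction kernel (hidden_error_gradients_v3), the template block size being chosen at
 * runtime from gpu_opt_bprop_bsize; it falls back to the plain v2 kernel when that size is 1 or exceeds 256.
 */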
void GPUNet::backprop_v3(float* d_set, int i, int t) {
int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//maintain mse state
mse_sum_v2<<<1, 1, 0, err_calc_stream>>>(d_output, d_set, t, n_output);
output_correct_v2<<<1, 1, 0, err_calc_stream>>>(d_output, d_set, t, n_output);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//float mse_sum = 0;
//CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(&mse_sum, d_mse_sum, sizeof(float), 0, cudaMemcpyDeviceToHost));
//std::cout << "Current mse_sum = " << mse_sum << std::endl;
output_error_gradients_v2<<<(n_output+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(d_output, d_set, t, d_out_err_gradients, n_output);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
update_hidden_output_deltas_v2<<<((n_output*(n_hidden+1))+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_hidden, n_output, l_rate, momentum, d_hidden, d_out_err_gradients, d_ho_deltas);
//CUDA_CHECK_RETURN(cudaDeviceSynchronize());
if (gpu_opt_bprop_bsize <= 1) {
hidden_error_gradients_v2<<<(n_hidden+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 32) {
hidden_error_gradients_v3<32><<<n_hidden, 32, 32*sizeof(float), bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 64) {
hidden_error_gradients_v3<64><<<n_hidden, 64, 64*sizeof(float), bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 128) {
hidden_error_gradients_v3<128><<<n_hidden, 128, 128*sizeof(float), bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else if (gpu_opt_bprop_bsize <= 256) {
hidden_error_gradients_v3<256><<<n_hidden, 256, 256*sizeof(float), bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
} else {
hidden_error_gradients_v2<<<(n_hidden+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_hidden, n_output, d_hidden, d_ho_weights,
d_hid_err_gradients, d_out_err_gradients);
}
if (!batching) { // don't update weights here
CUDA_CHECK_RETURN(cudaEventRecord(event1, bprop_stream));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(weight_update_stream1, event1, 0));
update_weights_v2<<<((n_output*(n_hidden+1))+n_threads-1)/n_threads, n_threads, 0, weight_update_stream1>>>(n_hidden, n_output, d_ho_weights, d_ho_deltas);
}
update_input_hidden_deltas_v2<<<((n_hidden*(n_input+1))+n_threads-1)/n_threads, n_threads, 0, bprop_stream>>>(n_input, n_hidden, l_rate, momentum,
d_set, i, d_hid_err_gradients, d_ih_deltas);
if (!batching) { // don't update weights here
CUDA_CHECK_RETURN(cudaEventRecord(event2, bprop_stream));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(weight_update_stream2, event2, 0));
update_weights_v2<<<((n_hidden*(n_input+1))+n_threads-1)/n_threads, n_threads, 0, weight_update_stream2>>>(n_input, n_hidden, d_ih_weights, d_ih_deltas);
}
}
void GPUNet::rprop(float *d_inp, float *d_tar) {
//int n_threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
//calc hidden out gradients
//
}
void GPUNet::feed_forward_v1_2(float* d_set, int i) {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
feed_forward_layer_v1_2_flat<<<(n_hidden+threads-1)/threads, threads>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v1_2<<<(n_output+threads-1)/threads, threads>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
}
/*
* TODO: what if the first layer cannot be done using the reduction but the second layer can
*/
/*void GPUNet::feed_forward_v2(float* d_set, int i) {
switch (gpu_opt_ff_ih_bsize) {
case 1:
feed_forward_layer_v2_flat<1><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<1><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 2:
feed_forward_layer_v2_flat<2><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<2><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 4:
feed_forward_layer_v2_flat<4><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<4><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 8:
feed_forward_layer_v2_flat<8><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<8><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 16:
feed_forward_layer_v2_flat<16><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<16><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 32:
feed_forward_layer_v2_flat<32><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<32><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 64:
feed_forward_layer_v2_flat<64><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<64><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 128:
feed_forward_layer_v2_flat<128><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<128><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 256:
feed_forward_layer_v2_flat<256><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<256><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 512:
feed_forward_layer_v2_flat<512><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<512><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
case 1024:
feed_forward_layer_v2_flat<1024><<<n_hidden, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
feed_forward_layer_v2<1024><<<n_output, gpu_opt_ff_ih_bsize, gpu_opt_ff_ih_bsize*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
break;
}
}*/
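/*
 * Active version: the switch-based variant above is kept for reference. The implementation below picks the
 * smallest templated block size that covers the layer (gpu_opt_ff_* set in set_bsizes) and falls back to the
 * v1.2 kernels when more than 1024 threads per block would be needed.
 */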
void GPUNet::feed_forward_v2(float* d_set, int i) {
int threads = GPUNetSettings::GPU_DEFAULT_BLOCK_SIZE;
if (gpu_opt_ff_ih_bsize <= 32) {
feed_forward_layer_v2_flat<32><<<n_hidden, 32, 32*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 64) {
feed_forward_layer_v2_flat<64><<<n_hidden, 64, 64*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 128) {
feed_forward_layer_v2_flat<128><<<n_hidden, 128, 128*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 256) {
feed_forward_layer_v2_flat<256><<<n_hidden, 256, 256*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 512) {
feed_forward_layer_v2_flat<512><<<n_hidden, 512, 512*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else if (gpu_opt_ff_ih_bsize <= 1024) {
feed_forward_layer_v2_flat<1024><<<n_hidden, 1024, 1024*sizeof(float)>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
} else {
feed_forward_layer_v1_2_flat<<<(n_hidden+threads-1)/threads, threads>>>(n_input, n_hidden, d_set, i, d_hidden, d_ih_weights);
}
if (gpu_opt_ff_ho_bsize <= 32) {
feed_forward_layer_v2<32><<<n_output, 32, 32*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 64) {
feed_forward_layer_v2<64><<<n_output, 64, 64*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 128) {
feed_forward_layer_v2<128><<<n_output, 128, 128*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 256) {
feed_forward_layer_v2<256><<<n_output, 256, 256*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 512) {
feed_forward_layer_v2<512><<<n_output, 512, 512*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else if (gpu_opt_ff_ho_bsize <= 1024) {
feed_forward_layer_v2<1024><<<n_output, 1024, 1024*sizeof(float)>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
} else {
feed_forward_layer_v1_2<<<(n_output+threads-1)/threads, threads>>>(n_hidden, n_output, d_hidden, d_output, d_ho_weights);
}
}
bool GPUNet::validate_output(float* desired_output) {
//copy output back to host
CUDA_CHECK_RETURN(cudaMemcpy(h_output, d_output, n_output*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < n_output; ++i) {
//std::cout << "actual = " << desired_output[i] << ", calc = " << h_output[i] << std::endl;
		if (fabs(desired_output[i] - h_output[i]) > .005)
return false;
}
return true;
}
bool GPUNet::validate_weights(float *desired_ih_weights, float *desired_ho_weights) {
//copy inp hid weights to host
CUDA_CHECK_RETURN(cudaMemcpy(h_ih_weights, d_ih_weights, (n_input+1)*n_hidden*sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(h_ho_weights, d_ho_weights, (n_hidden+1)*n_output*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < (n_input+1)*n_hidden; ++i) {
		if (fabs(desired_ih_weights[i] - h_ih_weights[i]) > .001)
return false;
}
for (int i = 0; i < (n_hidden+1)*n_output; ++i) {
		if (fabs(desired_ho_weights[i] - h_ho_weights[i]) > .001)
return false;
}
return true;
}
void GPUNet::test_feed_forward(Net &net, NetData &d) {
std::cout << "Testing feed forward functions" << std::endl;
//std::cout << "feed forward CPU" << std::endl;
//net.print_network();
net.feed_forward(&(d.get_training_dataset()->training_set[0]));
//std::cout << "feed forward CPU time: " << ((float)(finish-start)) / CLOCKS_PER_SEC << "s\n\n";
//net.print_network();
TrainingDataSet *tset = d.get_training_dataset();
float *d_training_set;
GPUNet::copy_to_device(tset->training_set, tset->n_training, tset->fpp, &d_training_set);
std::cout << "Testing feedforward v1.2" << std::endl;
feed_forward_v1_2(d_training_set, 0);
std::cout << "Validates: " << validate_output(net.outputNeurons) << std::endl << std::endl;
//net.print_network();
//print_net();
CUDA_CHECK_RETURN(cudaMemset(d_output, 0, n_output*sizeof(float)));
//std::cout << "Testing method 1.3" << std::endl;
//feed_forward_v1_3(&(d_training_set[0]));
//std::cout << "Validates: " << validate_output(net.outputNeurons) << "\n";
//net.print_network();
//print_net();
//CUDA_CHECK_RETURN(cudaMemset(d_output, 0, n_output*sizeof(float)));
std::cout << "Testing feedforward v2" << std::endl;
feed_forward_v2(d_training_set, 0);
std::cout << "Validates: " << validate_output(net.outputNeurons) << std::endl << std::endl;
CUDA_CHECK_RETURN(cudaMemset(d_output, 0, n_output*sizeof(float)));
/*std::cout << "Testing method 2.2" << std::endl;
feed_forward_v2_2();
std::cout << "Validates: " << validates(net.outputNeurons) << "\n";
CUDA_CHECK_RETURN(cudaMemset(d_output, 0, n_output*sizeof(float)));*/
CUDA_CHECK_RETURN(cudaFree(d_training_set));
}
void GPUNet::test_backprop(Net &net, NetData &d) {
std::cout << "Testing backpropagation functions" << std::endl;
NetTrainer nt(&net);
//std::cout << "CPU net 0" << std::endl;
//net.print_network();
net.feed_forward(&(d.get_training_dataset()->training_set[0]));
//std::cout << "CPU net 1" << std::endl;
//net.print_network();
nt.backprop(&(d.get_training_dataset()->training_set[0+n_input+1]));
//std::cout << "CPU net 2" << std::endl;
//net.print_network();
int i = 0, t = n_input+1;
TrainingDataSet *tset = d.get_training_dataset();
float *d_training_set;
GPUNet::copy_to_device(tset->training_set, tset->n_training, tset->fpp, &d_training_set);
// std::cout << "Testing backprop v2" << std::endl;
//// std::cout << std::endl << "GPU net 0" << std::endl;
//// print_net();
//// std::cout << std::endl;
//
// feed_forward_v1_2(d_training_set, i);
//// std::cout << "GPU net 1" << std::endl;
//// print_net();
//// std::cout << std::endl;
//
//// std::cout << "GPU net 2" << std::endl;
// backprop_v2(d_training_set, i, t);
// CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//// print_net();
//// std::cout << std::endl;
// std::cout << "Validates: " << validate_weights(net.wInputHidden, net.wHiddenOutput) << std::endl << std::endl;
std::cout << "Testing backprop v3" << std::endl;
//std::cout << std::endl << "GPU net 0" << std::endl;
//print_net();
//std::cout << std::endl;
//int i = 0, t = n_input+1;
feed_forward_v1_2(d_training_set, i);
//std::cout << "GPU net 1" << std::endl;
//print_net();
//std::cout << std::endl;
//std::cout << "GPU net 2" << std::endl;
backprop_v3(d_training_set, i, t);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
net.print_network();
print_net();
std::cout << std::endl;
std::cout << "Validates: " << validate_weights(net.wInputHidden, net.wHiddenOutput) << std::endl << std::endl;
// net.feed_forward(&(tset->training_set[tset->fpp]));
// nt.backprop(&(tset->training_set[tset->fpp+n_input+1]));
// i = tset->fpp, t = i+n_input+1;
// feed_forward_v1_2(d_training_set, i);
// backprop_v2(d_training_set, i, t);
// std::cout << "Validates: " << validate_weights(net.wInputHidden, net.wHiddenOutput) << std::endl;
}
void GPUNet::run_parallel(Net &net, NetData &d) {
std::cout << "Running in parallel" <<std::endl;
TrainingDataSet *tset = d.get_training_dataset();
float *d_training_set;
GPUNet::copy_to_device(tset->training_set, tset->n_training, tset->fpp, &d_training_set);
NetTrainer nt(&net);
// std::cout << "CPU network" << std::endl;
// net.print_network();
// std::cout << "GPU network" << std::endl;
// print_net();
// std::cout << std::endl;
int e = 0;
std::string r = "";
while (true) {
std::cout << "Epoch " << e++ << std::endl;
//for (int i = 0; i < d.get_training_dataset()->n_training; ++i) {
//int inp = i*tset->fpp;
//int tar = inp+n_input+1;
//net.feed_forward(&(d.get_training_dataset()->training_set[inp]));
//nt.backprop(&(d.get_training_dataset()->training_set[tar]));
nt.run_training_epoch(d.get_training_dataset());
run_training_epoch_dev(d_training_set, tset->n_training, tset->fpp);
//feed_forward_v1_2(d_training_set, inp);
//backprop_v2(d_training_set, inp, tar);
//std::cout << "CPU network" << std::endl;
//net.print_network();
//std::cout << "GPU network" << std::endl;
//print_net();
bool v = validate_weights(net.wInputHidden, net.wHiddenOutput);
std::cout << "Validates: " << v << std::endl;
if (!v) {
// std::cout << "CPU network" << std::endl;
// net.print_network();
// std::cout << "GPU network" << std::endl;
// print_net();
}
copy_error_to_host(&trainingSetMSE, &trainingSetAccuracy);
std::cout << "GPU error: " << trainingSetMSE << ", " << trainingSetAccuracy << std::endl;
std::cout << "CPU error: " << nt.trainingSetMSE << ", " << nt.trainingSetAccuracy<< std::endl;
std::getline(std::cin, r);
if (r == "exit") {
return;
}
//}
}
}
size_t GPUNet::current_mem_usage(int dev) {
return gpu_mem[dev];
}
/*
* ------------ private ------------
*/
void GPUNet::add_gpu_mem(int bytes) {
gpu_mem[get_current_device()] += bytes;
}
int GPUNet::get_current_device() {
int device;
cudaGetDevice(&device);
return device;
}
size_t GPUNet::dataset_size(TrainingDataSet *tset) {
size_t tset_size = 0;
int fv_size = (n_input + n_output) * sizeof(float);
tset_size += fv_size * tset->n_training;
tset_size += fv_size * tset->n_generalization;
tset_size += fv_size * tset->n_validation;
return tset_size;
}
size_t GPUNet::total_dev_mem(int dev) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
	return props.totalGlobalMem - 1213382500; // reserve roughly 1.2 GB of device memory for the display and other processes
}
void GPUNet::copy_to_device(float* set, int n_patterns, int fpp, float **d_set) {
CUDA_CHECK_RETURN(cudaMalloc((void**)d_set, n_patterns*fpp*sizeof(float)));
CUDA_CHECK_RETURN(cudaMemcpy(*d_set, set, n_patterns*fpp*sizeof(float), cudaMemcpyHostToDevice));
}
|
532fe4066a453b7f34df99f89e9d8a4ade74ef68.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
// Include the header file
// Uncomment below if including into a `.c` file rather than `.cu` file
//extern "C" {
#include "NBodyVisualiser.h"
//}
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#define TIMING_FRAME_COUNT 20
// User supplied globals
static unsigned int N;
static unsigned int D;
static MODE M;
const float *PositionsX = 0;
const float *PositionsY = 0;
const nbody_soa *Bodies = 0;
const float *Densities = 0;
void(*simulate_function)(void) = 0;
// Instancing variables for histogram
GLuint vao_hist = 0;
GLuint vao_hist_vertices = 0;
GLuint tbo_hist = 0;
GLuint tex_hist = 0;
GLuint vao_hist_instance_ids = 0;
// Instancing variables for nbody
GLuint vao_nbody = 0;
GLuint vao_nbody_vertices = 0;
GLuint tbo_nbody = 0;
GLuint tex_nbody = 0;
GLuint vao_nbody_instance_ids = 0;
// Mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_z = 0.0;
float translate_z = -1.0;
// Vertex shader handles
GLuint vs_hist_shader = 0;
GLuint vs_nbody_shader = 0;
GLuint vs_hist_program = 0;
GLuint vs_nbody_program = 0;
GLuint vs_hist_instance_index = 0;
GLuint vs_nbody_instance_index = 0;
// Render options
bool display_bodies = true;
bool display_density = false;
// Cuda graphics resources
struct cudaGraphicsResource *cuda_nbody_vbo_resource;
struct cudaGraphicsResource *cuda_hist_vbo_resource;
// Timing variables for FPS calculation
float elapsed = 0;
float prev_time = 0;
unsigned int frames;
char title[128];
// Function prototypes
void displayLoop(void);
void initHistShader();
void initNBodyShader();
void initHistVertexData();
void initNBodyVertexData();
void initGL();
void destroyViewer();
void render(void);
void checkGLError();
void handleKeyboardDefault(unsigned char key, int x, int y);
void handleMouseDefault(int button, int state, int x, int y);
void handleMouseMotionDefault(int x, int y);
void checkCUDAError(const char *msg);
// Vertex shader source code
const char* hist_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" float instance_data = texelFetchBuffer(instance_tex, int(instance_index)).x; \n"
" vec4 position = vec4(gl_Vertex.x, gl_Vertex.y, 0.0f, 1.0f); \n"
" gl_FrontColor = vec4(instance_data, 0.0f, 0.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
const char* nbody_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" vec2 instance_data = texelFetchBuffer(instance_tex, int(instance_index)).xy; \n"
" vec4 position = vec4(gl_Vertex.x+instance_data.x, \n"
" gl_Vertex.y+instance_data.y, \n"
" gl_Vertex.z, 1.0f); \n"
" gl_FrontColor = vec4(1.0f, 1.0f, 1.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
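/*
 * Both vertex shaders read their per-instance data (a density value, or a body's x/y position) from a
 * texture buffer object with texelFetchBuffer, using the instance_index attribute to select the element.
 */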
//////////////////////////////// CUDA Kernels ////////////////////////////////
__global__ void copyNBodyData2f(float* buffer, const float *x, const float *y, const unsigned int N) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
// Copy data to mapped `buffer`, which should have length at least `2N`
float* ptr = &buffer[i + i]; // Locate the address of position `2i` in `buffer` for thread `i`
ptr[0] = x[i];
ptr[1] = y[i];
}
}
__global__ void copyNBodyData(float* buffer, const nbody_soa* bodies, const unsigned int N) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
// Copy data to mapped `buffer`, which should have length at least `2N`
float* ptr = &buffer[i + i]; // Locate the address of position `2i` in `buffer` for thread `i`
ptr[0] = bodies->x[i];
ptr[1] = bodies->y[i];
}
}
__global__ void copyHistData(float* buffer, const float* densities, const unsigned int D) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < D*D) {
// Copy data to mapped `buffer`, which should have length at least `D^2`
buffer[i] = densities[i];
}
}
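/*
 * In CUDA mode these kernels write body positions and density values straight into the OpenGL texture
 * buffer objects, which displayLoop() maps with hipGraphicsMapResources before launching them, so the
 * per-frame data never has to round-trip through host memory.
 */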
//////////////////////////////// Header declared functions ////////////////////////////////
void initViewer(unsigned int n, unsigned int d, MODE m, void(*simulate)(void)) {
N = n;
D = d;
M = m;
simulate_function = simulate;
// Check for Unified Variable Addressing (UVA) - not available in 32 bit host mode
if (M == CUDA) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
if (prop.unifiedAddressing != 1) {
printf("Error: No Unified Variable Addressing found. Are you trying to build your CUDA code in 32bit mode?\n");
}
}
// Initialise the OpenGL viewer and context
initGL();
// Initialise our instance rendering and the data
initHistShader();
initNBodyShader();
initHistVertexData();
initNBodyVertexData();
}
void setNBodyPositions2f(const float *positions_x, const float *positions_y) {
// Check that the supplied pointers are device pointers when in `CUDA` mode
if (M == CUDA) {
hipPointerAttribute_t attributes;
// Host allocated memory will cause an error - check for `positions_x`
if (hipPointerGetAttributes(&attributes, positions_x) == hipErrorInvalidValue) {
hipGetLastError(); // Clear out the previous API error
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
// If UVA was used, memory allocated by the device may still be `hipMemoryTypeHost`, which can't be used by the device.
if (attributes.type != hipMemoryTypeDevice) {
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
// Host allocated memory will cause an error - check for `positions_y`
if (hipPointerGetAttributes(&attributes, positions_y) == hipErrorInvalidValue) {
hipGetLastError(); // Clear out the previous API error
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
// If UVA was used, memory allocated by the device may still be `hipMemoryTypeHost`, which can't be used by the device.
if (attributes.type != hipMemoryTypeDevice) {
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
}
PositionsX = positions_x;
PositionsY = positions_y;
if (Bodies != 0){
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setNBodyPositions(const nbody_soa *bodies) {
// Check that the supplied pointer is a device pointer when in `CUDA` mode
if (M == CUDA) {
hipPointerAttribute_t attributes;
// Host allocated memory will cause an error - check for `bodies`
if (hipPointerGetAttributes(&attributes, bodies) == hipErrorInvalidValue) {
hipGetLastError(); // Clear out the previous API error
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
// If UVA was used, memory allocated by the device may still be `hipMemoryTypeHost`, which can't be used by the device.
if (attributes.type != hipMemoryTypeDevice) {
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
}
Bodies = bodies;
if ((PositionsX != 0) || (PositionsY != 0)) {
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setHistogramData(const float *densities) { // Alias function to avoid repetition of code
setActivityMapData(densities);
}
void setActivityMapData(const float *activity) {
// Check that the supplied pointer is a device pointer when in `CUDA` mode
if (M == CUDA){
hipPointerAttribute_t attributes;
// Host allocated memory will cause an error - check for `activity`
if (hipPointerGetAttributes(&attributes, activity) == hipErrorInvalidValue) {
hipGetLastError(); // Clear out the previous API error
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
// If UVA was used, memory allocated by the device may still be `hipMemoryTypeHost`, which can't be used by the device.
if (attributes.type != hipMemoryTypeDevice) {
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
}
Densities = activity;
}
void startVisualisationLoop() {
glutMainLoop();
}
//////////////////////////////// Source module functions ////////////////////////////////
void displayLoop(void) {
unsigned int i;
float *dptr;
size_t num_bytes;
unsigned int blocks;
float t;
if (simulate_function == 0) {
printf("Error: Simulate function has not been defined by calling initViewer(...)\n");
return;
}
// Frames Per Second timing
if (M == CUDA) {
hipDeviceSynchronize();
}
t = (float)clock(); // Take a timestamp (usually clock ticks measures milliseconds)
if (prev_time) { // Update the elapsed time (ms) if not the first iteration of the display loop
elapsed += t - prev_time;
}
prev_time = t;
frames++; // Increment the frame counter
if (frames == TIMING_FRAME_COUNT) { // Measure FPS and write to window title after every 20 frames (`TIMING_FRAME_COUNT`)
frames = 0; // Reset the frames counter
		elapsed /= CLOCKS_PER_SEC;			// Convert elapsed time from clock ticks to seconds
sprintf(title, "Com4521 Assignment - NBody Visualiser (%f FPS)", (float)TIMING_FRAME_COUNT / elapsed);
glutSetWindowTitle(title);
elapsed = 0; // Reset elapsed time
}
// Call the simulation function
simulate_function();
// Map data from user supplied pointers into Texture Buffer Object
if (M == CUDA) { // Map data from user supplied pointers into TBO using CUDA
// Nbody positions data: map buffer to device pointer so a GPU kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
num_bytes = N * 3 * sizeof(float);
hipGraphicsMapResources(1, &cuda_nbody_vbo_resource, 0);
hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_nbody_vbo_resource);
// Prepare kernel launch parameters
blocks = N / 256;
if ((N % 256) != 0) { // 256 threads per block, ensure least number of blocks for total threads to exceed `N`
blocks++;
}
// Kernel to copy data into the buffer - two possible formats in which users can supply body position data
if (Bodies != 0) {
copyNBodyData << <blocks, 256 >> >(dptr, Bodies, N);
}
else if ((PositionsX != 0) && (PositionsY != 0)) {
copyNBodyData2f << <blocks, 256 >> >(dptr, PositionsX, PositionsY, N);
}
hipGraphicsUnmapResources(1, &cuda_nbody_vbo_resource, 0);
checkCUDAError("Error copying NBody position data from supplied device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
// Histogram/activity map data: map buffer to device pointer so a GPU kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
num_bytes = D * D * sizeof(float);
hipGraphicsMapResources(1, &cuda_hist_vbo_resource, 0);
hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_hist_vbo_resource);
// Prepare kernel launch parameters
blocks = D * D / 256;
if (((D * D) % 256) != 0) { // 256 threads per block, ensure least number of blocks for total threads to exceed `D^2`
blocks++;
}
// Kernel to map data into buffer
copyHistData << <blocks, 256 >> >(dptr, Densities, D);
hipGraphicsUnmapResources(1, &cuda_hist_vbo_resource, 0);
checkCUDAError("Error copying Activity Map data from supplied device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
else { // Map data from user supplied pointers into Texture Buffer Object using CPU for `CPU` or `OPENMP` mode
// Map buffer to Texture Buffer Object for Nbody positions and copy data to it from user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); // `tbo_nbody` buffer
if (dptr == 0) {
printf("Error: Unable to map NBody Texture Buffer Object\n");
return;
}
if (Bodies != 0) {
for (i = 0; i < N; i++) {
unsigned int index = i + i;
dptr[index] = Bodies->x[i];
dptr[index + 1] = Bodies->y[i];
}
}
else if ((PositionsX != 0) && (PositionsY != 0)) {
for (i = 0; i < N; i++) {
unsigned int index = i + i;
dptr[index] = PositionsX[i];
dptr[index + 1] = PositionsY[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
// Map the histogram Texture Buffer Object and copy data to it from the user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); // `tbo_hist` buffer
if (dptr == 0) {
printf("Error: Unable to map Histogram Texture Buffer Object\n");
return;
}
if (Densities != 0) {
for (i = 0; i < D * D; i++) {
dptr[i] = Densities[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
// Render
render();
checkGLError();
}
void initHistShader() {
// Histogram vertex shader
vs_hist_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_hist_shader, 1, &hist_vertexShaderSource, 0);
glCompileShader(vs_hist_shader);
// Check for errors
GLint status;
glGetShaderiv(vs_hist_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: Histogram Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_hist_shader, 1024, &len, data);
printf("%s", data);
}
// Program
vs_hist_program = glCreateProgram();
glAttachShader(vs_hist_program, vs_hist_shader);
glLinkProgram(vs_hist_program);
glGetProgramiv(vs_hist_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: Histogram Shader Program Link Error\n");
}
glUseProgram(vs_hist_program);
// Get shader variables
vs_hist_instance_index = glGetAttribLocation(vs_hist_program, "instance_index");
if (vs_hist_instance_index == (GLuint)-1) {
printf("Warning: Histogram Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
// Check for any errors
checkGLError();
}
void initNBodyShader() {
// nbody vertex shader
vs_nbody_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_nbody_shader, 1, &nbody_vertexShaderSource, 0);
glCompileShader(vs_nbody_shader);
// Check for errors
GLint status;
glGetShaderiv(vs_nbody_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: nbody Program Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_nbody_shader, 1024, &len, data);
printf("%s", data);
}
// Program
vs_nbody_program = glCreateProgram();
glAttachShader(vs_nbody_program, vs_nbody_shader);
glLinkProgram(vs_nbody_program);
glGetProgramiv(vs_nbody_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: NBody Shader Program Link Error\n");
}
glUseProgram(vs_nbody_program);
// Get shader variables
vs_nbody_instance_index = glGetAttribLocation(vs_nbody_program, "instance_index");
if (vs_nbody_instance_index == (GLuint)-1) {
printf("Warning: nbody Program Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
// Check for any errors
checkGLError();
}
void initHistVertexData() {
/* Vertex Array Object */
glGenVertexArrays(1, &vao_hist); // Create our Vertex Array Object
glBindVertexArray(vao_hist); // Bind our Vertex Array Object so we can use it
/* Create a vertex buffer */
// Create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_hist_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_vertices);
glBufferData(GL_ARRAY_BUFFER, D * D * 4 * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
float quad_size = 1.0f / D;
for (unsigned int y = 0; y < D; y++) {
for (unsigned int x = 0; x < D; x++) {
int offset = (D * y + x) * 3 * 4;
float x_min = (float)x / D;
float y_min = (float)y / D;
// First vertex
verts[offset + 0] = x_min - 0.5f;
verts[offset + 1] = y_min - 0.5f;
verts[offset + 2] = 0.0f;
// Second vertex
verts[offset + 3] = x_min - 0.5f;
verts[offset + 4] = y_min + quad_size - 0.5f;
verts[offset + 5] = 0.0f;
// Third vertex
verts[offset + 6] = x_min + quad_size - 0.5f;
verts[offset + 7] = y_min + quad_size - 0.5f;
verts[offset + 8] = 0.0f;
// Fourth vertex
verts[offset + 9] = x_min + quad_size - 0.5f;
verts[offset + 10] = y_min - 0.5f;
verts[offset + 11] = 0.0f;
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_hist_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_instance_ids);
glBufferData(GL_ARRAY_BUFFER, D*D * 4 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int y = 0; y < D; y++) {
for (unsigned int x = 0; x < D; x++) {
int index = D * y + x;
int offset = index + index + index + index; // int offset = index * 4
// Four vertices (a quad) have the same instance index
ids[offset + 0] = index;
ids[offset + 1] = index;
ids[offset + 2] = index;
ids[offset + 3] = index;
}
}
// Map instance
glVertexAttribIPointer((GLuint)vs_hist_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_hist_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
//check for errors
checkGLError();
/* Texture buffer object */
glGenBuffers(1, &tbo_hist);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_hist);
glBufferData(GL_TEXTURE_BUFFER, D * D * 1 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 1 `float` element in a texture buffer object for histogram density
/* Generate texture */
glGenTextures(1, &tex_hist);
glBindTexture(GL_TEXTURE_BUFFER, tex_hist);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, tbo_hist);
// Create CUDA GL resource to write CUDA data to Texture Buffer Object
if (M == CUDA) {
hipGraphicsGLRegisterBuffer(&cuda_hist_vbo_resource, tbo_hist, hipGraphicsMapFlagsWriteDiscard);
}
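// Note: the buffer is registered with the runtime once here; displayLoop() then maps and unmaps the
// resource each frame to obtain a device pointer for the copy kernels to write into.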
// Unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
// Unbind VAO
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void initNBodyVertexData() {
/* Vertex Array Object */
glGenVertexArrays(1, &vao_nbody); // Create our Vertex Array Object
glBindVertexArray(vao_nbody); // Bind our Vertex Array Object so we can use it
/* Create a vertex buffer */
// create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_nbody_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_vertices);
glBufferData(GL_ARRAY_BUFFER, N * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
int offset = i + i + i; // int offset = i * 3
// Vertex point
verts[offset + 0] = -0.5f;
verts[offset + 1] = -0.5f;
verts[offset + 2] = 0.0f;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_nbody_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_instance_ids);
glBufferData(GL_ARRAY_BUFFER, N * 1 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
//single vertex as it is a point
ids[i] = i;
}
// Map instance
glVertexAttribIPointer((GLuint)vs_nbody_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_nbody_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
// Check for errors
checkGLError();
/* Texture Buffer Object */
glGenBuffers(1, &tbo_nbody);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_nbody);
glBufferData(GL_TEXTURE_BUFFER, N * 2 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 2 `float` elements in a texture buffer object for x and y position
/* Generate texture */
glGenTextures(1, &tex_nbody);
glBindTexture(GL_TEXTURE_BUFFER, tex_nbody);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, tbo_nbody);
// Create CUDA GL resource to write CUDA data to Texture Buffer Object
if (M == CUDA) {
hipGraphicsGLRegisterBuffer(&cuda_nbody_vbo_resource, tbo_nbody, hipGraphicsMapFlagsWriteDiscard);
}
// Unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
// Unbind VAO
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void destroyViewer() {
checkGLError();
// Cleanup histogram VAO
glBindVertexArray(vao_hist);
glDeleteBuffers(1, &vao_hist_vertices);
vao_hist_vertices = 0;
glDeleteBuffers(1, &vao_hist_instance_ids);
vao_hist_instance_ids = 0;
glDeleteBuffers(1, &tbo_hist);
tbo_hist = 0;
glDeleteTextures(1, &tex_hist);
tex_hist = 0;
if (M == CUDA) {
hipGraphicsUnregisterResource(cuda_hist_vbo_resource);
}
glDeleteVertexArrays(1, &vao_hist);
vao_hist = 0;
// Cleanup nbody VAO
glBindVertexArray(vao_nbody);
glDeleteBuffers(1, &vao_nbody_vertices);
vao_nbody_vertices = 0;
glDeleteBuffers(1, &vao_nbody_instance_ids);
vao_nbody_instance_ids = 0;
glDeleteBuffers(1, &tbo_nbody);
tbo_nbody = 0;
glDeleteTextures(1, &tex_nbody);
tex_nbody = 0;
if (M == CUDA) {
hipGraphicsUnregisterResource(cuda_nbody_vbo_resource);
}
glDeleteVertexArrays(1, &vao_nbody);
vao_nbody = 0;
checkGLError();
}
void initGL() {
// Specify command line argument for window name and initialise glut program with `glutInit` function
int argc = 1;
char * argv[] = { "Com4521 Assignment - NBody Visualiser" };
glutInit(&argc, argv);
// Initialise window
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT);
glutInitWindowPosition(100, 0);
glutCreateWindow(*argv);
// glew init (must be done after window creation because GLEW needs a current OpenGL context)
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 ")) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.\n");
fflush(stderr);
exit(0);
}
// Register default callbacks
glutDisplayFunc(displayLoop);
glutKeyboardFunc(handleKeyboardDefault);
glutMotionFunc(handleMouseMotionDefault);
glutMouseFunc(handleMouseDefault);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
// Default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// Viewport
glViewport(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
// Projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)WINDOW_WIDTH / (GLfloat)WINDOW_HEIGHT, 0.001, 10.0);
}
void render(void) {
// Set view matrix and prepare for rendering
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Transformations
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_z, 0.0, 0.0, 1.0);
// Render the density field
if (display_density) {
// Attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_hist_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_hist);
// Bind and activate texture with instance data (held with the Texture Buffer Object)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_hist);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_QUADS, 0, 4 * D * D);
// Unbind the Vertex Array Object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
// Render the n bodies
if (display_bodies) {
// Attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_nbody_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_nbody);
// Bind and activate texture with instance data (held with the Texture Buffer Object)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_nbody);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_POINTS, 0, 1 * N);
// Unbind the vertex array object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
glutSwapBuffers();
glutPostRedisplay();
}
void checkGLError() {
int Error;
if ((Error = glGetError()) != GL_NO_ERROR) {
const char* Message = (const char*)gluErrorString(Error);
fprintf(stderr, "OpenGL Error : %s\n", Message);
}
}
void handleKeyboardDefault(unsigned char key, int x, int y) {
switch (key) {
case(27): case('q'): // Escape `Esc` key or `q` key
// Return control to the users program to allow them to clean-up any allocated memory etc.
glutLeaveMainLoop();
break;
case('b'): // `b` key to toggle display bodies
display_bodies = !display_bodies;
break;
case('d'): // `d` key to toggle display activity grid map
display_density = !display_density;
break;
}
}
void handleMouseDefault(int button, int state, int x, int y) {
if (state == GLUT_DOWN) {
mouse_buttons |= 1 << button;
}
else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
}
void handleMouseMotionDefault(int x, int y) {
float dx, dy;
dx = (float)(x - mouse_old_x);
dy = (float)(y - mouse_old_y);
if (mouse_buttons & 1) { // Rotate with left click and mouse motion
rotate_x += dy * 0.2f;
rotate_z += dx * 0.2f;
}
else if (mouse_buttons & 4) { // Zoom out/in with right click and mouse motion up/down
translate_z += dy * 0.01f;
}
mouse_old_x = x;
mouse_old_y = y;
}
void checkCUDAError(const char* msg) {
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
| 532fe4066a453b7f34df99f89e9d8a4ade74ef68.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
// Include the header file
// Uncomment below if including into a `.c` file rather than `.cu` file
//extern "C" {
#include "NBodyVisualiser.h"
//}
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#define TIMING_FRAME_COUNT 20
// User supplied globals
static unsigned int N;
static unsigned int D;
static MODE M;
const float *PositionsX = 0;
const float *PositionsY = 0;
const nbody_soa *Bodies = 0;
const float *Densities = 0;
void(*simulate_function)(void) = 0;
// Instancing variables for histogram
GLuint vao_hist = 0;
GLuint vao_hist_vertices = 0;
GLuint tbo_hist = 0;
GLuint tex_hist = 0;
GLuint vao_hist_instance_ids = 0;
// Instancing variables for nbody
GLuint vao_nbody = 0;
GLuint vao_nbody_vertices = 0;
GLuint tbo_nbody = 0;
GLuint tex_nbody = 0;
GLuint vao_nbody_instance_ids = 0;
// Mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_z = 0.0;
float translate_z = -1.0;
// Vertex shader handles
GLuint vs_hist_shader = 0;
GLuint vs_nbody_shader = 0;
GLuint vs_hist_program = 0;
GLuint vs_nbody_program = 0;
GLuint vs_hist_instance_index = 0;
GLuint vs_nbody_instance_index = 0;
// Render options
bool display_bodies = true;
bool display_density = false;
// Cuda graphics resources
struct cudaGraphicsResource *cuda_nbody_vbo_resource;
struct cudaGraphicsResource *cuda_hist_vbo_resource;
// Timing variables for FPS calculation
float elapsed = 0;
float prev_time = 0;
unsigned int frames;
char title[128];
// Function prototypes
void displayLoop(void);
void initHistShader();
void initNBodyShader();
void initHistVertexData();
void initNBodyVertexData();
void initGL();
void destroyViewer();
void render(void);
void checkGLError();
void handleKeyboardDefault(unsigned char key, int x, int y);
void handleMouseDefault(int button, int state, int x, int y);
void handleMouseMotionDefault(int x, int y);
void checkCUDAError(const char *msg);
// Vertex shader source code
const char* hist_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" float instance_data = texelFetchBuffer(instance_tex, int(instance_index)).x; \n"
" vec4 position = vec4(gl_Vertex.x, gl_Vertex.y, 0.0f, 1.0f); \n"
" gl_FrontColor = vec4(instance_data, 0.0f, 0.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
const char* nbody_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" vec2 instance_data = texelFetchBuffer(instance_tex, int(instance_index)).xy; \n"
" vec4 position = vec4(gl_Vertex.x+instance_data.x, \n"
" gl_Vertex.y+instance_data.y, \n"
" gl_Vertex.z, 1.0f); \n"
" gl_FrontColor = vec4(1.0f, 1.0f, 1.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
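// Both vertex shaders fetch per-instance data from a buffer texture via the instance_index attribute:
// the histogram shader writes the density value into the red channel of each quad, while the nbody
// shader offsets a point vertex by the body's (x, y) position.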
//////////////////////////////// CUDA Kernels ////////////////////////////////
__global__ void copyNBodyData2f(float* buffer, const float *x, const float *y, const unsigned int N) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
// Copy data to mapped `buffer`, which should have length at least `2N`
float* ptr = &buffer[i + i]; // Locate the address of position `2i` in `buffer` for thread `i`
ptr[0] = x[i];
ptr[1] = y[i];
}
}
__global__ void copyNBodyData(float* buffer, const nbody_soa* bodies, const unsigned int N) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
// Copy data to mapped `buffer`, which should have length at least `2N`
float* ptr = &buffer[i + i]; // Locate the address of position `2i` in `buffer` for thread `i`
ptr[0] = bodies->x[i];
ptr[1] = bodies->y[i];
}
}
__global__ void copyHistData(float* buffer, const float* densities, const unsigned int D) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < D*D) {
// Copy data to mapped `buffer`, which should have length at least `D^2`
buffer[i] = densities[i];
}
}
//////////////////////////////// Header declared functions ////////////////////////////////
void initViewer(unsigned int n, unsigned int d, MODE m, void(*simulate)(void)) {
N = n;
D = d;
M = m;
simulate_function = simulate;
// Check for Unified Virtual Addressing (UVA) - not available in 32 bit host mode
if (M == CUDA) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
if (prop.unifiedAddressing != 1) {
printf("Error: No Unified Variable Addressing found. Are you trying to build your CUDA code in 32bit mode?\n");
}
}
// Initialise the OpenGL viewer and context
initGL();
// Initialise our instance rendering and the data
initHistShader();
initNBodyShader();
initHistVertexData();
initNBodyVertexData();
}
void setNBodyPositions2f(const float *positions_x, const float *positions_y) {
// Check that the supplied pointers are device pointers when in `CUDA` mode
if (M == CUDA) {
cudaPointerAttributes attributes;
// Host allocated memory will cause an error - check for `positions_x`
if (cudaPointerGetAttributes(&attributes, positions_x) == cudaErrorInvalidValue) {
cudaGetLastError(); // Clear out the previous API error
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
// Even if the query above succeeds (e.g. under UVA), the pointer may still refer to `cudaMemoryTypeHost` memory, which can't be used by the device.
if (attributes.type != cudaMemoryTypeDevice) {
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
// Host allocated memory will cause an error - check for `positions_y`
if (cudaPointerGetAttributes(&attributes, positions_y) == cudaErrorInvalidValue) {
cudaGetLastError(); // Clear out the previous API error
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
// Even if the query above succeeds (e.g. under UVA), the pointer may still refer to `cudaMemoryTypeHost` memory, which can't be used by the device.
if (attributes.type != cudaMemoryTypeDevice) {
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
}
PositionsX = positions_x;
PositionsY = positions_y;
if (Bodies != 0){
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setNBodyPositions(const nbody_soa *bodies) {
// Check that the supplied pointer is a device pointer when in `CUDA` mode
if (M == CUDA) {
cudaPointerAttributes attributes;
// Host allocated memory will cause an error - check for `bodies`
if (cudaPointerGetAttributes(&attributes, bodies) == cudaErrorInvalidValue) {
cudaGetLastError(); // Clear out the previous API error
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
// Even if the query above succeeds (e.g. under UVA), the pointer may still refer to `cudaMemoryTypeHost` memory, which can't be used by the device.
if (attributes.type != cudaMemoryTypeDevice) {
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
}
Bodies = bodies;
if ((PositionsX != 0) || (PositionsY != 0)) {
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setHistogramData(const float *densities) { // Alias function to avoid repetition of code
setActivityMapData(densities);
}
void setActivityMapData(const float *activity) {
// Check that the supplied pointer is a device pointer when in `CUDA` mode
if (M == CUDA){
cudaPointerAttributes attributes;
// Host allocated memory will cause an error - check for `activity`
if (cudaPointerGetAttributes(&attributes, activity) == cudaErrorInvalidValue) {
cudaGetLastError(); // Clear out the previous API error
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
// If UVA was used, memory allocated by the device may still be `cudaMemoryTypeHost`, which can't be used by the device.
if (attributes.type != cudaMemoryTypeDevice) {
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
}
Densities = activity;
}
void startVisualisationLoop() {
glutMainLoop();
}
//////////////////////////////// Source module functions ////////////////////////////////
void displayLoop(void) {
unsigned int i;
float *dptr;
size_t num_bytes;
unsigned int blocks;
float t;
if (simulate_function == 0) {
printf("Error: Simulate function has not been defined by calling initViewer(...)\n");
return;
}
// Frames Per Second timing
if (M == CUDA) {
cudaDeviceSynchronize();
}
t = (float)clock(); // Take a timestamp in processor clock ticks (CLOCKS_PER_SEC ticks per second)
if (prev_time) { // Update the elapsed time (ms) if not the first iteration of the display loop
elapsed += t - prev_time;
}
prev_time = t;
frames++; // Increment the frame counter
if (frames == TIMING_FRAME_COUNT) { // Measure FPS and write to window title after every 20 frames (`TIMING_FRAME_COUNT`)
frames = 0; // Reset the frames counter
elapsed /= CLOCKS_PER_SEC; // Convert the elapsed clock ticks to seconds
sprintf(title, "Com4521 Assignment - NBody Visualiser (%f FPS)", (float)TIMING_FRAME_COUNT / elapsed);
glutSetWindowTitle(title);
elapsed = 0; // Reset elapsed time
}
// Call the simulation function
simulate_function();
// Map data from user supplied pointers into Texture Buffer Object
if (M == CUDA) { // Map data from user supplied pointers into TBO using CUDA
// Nbody positions data: map buffer to device pointer so a GPU kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
num_bytes = N * 3 * sizeof(float);
cudaGraphicsMapResources(1, &cuda_nbody_vbo_resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_nbody_vbo_resource);
// Prepare kernel launch parameters
blocks = N / 256;
if ((N % 256) != 0) { // 256 threads per block, ensure least number of blocks for total threads to exceed `N`
blocks++;
}
// Kernel to copy data into the buffer - two possible formats in which users can supply body position data
if (Bodies != 0) {
copyNBodyData << <blocks, 256 >> >(dptr, Bodies, N);
}
else if ((PositionsX != 0) && (PositionsY != 0)) {
copyNBodyData2f << <blocks, 256 >> >(dptr, PositionsX, PositionsY, N);
}
cudaGraphicsUnmapResources(1, &cuda_nbody_vbo_resource, 0);
checkCUDAError("Error copying NBody position data from supplied device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
// Histogram/activity map data: map buffer to device pointer so a GPU kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
num_bytes = D * D * sizeof(float);
cudaGraphicsMapResources(1, &cuda_hist_vbo_resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_hist_vbo_resource);
// Prepare kernel launch parameters
blocks = D * D / 256;
if (((D * D) % 256) != 0) { // 256 threads per block, ensure least number of blocks for total threads to exceed `D^2`
blocks++;
}
// Kernel to map data into buffer
copyHistData << <blocks, 256 >> >(dptr, Densities, D);
cudaGraphicsUnmapResources(1, &cuda_hist_vbo_resource, 0);
checkCUDAError("Error copying Activity Map data from supplied device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
else { // Map data from user supplied pointers into Texture Buffer Object using CPU for `CPU` or `OPENMP` mode
// Map buffer to Texture Buffer Object for Nbody positions and copy data to it from user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); // `tbo_nbody` buffer
if (dptr == 0) {
printf("Error: Unable to map NBody Texture Buffer Object\n");
return;
}
if (Bodies != 0) {
for (i = 0; i < N; i++) {
unsigned int index = i + i;
dptr[index] = Bodies->x[i];
dptr[index + 1] = Bodies->y[i];
}
}
else if ((PositionsX != 0) && (PositionsY != 0)) {
for (i = 0; i < N; i++) {
unsigned int index = i + i;
dptr[index] = PositionsX[i];
dptr[index + 1] = PositionsY[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
// Map the histogram Texture Buffer Object and copy data to it from the user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); // `tbo_hist` buffer
if (dptr == 0) {
printf("Error: Unable to map Histogram Texture Buffer Object\n");
return;
}
if (Densities != 0) {
for (i = 0; i < D * D; i++) {
dptr[i] = Densities[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
// Render
render();
checkGLError();
}
void initHistShader() {
// Histogram vertex shader
vs_hist_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_hist_shader, 1, &hist_vertexShaderSource, 0);
glCompileShader(vs_hist_shader);
// Check for errors
GLint status;
glGetShaderiv(vs_hist_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: Histogram Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_hist_shader, 1024, &len, data);
printf("%s", data);
}
// Program
vs_hist_program = glCreateProgram();
glAttachShader(vs_hist_program, vs_hist_shader);
glLinkProgram(vs_hist_program);
glGetProgramiv(vs_hist_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: Histogram Shader Program Link Error\n");
}
glUseProgram(vs_hist_program);
// Get shader variables
vs_hist_instance_index = glGetAttribLocation(vs_hist_program, "instance_index");
if (vs_hist_instance_index == (GLuint)-1) {
printf("Warning: Histogram Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
// Check for any errors
checkGLError();
}
void initNBodyShader() {
// nbody vertex shader
vs_nbody_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_nbody_shader, 1, &nbody_vertexShaderSource, 0);
glCompileShader(vs_nbody_shader);
// Check for errors
GLint status;
glGetShaderiv(vs_nbody_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: nbody Program Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_nbody_shader, 1024, &len, data);
printf("%s", data);
}
// Program
vs_nbody_program = glCreateProgram();
glAttachShader(vs_nbody_program, vs_nbody_shader);
glLinkProgram(vs_nbody_program);
glGetProgramiv(vs_nbody_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE) {
printf("ERROR: NBody Shader Program Link Error\n");
}
glUseProgram(vs_nbody_program);
// Get shader variables
vs_nbody_instance_index = glGetAttribLocation(vs_nbody_program, "instance_index");
if (vs_nbody_instance_index == (GLuint)-1) {
printf("Warning: nbody Program Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
// Check for any errors
checkGLError();
}
void initHistVertexData() {
/* Vertex Array Object */
glGenVertexArrays(1, &vao_hist); // Create our Vertex Array Object
glBindVertexArray(vao_hist); // Bind our Vertex Array Object so we can use it
/* Create a vertex buffer */
// Create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_hist_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_vertices);
glBufferData(GL_ARRAY_BUFFER, D * D * 4 * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
float quad_size = 1.0f / D;
for (unsigned int y = 0; y < D; y++) {
for (unsigned int x = 0; x < D; x++) {
int offset = (D * y + x) * 3 * 4;
float x_min = (float)x / D;
float y_min = (float)y / D;
// First vertex
verts[offset + 0] = x_min - 0.5f;
verts[offset + 1] = y_min - 0.5f;
verts[offset + 2] = 0.0f;
// Second vertex
verts[offset + 3] = x_min - 0.5f;
verts[offset + 4] = y_min + quad_size - 0.5f;
verts[offset + 5] = 0.0f;
// Third vertex
verts[offset + 6] = x_min + quad_size - 0.5f;
verts[offset + 7] = y_min + quad_size - 0.5f;
verts[offset + 8] = 0.0f;
// Fourth vertex
verts[offset + 9] = x_min + quad_size - 0.5f;
verts[offset + 10] = y_min - 0.5f;
verts[offset + 11] = 0.0f;
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_hist_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_instance_ids);
glBufferData(GL_ARRAY_BUFFER, D*D * 4 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int y = 0; y < D; y++) {
for (unsigned int x = 0; x < D; x++) {
int index = D * y + x;
int offset = index + index + index + index; // int offset = index * 4
// Four vertices (a quad) have the same instance index
ids[offset + 0] = index;
ids[offset + 1] = index;
ids[offset + 2] = index;
ids[offset + 3] = index;
}
}
// Map instance
glVertexAttribIPointer((GLuint)vs_hist_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_hist_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
//check for errors
checkGLError();
/* Texture buffer object */
glGenBuffers(1, &tbo_hist);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_hist);
glBufferData(GL_TEXTURE_BUFFER, D * D * 1 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 1 `float` element in a texture buffer object for histogram density
/* Generate texture */
glGenTextures(1, &tex_hist);
glBindTexture(GL_TEXTURE_BUFFER, tex_hist);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, tbo_hist);
// Create CUDA GL resource to write CUDA data to Texture Buffer Object
if (M == CUDA) {
cudaGraphicsGLRegisterBuffer(&cuda_hist_vbo_resource, tbo_hist, cudaGraphicsMapFlagsWriteDiscard);
}
// Unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
// Unbind VAO
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void initNBodyVertexData() {
/* Vertex Array Object */
glGenVertexArrays(1, &vao_nbody); // Create our Vertex Array Object
glBindVertexArray(vao_nbody); // Bind our Vertex Array Object so we can use it
/* Create a vertex buffer */
// create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_nbody_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_vertices);
glBufferData(GL_ARRAY_BUFFER, N * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
int offset = i + i + i; // int offset = i * 3
// Vertex point
verts[offset + 0] = -0.5f;
verts[offset + 1] = -0.5f;
verts[offset + 2] = 0.0f;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_nbody_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_instance_ids);
glBufferData(GL_ARRAY_BUFFER, N * 1 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
//single vertex as it is a point
ids[i] = i;
}
// Map instance
glVertexAttribIPointer((GLuint)vs_nbody_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_nbody_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
// Check for errors
checkGLError();
/* Texture Buffer Object */
glGenBuffers(1, &tbo_nbody);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_nbody);
glBufferData(GL_TEXTURE_BUFFER, N * 2 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 2 `float` elements in a texture buffer object for x and y position
/* Generate texture */
glGenTextures(1, &tex_nbody);
glBindTexture(GL_TEXTURE_BUFFER, tex_nbody);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, tbo_nbody);
// Create CUDA GL resource to write CUDA data to Texture Buffer Object
if (M == CUDA) {
cudaGraphicsGLRegisterBuffer(&cuda_nbody_vbo_resource, tbo_nbody, cudaGraphicsMapFlagsWriteDiscard);
}
// Unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
// Unbind VAO
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void destroyViewer() {
checkGLError();
// Cleanup histogram VAO
glBindVertexArray(vao_hist);
glDeleteBuffers(1, &vao_hist_vertices);
vao_hist_vertices = 0;
glDeleteBuffers(1, &vao_hist_instance_ids);
vao_hist_instance_ids = 0;
glDeleteBuffers(1, &tbo_hist);
tbo_hist = 0;
glDeleteTextures(1, &tex_hist);
tex_hist = 0;
if (M == CUDA) {
cudaGraphicsUnregisterResource(cuda_hist_vbo_resource);
}
glDeleteVertexArrays(1, &vao_hist);
vao_hist = 0;
// Cleanup nbody VAO
glBindVertexArray(vao_nbody);
glDeleteBuffers(1, &vao_nbody_vertices);
vao_nbody_vertices = 0;
glDeleteBuffers(1, &vao_nbody_instance_ids);
vao_nbody_instance_ids = 0;
glDeleteBuffers(1, &tbo_nbody);
tbo_nbody = 0;
glDeleteTextures(1, &tex_nbody);
tex_nbody = 0;
if (M == CUDA) {
cudaGraphicsUnregisterResource(cuda_nbody_vbo_resource);
}
glDeleteVertexArrays(1, &vao_nbody);
vao_nbody = 0;
checkGLError();
}
void initGL() {
// Specify command line argument for window name and initialise glut program with `glutInit` function
int argc = 1;
char * argv[] = { "Com4521 Assignment - NBody Visualiser" };
glutInit(&argc, argv);
// Initialise window
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT);
glutInitWindowPosition(100, 0);
glutCreateWindow(*argv);
// glew init (must be done after window creation because GLEW needs a current OpenGL context)
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 ")) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.\n");
fflush(stderr);
exit(0);
}
// Register default callbacks
glutDisplayFunc(displayLoop);
glutKeyboardFunc(handleKeyboardDefault);
glutMotionFunc(handleMouseMotionDefault);
glutMouseFunc(handleMouseDefault);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
// Default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// Viewport
glViewport(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
// Projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)WINDOW_WIDTH / (GLfloat)WINDOW_HEIGHT, 0.001, 10.0);
}
void render(void) {
// Set view matrix and prepare for rendering
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Transformations
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_z, 0.0, 0.0, 1.0);
// Render the density field
if (display_density) {
// Attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_hist_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_hist);
// Bind and activate texture with instance data (held with the Texture Buffer Object)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_hist);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_QUADS, 0, 4 * D * D);
// Unbind the Vertex Array Object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
// Render the n bodies
if (display_bodies) {
// Attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_nbody_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_nbody);
// Bind and activate texture with instance data (held with the Texture Buffer Object)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_nbody);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_POINTS, 0, 1 * N);
// Unbind the vertex array object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
glutSwapBuffers();
glutPostRedisplay();
}
void checkGLError() {
int Error;
if ((Error = glGetError()) != GL_NO_ERROR) {
const char* Message = (const char*)gluErrorString(Error);
fprintf(stderr, "OpenGL Error : %s\n", Message);
}
}
void handleKeyboardDefault(unsigned char key, int x, int y) {
switch (key) {
case(27): case('q'): // Escape `Esc` key or `q` key
// Return control to the users program to allow them to clean-up any allocated memory etc.
glutLeaveMainLoop();
break;
case('b'): // `b` key to toggle display bodies
display_bodies = !display_bodies;
break;
case('d'): // `d` key to toggle display activity grid map
display_density = !display_density;
break;
}
}
void handleMouseDefault(int button, int state, int x, int y) {
if (state == GLUT_DOWN) {
mouse_buttons |= 1 << button;
}
else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
}
void handleMouseMotionDefault(int x, int y) {
float dx, dy;
dx = (float)(x - mouse_old_x);
dy = (float)(y - mouse_old_y);
if (mouse_buttons & 1) { // Rotate with left click and mouse motion
rotate_x += dy * 0.2f;
rotate_z += dx * 0.2f;
}
else if (mouse_buttons & 4) { // Zoom out/in with right click and mouse motion up/down
translate_z += dy * 0.01f;
}
mouse_old_x = x;
mouse_old_y = y;
}
void checkCUDAError(const char* msg) {
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
7765899e67efa8bed6296bf342a022666edae788.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/cluster/spectral.hpp>
#include <cuml/cuml.hpp>
#include "random/rng.h"
#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <iostream>
#include <vector>
namespace ML {
using namespace MLCommon;
template <typename T>
class SpectralTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
};
typedef SpectralTest<float> TestSpectralClustering;
TEST_F(TestSpectralClustering, Fit) {
int n = 500;
int d = 30;
int k = 3;
float *X;
cumlHandle handle;
MLCommon::allocate(X, n * d);
Random::Rng r(150, MLCommon::Random::GenTaps);
r.uniform(X, n * d, -1.0f, 1.0f, handle.getStream());
int *out;
MLCommon::allocate(out, n, true);
ML::Spectral::fit_clusters(handle, X, n, d, k, 10, 1e-3f, out);
CUDA_CHECK(hipStreamSynchronize(handle.getStream()));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(X));
}
typedef SpectralTest<float> TestSpectralEmbedding;
TEST_F(TestSpectralEmbedding, Fit) {
int n = 500;
int d = 30;
int k = 3;
float *X;
cumlHandle handle;
MLCommon::allocate(X, n * d);
Random::Rng r(150, MLCommon::Random::GenTaps);
r.uniform(X, n * d, -1.0f, 1.0f, handle.getStream());
float *out;
MLCommon::allocate(out, n * 2, true);
ML::Spectral::fit_embedding(handle, X, n, d, k, 2, out);
CUDA_CHECK(hipStreamSynchronize(handle.getStream()));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(X));
}
} // end namespace ML
| 7765899e67efa8bed6296bf342a022666edae788.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/cluster/spectral.hpp>
#include <cuml/cuml.hpp>
#include "random/rng.h"
#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <iostream>
#include <vector>
namespace ML {
using namespace MLCommon;
template <typename T>
class SpectralTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
};
typedef SpectralTest<float> TestSpectralClustering;
TEST_F(TestSpectralClustering, Fit) {
int n = 500;
int d = 30;
int k = 3;
float *X;
cumlHandle handle;
MLCommon::allocate(X, n * d);
Random::Rng r(150, MLCommon::Random::GenTaps);
r.uniform(X, n * d, -1.0f, 1.0f, handle.getStream());
int *out;
MLCommon::allocate(out, n, true);
ML::Spectral::fit_clusters(handle, X, n, d, k, 10, 1e-3f, out);
CUDA_CHECK(cudaStreamSynchronize(handle.getStream()));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(X));
}
typedef SpectralTest<float> TestSpectralEmbedding;
TEST_F(TestSpectralEmbedding, Fit) {
int n = 500;
int d = 30;
int k = 3;
float *X;
cumlHandle handle;
MLCommon::allocate(X, n * d);
Random::Rng r(150, MLCommon::Random::GenTaps);
r.uniform(X, n * d, -1.0f, 1.0f, handle.getStream());
float *out;
MLCommon::allocate(out, n * 2, true);
ML::Spectral::fit_embedding(handle, X, n, d, k, 2, out);
CUDA_CHECK(cudaStreamSynchronize(handle.getStream()));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(X));
}
} // end namespace ML
|
f45eb0684205c24c132e2c2261f4d719f042e412.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "GpuTypes.h"
#include "NNTypes.h"
#include <limits>
static __constant__ GpuData cData;
void SetKDeltaGpuData()
{
hipError_t status;
status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData));
RTERROR(status, "hipMemcpyToSymbol: SetKDeltaGpuData copy to cData failed");
}
void GetKDeltaGpuData()
{
hipError_t status;
status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData));
RTERROR(status, "hipMemcpyFromSymbol: GetKDeltaGpuData copy From cData failed");
}
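// The output-delta kernels below share one pattern: the error term (a - t) - presumably from a
// squared-error style loss - multiplied by the activation derivative expressed in terms of the output a.
// The unsigned char and char specialisations first rescale raw byte targets by 1/256 and 1/128 respectively.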
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
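// For the RELU family the derivative is written in terms of the output: 1 when a > 0 for plain RELU,
// the leaky slope when a <= 0 for LRELU, and (a + alpha) on the negative side for ELU
// (since d/dx[alpha*(exp(x)-1)] = alpha*exp(x) = a + alpha there).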
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
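// Host-side dispatcher for dense output deltas: picks the kernel matching the output
// layer's activation and launches one block per example (grid.x = batch) with grid.y
// covering the stride in threadsPerBlock-sized chunks. Each kernel writes the raw
// error (a - t) scaled by the activation's derivative (Linear and SoftMax use the
// plain difference).
// Illustrative (hypothetical) call for a float target batch already resident on the GPU:
//   kCalculateOutputDelta(Sigmoid, 0, batch, stride, pUnit, pDelta, pTargets, 0.0f, 0.0f, 0.0f);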
template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateTanhOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhOutputDelta_kernel");
break;
case Linear:
hipLaunchKernelGGL(( kCalculateLinearOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateLinearOutputDelta_kernel");
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
hipLaunchKernelGGL(( kCalculateLRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope);
LAUNCHERROR("kCalculateLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
hipLaunchKernelGGL(( kCalculateELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha);
LAUNCHERROR("kCalculateELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
hipLaunchKernelGGL(( kCalculateSELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda);
LAUNCHERROR("kCalculateSELUOutputDelta_kernel");
break;
case SoftMax:
hipLaunchKernelGGL(( kCalculateSoftMaxOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel");
break;
}
}
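// Indexed variants of the dense output-delta kernels: the (optionally shuffled) example
// position is remapped through pIndex to locate the row of pData holding that example's
// targets; the per-unit math is identical to the kernels above. The unsigned char / char
// specializations rescale quantized 8-bit targets by 1/256 and 1/128 respectively.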
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
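// Host-side dispatcher for the indexed dense output-delta kernels; mirrors
// kCalculateOutputDelta but forwards the pIndex remapping table to each kernel.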
template<typename T> void kCalculateIndexedOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateIndexedSigmoidOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidOutputDelta_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateIndexedTanhOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedTanhOutputDelta_kernel");
break;
case Linear:
hipLaunchKernelGGL(( kCalculateIndexedLinearOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedLinearOutputDelta_kernel");
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateIndexedRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
hipLaunchKernelGGL(( kCalculateIndexedLRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, slope);
LAUNCHERROR("kCalculateIndexedLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
hipLaunchKernelGGL(( kCalculateIndexedELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha);
LAUNCHERROR("kCalculateIndexedELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
hipLaunchKernelGGL(( kCalculateIndexedSELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSELUOutputDelta_kernel");
break;
case SoftMax:
hipLaunchKernelGGL(( kCalculateIndexedSoftMaxOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSoftMaxOutputDelta_kernel");
break;
}
}
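// Hinge output deltas: one block per example, threads stride across the outputs;
// the delta is -t wherever the output is still negative and 0 otherwise.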
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = pData[pos];
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<typename T> void kCalculateHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock));
hipLaunchKernelGGL(( kCalculateHingeOutputDelta_kernel), dim3(batch), dim3(threads), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateHingeOutputDelta_kernel");
}
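// Indexed hinge variant: identical math, but the (optionally shuffled) example
// position is looked up in pIndex to locate the example's target row.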
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = pData[pos];
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<typename T> void kCalculateIndexedHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock));
hipLaunchKernelGGL(( kCalculateIndexedHingeOutputDelta_kernel), dim3(batch), dim3(threads), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateHingeOutputDelta_kernel");
}
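// Sparse output deltas. The Raw kernels write the delta for an implicit target of 0
// across every unit; the NonZero kernels then walk the CSR-style arrays
// (pSparseStart/pSparseEnd/pSparseIndex) one warp per example and overwrite the listed
// units with the delta for an implicit target of 1 (SoftMax uses a uniform 1/n target
// over the example's non-zero entries).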
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - (NNFloat)1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (end - pos1);
pos1 += threadIdx.x & cData._warpMask;
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
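// Dispatcher for sparse output deltas. When bSparseIgnoreZero is set, the whole delta
// buffer is cleared and only the non-zero targets are processed; otherwise the Raw
// kernel fills in the zero-target deltas first and the NonZero kernel overwrites the
// sparse positions.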
void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateSparseNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateSparseNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateSparseNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
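// Indexed sparse variants: the example position is remapped through pIndex before
// indexing pSparseStart/pSparseEnd; the per-unit math matches the non-indexed sparse
// kernels above.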
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - (NNFloat)1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (end - pos1);
pos1 += threadIdx.x & cData._warpMask;
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
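// Dispatcher for indexed sparse output deltas; reuses the Raw kernels (which do not
// depend on the index table) and launches the indexed NonZero kernels for the sparse
// positions.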
void kCalculateIndexedSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kIndexedCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateIndexedSparseNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
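// Sparse analog kernels: pSparseData supplies an explicit target value for every
// non-zero entry instead of the implicit 1.0 used above. The unsigned char / char
// specializations rescale the stored 8-bit targets by 1/256 and 1/128 respectively.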
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
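// Linear output deltas: the identity activation has derivative 1, so the
// delta is simply a - t.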
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
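// ReLU output deltas: (a - t) gated by (a > 0), so inactive units receive no
// gradient.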
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
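// Leaky ReLU output deltas: derivative 1 on the positive side and the supplied
// slope on the non-positive side.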
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
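// ELU output deltas: for a > 0 the derivative is 1; for a <= 0 the stored
// activation is alpha * (exp(x) - 1), so the derivative is recovered as
// a + alpha.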
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
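// SELU output deltas: derivative lambda on the positive side and
// lambda * alpha * exp(a) on the non-positive side, as computed here from the
// stored activation a.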
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
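// SoftMax output deltas: paired with a cross-entropy style error the softmax
// Jacobian collapses, leaving the familiar a - t form.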
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
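// Dispatcher for the sparse analog output deltas above. grid1 spans the full
// batch x stride output block and drives the "raw" kernels that initialize the
// deltas of implicit zero targets; grid2 assigns one warp per example so the
// non-zero kernels can walk that example's sparse index list. When
// bSparseIgnoreZero is set, the raw pass is skipped and the deltas are simply
// cleared with hipMemset.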
template<typename T>
void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, slope);
LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha);
LAUNCHERROR("kCalculateSparseAnalogNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha, lambda);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
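// A minimal host-side sketch of how the dispatcher above might be invoked,
// assuming device buffers and CSR-style sparse descriptors (pSparseStart,
// pSparseEnd, pSparseIndex, pSparseData) are already populated; the names and
// parameter values here are illustrative only:
//
//     kCalculateSparseAnalogOutputDelta<float>(Sigmoid, position, batch, stride,
//                                              pUnit, pDelta,
//                                              pSparseStart, pSparseEnd,
//                                              pSparseIndex, pSparseData,
//                                              false /* bSparseIgnoreZero */,
//                                              0.0f /* slope */,
//                                              1.0f /* alpha */,
//                                              1.0f /* lambda */);
//
// The kernels below repeat the same activation families for indexed datasets,
// where pIndex adds one more level of indirection before the shuffle lookup.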
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t *pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateIndexedSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, slope);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
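// Dense cross-entropy output deltas. For a sigmoid (or softmax) output layer
// trained with cross-entropy loss, the activation derivative cancels against
// the loss derivative, so the delta reduces to a - t.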
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t);
}
}
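// The dense cross-entropy dispatcher launches one block per example in
// blockIdx.x and tiles the stride dimension across blockIdx.y, matching the
// (blockIdx.y * blockDim.x + threadIdx.x) indexing used by the kernels above.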
template<typename T>
void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
case SoftMax:
hipLaunchKernelGGL(( kCalculateSigmoidCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<typename T>
void kCalculateIndexedCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
case SoftMax:
hipLaunchKernelGGL(( kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
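// Sparse cross-entropy deltas split the work into a "raw" pass that treats
// every output as an implicit zero target (delta = deltaBoost_zero * a) and a
// "non-zero" pass that overwrites the positions listed in the sparse index
// with deltaBoost_one * (a - 1).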
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
            uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
            uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel");
break;
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
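
// Analog sparse variants: the target value is read from pSparseData instead of being an
// implicit 1.0. The char and unsigned char specializations rescale the stored byte by
// 1/128 and 1/256 respectively to recover a floating-point target.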
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
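
// Indexed analog variants: same math as above, with pIndex adding an indirection from the
// batch position to the example whose sparse row is read.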
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateIndexedSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
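
// Scaled-marginal cross-entropy (SMCE) deltas for dense targets: a gradient is emitted only
// when the prediction misses its margin (a < _SMCE_oneTarget for positive targets,
// a > _SMCE_zeroTarget for zero targets), scaled by _SMCE_oneScale / _SMCE_zeroScale.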
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
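
// Host dispatch for the dense SMCE deltas above: grid.x indexes the example within the batch
// and grid.y tiles the output stride across thread blocks.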
template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
hipLaunchKernelGGL(( kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T> void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
hipLaunchKernelGGL(( kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
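
// Sparse SMCE deltas: the raw kernels push every output toward zero once it exceeds
// _SMCE_zeroTarget, and the non-zero kernels handle the listed targets. For SoftMax the
// per-example target mass is spread uniformly as 1/(number of non-zeros).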
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - (NNFloat)1.0);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1);
uint64_t offset = pos * stride;
pos1 += threadIdx.x & cData._warpMask;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - t);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
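
// Dispatch for sparse SMCE deltas; mirrors the cross-entropy dispatchers above (memset when
// ignoring zeros, otherwise the raw kernel followed by the non-zero kernel).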
void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - (NNFloat)1.0);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1);
uint64_t offset = pos * stride;
pos1 += threadIdx.x & cData._warpMask;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - t);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
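
// Data-scaled SMCE: each listed target contributes a delta weighted by its analog value from
// pSparseData. Only Sigmoid is supported; SoftMax shuts the GPU context down with an error.
// Unlike the dispatchers above, no memset is issued here when bSparseIgnoreZero is set.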
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
{
output = cData._SMCE_zeroScale * a;
}
pDelta[pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
{
output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0);
}
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
cout << "unsupported activation for this cost function" << endl;
getGpu().Shutdown();
exit(-1);
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
{
output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0);
}
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
cout << "unsupported activation for this cost function" << endl;
getGpu().Shutdown();
exit(-1);
break;
}
}
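
// L1 (absolute error) output deltas: d|a - t|/da = sgn(a - t), multiplied by the derivative
// of the output activation (sigmoid: a * (1 - a), tanh: 1 - a^2, linear: 1, and the usual
// piecewise slopes for the ReLU family).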
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
        pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
        NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateTanhL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel");
break;
case Linear:
hipLaunchKernelGGL(( kCalculateLinearL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
hipLaunchKernelGGL(( kCalculateLRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope);
LAUNCHERROR("kCalculateLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
hipLaunchKernelGGL(( kCalculateELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha);
LAUNCHERROR("kCalculateELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
hipLaunchKernelGGL(( kCalculateSELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda);
LAUNCHERROR("kCalculateSELUL1OutputDelta_kernel");
break;
}
}
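
// Indexed L1 variants: identical math, with pIndex mapping the batch position to the example
// whose target row is read from pData.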
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
        pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
        NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<typename T> void kCalculateIndexedL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateIndexedSigmoidL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidL1OutputDelta_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateIndexedTanhL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedTanhL1OutputDelta_kernel");
break;
case Linear:
hipLaunchKernelGGL(( kCalculateIndexedLinearL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateIndexedRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
hipLaunchKernelGGL(( kCalculateIndexedLRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, slope);
LAUNCHERROR("kCalculateIndexedLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
hipLaunchKernelGGL(( kCalculateIndexedELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha);
LAUNCHERROR("kCalculateIndexedELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
hipLaunchKernelGGL(( kCalculateIndexedSELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSELUL1OutputDelta_kernel");
break;
}
}
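// Summary of the L1 output-delta kernels above: for unit output a and target t (byte
// targets are rescaled by 1/256 for unsigned char and 1/128 for signed char), the
// gradient of the L1 error is sgn(a - t), multiplied by the derivative of the output
// activation expressed in terms of a:
//     Sigmoid: a * (1 - a)      Tanh: 1 - a * a      Linear: 1      RELU: (a > 0)
//     LRELU:   (a > 0) + (a <= 0) * slope
//     ELU:     (a > 0) + (a <= 0) * (a + alpha)
//     SELU:    (a > 0) * lambda + (a <= 0) * lambda * alpha * exp(a)
// The launch grid is (batch, ceil(stride / threadsPerBlock)): blockIdx.x selects the
// example and blockIdx.y * blockDim.x + threadIdx.x the output unit within it.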
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroTanhL1OutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateSparseNonZeroELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateSparseNonZeroSELUL1OutputDelta_kernel");
break;
}
}
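// The sparse L1 path works in two passes. When zero targets are not ignored, the Raw
// kernel first fills the entire batch * stride delta buffer as if every target were 0
// (so the sign term is sgn(a)); the NonZero kernel then revisits only the positions
// listed in pSparseIndex and overwrites them as if the target were 1 (sgn(a - 1)).
// One warp is assigned per example: threadIdx.x & _warpMask walks the CSR-style range
// [pSparseStart[dpos], pSparseEnd[dpos]) in warp-sized strides. When bSparseIgnoreZero
// is set, the buffer is cleared with hipMemset instead and only the non-zero positions
// receive a gradient.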
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel");
break;
}
}
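// The indexed sparse variant differs from kCalculateSparseL1OutputDelta only in how an
// example's sparse row is located: the (optionally shuffled) batch position is mapped
// through pIndex before indexing pSparseStart/pSparseEnd, which is presumably how
// indexed datasets address their underlying sparse storage. The Raw kernels are shared
// with the non-indexed path, since a target of zero needs no per-example lookup.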
__global__ void
LAUNCH_BOUNDS()
kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
// Calculate sum of activations
if (pos < stride)
{
NNFloat pi = (NNFloat)0.0;
for (int i = 0; i < batch; i++)
{
pi += pUnit[pos];
pos += stride;
}
// Calculate sparseness penalty
pi /= (NNFloat)batch;
pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi));
NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi));
// Apply sparseness penalty to deltas
pos = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < batch; i++)
{
pDelta[pos] += penalty;
pos += stride;
}
}
}
// Calculates and applies sparseness penalty to hidden layers
void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
dim3 grid1(CalculateBlocks(stride));
hipLaunchKernelGGL(( kCalculateSparsenessPenalty_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, batch, stride, pUnit, pDelta, p, beta);
LAUNCHERROR("kCalculateSparsenessPenalty_kernel");
}
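// Sparseness penalty: one thread owns one hidden unit (one column of the batch x stride
// activation matrix). It averages the unit's activation over the batch,
//     pi = (1 / batch) * sum_i a_i,   clamped to [MIN_ACTIVATION, MAX_ACTIVATION],
// and adds the derivative of the KL-divergence sparsity term,
//     d/dpi [ p * log(p / pi) + (1 - p) * log((1 - p) / (1 - pi)) ] = -p / pi + (1 - p) / (1 - pi),
// scaled by beta, to every delta in that column (a second pass down the batch).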
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
pDelta[pos] = x * ((NNFloat)1.0 - x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
x *= oneOverScale;
pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
pDelta[pos] = (NNFloat)0.0;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
{
pDelta[pos] *= slope;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
pDelta[pos] *= (x + alpha);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat delta = pDelta[pos];
if (x > (NNFloat)0.0)
{
delta *= lambda;
}
else
{
delta *= (x + lambda * alpha);
}
pDelta[pos] = delta;
}
}
void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint32_t blocks = CalculateBlocks(size);
NNFloat oneOverScale = (NNFloat)1.0 / scale;
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateTanhHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, scale, oneOverScale);
LAUNCHERROR("kCalculateTanhHadamardProduct_kernel");
break;
case Linear:
// Derivative of linear output is 1, no need to call any kernel here
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateRELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateRELUHadamardProduct_kernel");
break;
case LeakyRectifiedLinear:
hipLaunchKernelGGL(( kCalculateLRELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateLRELUHadamardProduct_kernel");
break;
case ExponentialLinear:
hipLaunchKernelGGL(( kCalculateELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateELUHadamardProduct_kernel");
break;
case ScaledExponentialLinear:
hipLaunchKernelGGL(( kCalculateSELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSELUHadamardProduct_kernel");
break;
}
}
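// kCalculateHadamardProduct back-propagates through a hidden layer's activation by
// multiplying the incoming delta elementwise with the activation derivative expressed
// in terms of the stored forward output x:
//     Sigmoid: d *= x * (1 - x)              Tanh:  d *= scale * (1 - (x / scale)^2)
//     Linear:  unchanged (derivative is 1)   RELU:  d = 0 where x <= 0
//     LRELU:   d *= slope where x <= 0       ELU:   d *= (x + alpha) where x <= 0
//     SELU:    d *= lambda where x > 0, else d *= (x + lambda * alpha)
// The ELU/SELU branches rely on the identity alpha * exp(z) = x + alpha for z <= 0
// (and its lambda-scaled analogue for SELU), so the derivative is recovered from the
// forward output without storing the pre-activation.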
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
REDUCE(r2)
// Normalize vector if too large
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t blocks = (batch + 3) / 4;
hipLaunchKernelGGL(( kNormalizeDeltas_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta);
LAUNCHERROR("kNormalizeDeltas_kernel");
}
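// kNormalizeDeltas clips each example's gradient in place: one warp owns one row of
// stride deltas, accumulates r2 = sum(delta^2) with a warp reduction, and if r2 exceeds
// norm^2 rescales the row by norm / sqrt(r2) so its L2 norm equals the cap. The launch
// uses 128-thread blocks, i.e. four warps (four examples) per block for a 32-wide warp,
// hence blocks = (batch + 3) / 4.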
__global__ void
LAUNCH_BOUNDS()
kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
REDUCE(r2)
// Output result
if (tgx == 0)
pMagnitude[dpos] = r2;
}
}
void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
hipLaunchKernelGGL(( kCalculateDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kCalculateDeltaMagnitudes_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Normalize vector if too large
NNFloat r2 = pMagnitude[dpos];
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
uint32_t pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
hipLaunchKernelGGL(( kNormalizeDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel");
}
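// kCalculateDeltaMagnitudes / kNormalizeDeltaMagnitudes split the same clipping into
// two passes so the squared magnitudes can be inspected or combined before rescaling.
// A minimal host-side sketch of the intended pairing (maxNorm and pMagnitude are
// placeholder names; pMagnitude needs one NNFloat per example):
//
//     kCalculateDeltaMagnitudes(batch, stride, pDelta, pMagnitude);  // pMagnitude[i] = ||delta_i||^2
//     kNormalizeDeltaMagnitudes(maxNorm, batch, stride, pDelta, pMagnitude);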
__global__ void
LAUNCH_BOUNDS()
kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat s = pSrc[pos];
NNFloat sdelta = pSrcDelta[pos];
NNFloat d = pDst[pos];
NNFloat delta = (s == d) ? sdelta : (NNFloat)0;
if (beta == (NNFloat)0)
pDstDelta[pos] = delta;
else if (delta != (NNFloat)0.0)
pDstDelta[pos] = beta * pDstDelta[pos] + delta;
}
}
void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
unsigned long blocks = CalculateBlocks(size);
hipLaunchKernelGGL(( kCalculateMaxoutDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pSrc, pSrcDelta, size, beta, pDst, pDstDelta);
LAUNCHERROR("kCalculateMaxoutDelta_kernel");
}
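// Maxout backpropagation: the incoming delta in pSrcDelta is propagated to pDstDelta
// only at positions where pDst holds the same value as pSrc (s == d), i.e. where that
// unit presumably won the max in the forward pass. With beta == 0 the destination delta
// is overwritten; otherwise a non-zero routed delta is accumulated as
// beta * pDstDelta + delta, and losing positions leave the destination untouched.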
__global__ void
LAUNCH_BOUNDS()
kCalculateCosineDelta_kernel(NNFloat* pDPDelta, NNFloat* pDP, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
// Preincrement pointers
p0Vector += blockIdx.x * inputStride + threadIdx.x;
pVector += blockIdx.x * inputStride + threadIdx.x;
pDPDelta += blockIdx.x * stride;
pDP += blockIdx.x * stride;
pA += blockIdx.x * stride;
pB += blockIdx.x * stride;
pDelta0 += blockIdx.x * inputStride + threadIdx.x;
pDelta += blockIdx.x * inputStride + threadIdx.x;
uint32_t pos = threadIdx.x;
NNFloat dp = *pDP;
NNFloat dpDelta = *pDPDelta;
NNFloat a = *pA;
NNFloat b = *pB;
NNFloat ab = a * b;
NNFloat a2 = a * a;
NNFloat b2 = b * b;
// Calculate deltas
while (pos < inputStride)
{
NNFloat ai = *p0Vector;
NNFloat bi = *pVector;
NNFloat delta0 = dpDelta * ((bi / ab) - (ai * dp / a2));
NNFloat delta = dpDelta * ((ai / ab) - (bi * dp / b2));
if (beta0 == (NNFloat)0)
*pDelta0 = delta0;
else
*pDelta0 = *pDelta0 + beta0 * delta0;
if (beta == (NNFloat)0)
*pDelta = delta;
else
*pDelta = *pDelta + beta * delta;
pDelta0 += blockDim.x;
pDelta += blockDim.x;
p0Vector += blockDim.x;
pVector += blockDim.x;
pos += blockDim.x;
}
}
void kCalculateCosineDelta(NNFloat* pDPDeltaIn, NNFloat* pDPIn, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
unsigned long blocks = batch;
unsigned long threadsPerBlock = ::min(stride, getGpu()._threadsPerBlock);
hipLaunchKernelGGL(( kCalculateCosineDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pDPDeltaIn, pDPIn, pA, pB, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride);
LAUNCHERROR("kCalculateCosineDelta_kernel");
}
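// Cosine-similarity backpropagation. Assuming pA and pB hold the two vector norms |a|
// and |b| and pDP holds the cosine value dp = (a . b) / (|a| |b|) from the forward pass,
// the gradients follow from
//     d dp / d a_i = b_i / (|a| |b|) - a_i * dp / |a|^2
//     d dp / d b_i = a_i / (|a| |b|) - b_i * dp / |b|^2
// which is what the kernel evaluates (ab = |a| * |b|, a2 = |a|^2, b2 = |b|^2), scaled by
// the incoming delta dpDelta and blended into pDelta0 / pDelta with beta0 / beta.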
__global__ void
LAUNCH_BOUNDS()
kCalculateDotProductDelta_kernel(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
// Preincrement pointers
p0Vector += blockIdx.x * inputStride + threadIdx.x;
pVector += blockIdx.x * inputStride + threadIdx.x;
pDPDelta += blockIdx.x * stride;
pDelta0 += blockIdx.x * inputStride + threadIdx.x;
pDelta += blockIdx.x * inputStride + threadIdx.x;
uint32_t pos = threadIdx.x;
NNFloat dpDelta = *pDPDelta;
// Calculate deltas
while (pos < inputStride)
{
NNFloat ai = *p0Vector;
NNFloat bi = *pVector;
NNFloat delta0 = dpDelta * bi;
NNFloat delta = dpDelta * ai;
if (beta0 == (NNFloat)0)
*pDelta0 = delta0;
else
*pDelta0 = *pDelta0 + beta0 * delta0;
if (beta == (NNFloat)0)
*pDelta = delta;
else
*pDelta = *pDelta + beta * delta;
pDelta0 += blockDim.x;
pDelta += blockDim.x;
p0Vector += blockDim.x;
pVector += blockDim.x;
pos += blockDim.x;
}
}
void kCalculateDotProductDelta(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
unsigned long blocks = batch;
unsigned long threadsPerBlock = ::min(stride, getGpu()._threadsPerBlock);
hipLaunchKernelGGL(( kCalculateDotProductDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pDPDelta, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride);
LAUNCHERROR("kCalculateDotProductDelta_kernel");
}
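// Dot-product backpropagation is the simpler cousin of the cosine case: for
// dp = sum_i a_i * b_i the gradients are d dp / d a_i = b_i and d dp / d b_i = a_i, so
// each input delta is the incoming dpDelta scaled by the other vector's element,
// blended via beta0 / beta into any existing deltas when those coefficients are
// non-zero.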
// Instantiates allowable templated functions so we can hide the implementations here
// instead of in the header file because we're mixing CUDA and C++ and that's
// a migraine headache in the making otherwise.
#define EXPLICITLY_INSTANTIATE_KERNELS(T) \
template void kCalculateL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateIndexedL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \
template void kCalculateIndexedCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \
template void kCalculateScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \
template void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \
template void kCalculateOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateIndexedOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \
template void kCalculateIndexedHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \
template void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \
template void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \
template void kCalculateSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool, NNFloat, NNFloat, NNFloat); \
template void kCalculateIndexedSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool, NNFloat, NNFloat, NNFloat); \
/**/
EXPLICITLY_INSTANTIATE_KERNELS(NNFloat)
EXPLICITLY_INSTANTIATE_KERNELS(double)
EXPLICITLY_INSTANTIATE_KERNELS(unsigned char)
EXPLICITLY_INSTANTIATE_KERNELS(char)
EXPLICITLY_INSTANTIATE_KERNELS(uint32_t)
EXPLICITLY_INSTANTIATE_KERNELS(uint64_t)
EXPLICITLY_INSTANTIATE_KERNELS(int32_t)
EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
| f45eb0684205c24c132e2c2261f4d719f042e412.cu | /*
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "GpuTypes.h"
#include "NNTypes.h"
#include <limits>
static __constant__ GpuData cData;
void SetKDeltaGpuData()
{
cudaError_t status;
status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData));
RTERROR(status, "cudaMemcpyToSymbol: SetKDeltaGpuData copy to cData failed");
}
void GetKDeltaGpuData()
{
cudaError_t status;
status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData));
RTERROR(status, "cudaMemcpyFromSymbol: GetKDeltaGpuData copy From cData failed");
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateSigmoidOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel");
break;
case Tanh:
kCalculateTanhOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhOutputDelta_kernel");
break;
case Linear:
kCalculateLinearOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateLinearOutputDelta_kernel");
break;
case RectifiedLinear:
kCalculateRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
kCalculateLRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope);
LAUNCHERROR("kCalculateLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
kCalculateELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha);
LAUNCHERROR("kCalculateELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
kCalculateSELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda);
LAUNCHERROR("kCalculateSELUOutputDelta_kernel");
break;
case SoftMax:
kCalculateSoftMaxOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a)));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T> void kCalculateIndexedOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateIndexedSigmoidOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidOutputDelta_kernel");
break;
case Tanh:
kCalculateIndexedTanhOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedTanhOutputDelta_kernel");
break;
case Linear:
kCalculateIndexedLinearOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedLinearOutputDelta_kernel");
break;
case RectifiedLinear:
kCalculateIndexedRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
kCalculateIndexedLRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, slope);
LAUNCHERROR("kCalculateIndexedLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
kCalculateIndexedELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha);
LAUNCHERROR("kCalculateIndexedELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
kCalculateIndexedSELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSELUOutputDelta_kernel");
break;
case SoftMax:
kCalculateIndexedSoftMaxOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSoftMaxOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = pData[pos];
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<typename T> void kCalculateHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock));
kCalculateHingeOutputDelta_kernel<<<batch, threads>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateHingeOutputDelta_kernel");
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = pData[pos];
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = threadIdx.x;
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
pUnit += uOffset;
pDelta += uOffset;
pData += dOffset;
while (pos < stride)
{
NNFloat a = pUnit[pos];
NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0);
pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0;
pos += blockDim.x;
}
}
template<typename T> void kCalculateIndexedHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock));
kCalculateIndexedHingeOutputDelta_kernel<<<batch, threads>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateHingeOutputDelta_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - (NNFloat)1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (end - pos1);
pos1 += threadIdx.x & cData._warpMask;
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
kCalculateSparseNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
kCalculateSparseNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
kCalculateSparseNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
kCalculateSparseNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateSparseNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
kCalculateSparseNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateSparseNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
kCalculateSparseNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateSparseNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - (NNFloat)1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (end - pos1);
pos1 += threadIdx.x & cData._warpMask;
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kIndexedCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateIndexedSparseNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
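// ELU variant: for a > 0 the derivative is 1; for a <= 0 the stored activation is
// alpha * (exp(x) - 1), so the derivative is recovered from the output as (a + alpha).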
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
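// SELU variant: the derivative term is lambda for a > 0, and for a <= 0 the code uses
// lambda * alpha * exp(a) (exp applied to the stored activation), consistent with the
// other SELU delta kernels in this file.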
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
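// SoftMax variant of the sparse analog output delta: the delta is simply (a - t).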
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
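// Host-side dispatcher that pairs the raw (all-unit) kernel with the matching non-zero
// sparse kernel for the requested output activation. When bSparseIgnoreZero is set the
// delta buffer is cleared instead of running the raw kernel, so only the listed sparse
// indices contribute. A purely illustrative call (caller-side buffer names are placeholders):
//   kCalculateSparseAnalogOutputDelta(Sigmoid, position, batch, stride,
//                                     pUnit, pDelta, pSparseStart, pSparseEnd,
//                                     pSparseIndex, pSparseData,
//                                     true /*bSparseIgnoreZero*/, 0.0f, 0.0f, 0.0f);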
template<typename T>
void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, slope);
LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha);
LAUNCHERROR("kCalculateSparseAnalogNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha, lambda);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
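// Indexed variants of the sparse analog output delta kernels: identical to the kernels
// above except that the (optionally shuffled) example position is first remapped
// through pIndex, supporting indexed datasets.
// Sigmoid delta: deltaBoost_one * (a - t) * a * (1 - a).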
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t *pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
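// Indexed counterpart of kCalculateSparseAnalogOutputDelta: same activation switch and
// raw/non-zero kernel pairing, with pIndex forwarded to the non-zero kernels.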
template<typename T>
void kCalculateIndexedSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, slope);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
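// Dense (non-sparse) cross-entropy output deltas for sigmoid/softmax outputs: with a
// cross-entropy objective the activation derivative cancels, leaving delta = (a - t).
// The 8-bit specializations dequantize the targets as above.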
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
case SoftMax:
kCalculateSigmoidCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
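// Indexed dense cross-entropy delta: same as above, with the (optionally shuffled)
// example position remapped through pIndex before addressing the target data.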
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<typename T>
void kCalculateIndexedCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
case SoftMax:
kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
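// Sparse cross-entropy deltas for sigmoid outputs with implicit 0/1 targets: the raw
// kernel writes deltaBoost_zero * a for every unit (target 0), and the non-zero kernel
// then overwrites the active sparse indices with deltaBoost_one * (a - 1).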
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
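// Dispatcher for sparse (binary-target) cross-entropy deltas; the SoftMax case reuses
// the raw/non-zero SoftMax delta kernels (kCalculateSparseRawSoftMaxOutputDelta_kernel /
// kCalculateSparseNonZeroSoftMaxOutputDelta_kernel) referenced above.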
void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
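// Indexed sparse cross-entropy delta for sigmoid outputs: identical to the non-indexed
// kernel above, with the example remapped through pIndex. Note that pos2 is declared
// uint32_t here (unlike the uint64_t used by the analog kernels), which assumes
// batch * stride fits in 32 bits.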
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel");
break;
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
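// Sparse analog (real-valued target) cross-entropy deltas for sigmoid outputs:
// deltaBoost_one * (a - t), with the usual 8-bit dequantizing specializations.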
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
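// Dispatcher for sparse analog cross-entropy deltas; Sigmoid and SoftMax share the same
// (a - t) form, so both cases fall through to the sigmoid kernels.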
template<typename T>
void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
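// Indexed variants of the sparse analog cross-entropy delta kernels above.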
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint32_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
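// Indexed counterpart of kCalculateSparseAnalogCrossEntropyOutputDelta, with the same
// Sigmoid/SoftMax fallthrough.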
template<typename T>
void kCalculateIndexedSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
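// Scaled-marginal cross-entropy delta for sigmoid outputs: the delta is non-zero only
// while a unit is still outside its target margin (a < _SMCE_oneTarget for positive
// targets, a > _SMCE_zeroTarget for zero targets), scaled by the corresponding
// _SMCE_oneScale / _SMCE_zeroScale factor.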
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
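// Launcher for the dense scaled marginal cross entropy deltas: one block row per example
// and enough block columns to cover the output stride.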
template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
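// Indexed variants: the example's data row is resolved through pIndex (optionally shuffled)
// before reading the target values.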
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T> void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
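// Sparse scaled marginal cross entropy deltas are computed in two passes: a raw kernel
// treats every output as a zero target, then a non-zero kernel overwrites the positions
// listed in the sparse index with the one-target delta.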
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - (NNFloat)1.0);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
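// For SoftMax the non-zero targets share probability mass equally, so the target value is
// 1 / (number of sparse entries for the example).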
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1);
uint64_t offset = pos * stride;
pos1 += threadIdx.x & cData._warpMask;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - t);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
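// Indexed sparse variants of the non-zero scaled marginal cross entropy deltas; the raw
// pass reuses the non-indexed kernels since it does not touch the index.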
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - (NNFloat)1.0);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1);
uint64_t offset = pos * stride;
pos1 += threadIdx.x & cData._warpMask;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - t);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
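// Data-scaled marginal cross entropy: the one-side delta is additionally weighted by the
// analog sparse value t, and only the Sigmoid activation is supported.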
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
{
output = cData._SMCE_zeroScale * a;
}
pDelta[pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
{
output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0);
}
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
cout << "unsupported activation for this cost function" << endl;
getGpu().Shutdown();
exit(-1);
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
{
output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0);
}
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
cout << "unsupported activation for this cost function" << endl;
getGpu().Shutdown();
exit(-1);
break;
}
}
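// L1 (absolute error) output deltas: d|a - t|/da = sgn(a - t), multiplied by the derivative
// of the output activation evaluated at a.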
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
        pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
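// Launcher for the dense L1 output deltas; slope, alpha and lambda parameterize the leaky,
// exponential and scaled exponential linear activations respectively.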
template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateSigmoidL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel");
break;
case Tanh:
kCalculateTanhL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel");
break;
case Linear:
kCalculateLinearL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
kCalculateRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
kCalculateLRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope);
LAUNCHERROR("kCalculateLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
kCalculateELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha);
LAUNCHERROR("kCalculateELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
kCalculateSELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda);
LAUNCHERROR("kCalculateSELUL1OutputDelta_kernel");
break;
}
}
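// Indexed L1 output deltas: same math as above with the data row resolved through pIndex.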
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
        pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
template<typename T> void kCalculateIndexedL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateIndexedSigmoidL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedSigmoidL1OutputDelta_kernel");
break;
case Tanh:
kCalculateIndexedTanhL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedTanhL1OutputDelta_kernel");
break;
case Linear:
kCalculateIndexedLinearL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
kCalculateIndexedRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData);
LAUNCHERROR("kCalculateIndexedRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
kCalculateIndexedLRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, slope);
LAUNCHERROR("kCalculateIndexedLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
kCalculateIndexedELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha);
LAUNCHERROR("kCalculateIndexedELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
kCalculateIndexedSELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSELUL1OutputDelta_kernel");
break;
}
}
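// Sparse L1 output deltas: raw kernels assume a zero target for every output, non-zero
// kernels assume a target of 1.0 at the positions listed in the sparse index.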
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel");
}
kCalculateSparseNonZeroTanhL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroTanhL1OutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel");
}
kCalculateSparseNonZeroLinearL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel");
}
kCalculateSparseNonZeroRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel");
}
kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel");
}
kCalculateSparseNonZeroELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateSparseNonZeroELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel");
}
kCalculateSparseNonZeroSELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateSparseNonZeroSELUL1OutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat slope)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope);
pos1 += cData._warpSize;
}
}
}
void kCalculateIndexedSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel");
break;
case LeakyRectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, slope);
LAUNCHERROR("kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel");
break;
case ExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha);
LAUNCHERROR("kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel");
break;
case ScaledExponentialLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel");
}
kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda);
LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel");
break;
}
}
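// Sparseness penalty: adds beta * dKL(p || pi)/dpi = beta * (-p / pi + (1 - p) / (1 - pi)) to every
// delta of an output unit, where pi is that unit's mean activation over the batch, clamped to
// [MIN_ACTIVATION, MAX_ACTIVATION].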
__global__ void
LAUNCH_BOUNDS()
kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
// Calculate sum of activations
if (pos < stride)
{
NNFloat pi = (NNFloat)0.0;
for (int i = 0; i < batch; i++)
{
pi += pUnit[pos];
pos += stride;
}
// Calculate sparseness penalty
pi /= (NNFloat)batch;
pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi));
NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi));
// Apply sparseness penalty to deltas
pos = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < batch; i++)
{
pDelta[pos] += penalty;
pos += stride;
}
}
}
// Calculates and applies sparseness penalty to hidden layers
void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
dim3 grid1(CalculateBlocks(stride));
kCalculateSparsenessPenalty_kernel<<<grid1, getGpu()._threadsPerBlock>>>(batch, stride, pUnit, pDelta, p, beta);
LAUNCHERROR("kCalculateSparsenessPenalty_kernel");
}
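// The Hadamard-product kernels backpropagate through the hidden-layer activation by multiplying the
// incoming delta with the activation derivative, reconstructed from the activation's output value.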
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
pDelta[pos] = x * ((NNFloat)1.0 - x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
x *= oneOverScale;
pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
pDelta[pos] = (NNFloat)0.0;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateLRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
{
pDelta[pos] *= slope;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
pDelta[pos] *= (x + alpha);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat delta = pDelta[pos];
if (x > (NNFloat)0.0)
{
delta *= lambda;
}
else
{
delta *= (x + lambda * alpha);
}
pDelta[pos] = delta;
}
}
void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
uint32_t blocks = CalculateBlocks(size);
NNFloat oneOverScale = (NNFloat)1.0 / scale;
switch (activation)
{
case Sigmoid:
kCalculateSigmoidHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel");
break;
case Tanh:
kCalculateTanhHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, scale, oneOverScale);
LAUNCHERROR("kCalculateTanhHadamardProduct_kernel");
break;
case Linear:
// Derivative of linear output is 1, no need to call any kernel here
break;
case RectifiedLinear:
kCalculateRELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateRELUHadamardProduct_kernel");
break;
case LeakyRectifiedLinear:
kCalculateLRELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope);
LAUNCHERROR("kCalculateLRELUHadamardProduct_kernel");
break;
case ExponentialLinear:
kCalculateELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateELUHadamardProduct_kernel");
break;
case ScaledExponentialLinear:
kCalculateSELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda);
LAUNCHERROR("kCalculateSELUHadamardProduct_kernel");
break;
}
}
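// Clamps the L2 norm of each row of pDelta to at most norm; one warp reduces and rescales one row.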
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
REDUCE(r2)
// Normalize vector if too large
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t blocks = (batch + 3) / 4;
kNormalizeDeltas_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta);
LAUNCHERROR("kNormalizeDeltas_kernel");
}
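// Writes the squared L2 norm of each row of pDelta to pMagnitude (one warp per row).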
__global__ void
LAUNCH_BOUNDS()
kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
REDUCE(r2)
// Output result
if (tgx == 0)
pMagnitude[dpos] = r2;
}
}
void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
kCalculateDeltaMagnitudes_kernel<<<blocks, 128>>>(batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kCalculateDeltaMagnitudes_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Normalize vector if too large
NNFloat r2 = pMagnitude[dpos];
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
uint32_t pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
kNormalizeDeltaMagnitudes_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel");
}
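// Maxout backpropagation: a destination unit receives the source delta only where its value matches
// the source (winning) value; beta accumulates the result into any existing destination delta.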
__global__ void
LAUNCH_BOUNDS()
kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat s = pSrc[pos];
NNFloat sdelta = pSrcDelta[pos];
NNFloat d = pDst[pos];
NNFloat delta = (s == d) ? sdelta : (NNFloat)0;
if (beta == (NNFloat)0)
pDstDelta[pos] = delta;
else if (delta != (NNFloat)0.0)
pDstDelta[pos] = beta * pDstDelta[pos] + delta;
}
}
void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
unsigned long blocks = CalculateBlocks(size);
kCalculateMaxoutDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pSrc, pSrcDelta, size, beta, pDst, pDstDelta);
LAUNCHERROR("kCalculateMaxoutDelta_kernel");
}
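// Backpropagation through cosine similarity dp = x0.x / (|x0||x|): d(dp)/dx0_i = x_i / (|x0||x|) - x0_i * dp / |x0|^2,
// and symmetrically for x; pA and pB hold the two vector norms, pDP the similarity, pDPDelta the upstream delta.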
__global__ void
LAUNCH_BOUNDS()
kCalculateCosineDelta_kernel(NNFloat* pDPDelta, NNFloat* pDP, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
// Preincrement pointers
p0Vector += blockIdx.x * inputStride + threadIdx.x;
pVector += blockIdx.x * inputStride + threadIdx.x;
pDPDelta += blockIdx.x * stride;
pDP += blockIdx.x * stride;
pA += blockIdx.x * stride;
pB += blockIdx.x * stride;
pDelta0 += blockIdx.x * inputStride + threadIdx.x;
pDelta += blockIdx.x * inputStride + threadIdx.x;
uint32_t pos = threadIdx.x;
NNFloat dp = *pDP;
NNFloat dpDelta = *pDPDelta;
NNFloat a = *pA;
NNFloat b = *pB;
NNFloat ab = a * b;
NNFloat a2 = a * a;
NNFloat b2 = b * b;
// Calculate deltas
while (pos < inputStride)
{
NNFloat ai = *p0Vector;
NNFloat bi = *pVector;
NNFloat delta0 = dpDelta * ((bi / ab) - (ai * dp / a2));
NNFloat delta = dpDelta * ((ai / ab) - (bi * dp / b2));
if (beta0 == (NNFloat)0)
*pDelta0 = delta0;
else
*pDelta0 = *pDelta0 + beta0 * delta0;
if (beta == (NNFloat)0)
*pDelta = delta;
else
*pDelta = *pDelta + beta * delta;
pDelta0 += blockDim.x;
pDelta += blockDim.x;
p0Vector += blockDim.x;
pVector += blockDim.x;
pos += blockDim.x;
}
}
void kCalculateCosineDelta(NNFloat* pDPDeltaIn, NNFloat* pDPIn, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
unsigned long blocks = batch;
unsigned long threadsPerBlock = std::min(stride, getGpu()._threadsPerBlock);
kCalculateCosineDelta_kernel<<<blocks, threadsPerBlock>>>(pDPDeltaIn, pDPIn, pA, pB, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride);
LAUNCHERROR("kCalculateCosineDelta_kernel");
}
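// Backpropagation through a dot product: d(x0.x)/dx0_i = x_i and d(x0.x)/dx_i = x0_i, each scaled by
// the upstream delta in pDPDelta.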
__global__ void
LAUNCH_BOUNDS()
kCalculateDotProductDelta_kernel(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
// Preincrement pointers
p0Vector += blockIdx.x * inputStride + threadIdx.x;
pVector += blockIdx.x * inputStride + threadIdx.x;
pDPDelta += blockIdx.x * stride;
pDelta0 += blockIdx.x * inputStride + threadIdx.x;
pDelta += blockIdx.x * inputStride + threadIdx.x;
uint32_t pos = threadIdx.x;
NNFloat dpDelta = *pDPDelta;
// Calculate deltas
while (pos < inputStride)
{
NNFloat ai = *p0Vector;
NNFloat bi = *pVector;
NNFloat delta0 = dpDelta * bi;
NNFloat delta = dpDelta * ai;
if (beta0 == (NNFloat)0)
*pDelta0 = delta0;
else
*pDelta0 = *pDelta0 + beta0 * delta0;
if (beta == (NNFloat)0)
*pDelta = delta;
else
*pDelta = *pDelta + beta * delta;
pDelta0 += blockDim.x;
pDelta += blockDim.x;
p0Vector += blockDim.x;
pVector += blockDim.x;
pos += blockDim.x;
}
}
void kCalculateDotProductDelta(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride)
{
unsigned long blocks = batch;
unsigned long threadsPerBlock = std::min(stride, getGpu()._threadsPerBlock);
kCalculateDotProductDelta_kernel<<<blocks, threadsPerBlock>>>(pDPDelta, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride);
LAUNCHERROR("kCalculateDotProductDelta_kernel");
}
// Instantiates allowable templated functions so we can hide the implementations here
// instead of in the header file because we're mixing CUDA and C++ and that's
// a migraine headache in the making otherwise.
#define EXPLICITLY_INSTANTIATE_KERNELS(T) \
template void kCalculateL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateIndexedL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \
template void kCalculateIndexedCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \
template void kCalculateScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \
template void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \
template void kCalculateOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateIndexedOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \
template void kCalculateHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \
template void kCalculateIndexedHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \
template void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \
template void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \
template void kCalculateSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool, NNFloat, NNFloat, NNFloat); \
template void kCalculateIndexedSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool, NNFloat, NNFloat, NNFloat); \
/**/
EXPLICITLY_INSTANTIATE_KERNELS(NNFloat)
EXPLICITLY_INSTANTIATE_KERNELS(double)
EXPLICITLY_INSTANTIATE_KERNELS(unsigned char)
EXPLICITLY_INSTANTIATE_KERNELS(char)
EXPLICITLY_INSTANTIATE_KERNELS(uint32_t)
EXPLICITLY_INSTANTIATE_KERNELS(uint64_t)
EXPLICITLY_INSTANTIATE_KERNELS(int32_t)
EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
|
f9222fede194c06789de47e7a7f8b205129864db.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "bin_reader.h"
#include "timer.h"
#include "hip/hip_runtime.h"
#define CHECK(exp) \
do { \
if ((exp) != 0) { \
printf("Runtime error at line %d\n", __LINE__); \
exit(-1); \
} \
} while(0)
#define CUCHECK(exp) \
do { \
if ((exp) != hipSuccess) { \
printf("CUDA runtime error at line %d\n", __LINE__); \
exit(-1); \
} \
} while(0)
__global__ void vectorAddA(float * vectorA, float * vectorB, float * vectorC, unsigned int count) {
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
if (threadId < count)
vectorC[threadId] = vectorA[threadId] + vectorB[threadId];
}
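// vectorAddB: each thread processes ceil(count / totalThreads) elements spaced totalThreads apart,
// so the whole vector is covered regardless of the launch configuration.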
__global__ void vectorAddB(float * vectorA, float * vectorB, float * vectorC, unsigned int count) {
//Play with various versions of kernels (depending on threads and vice versa WBB)
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
int stride = count / totalThreads;
int remainder = count % totalThreads;
stride += (remainder) ? 1 : 0;
for (int i=0;i<stride;i++) {
unsigned long idx = (i * totalThreads) + threadId;
if (idx < count)
vectorC[idx] = vectorA[idx] + vectorB[idx];
}
}
int main(int argc, char ** argv) {
float * veca_h, * vecb_h, * vecc_h, * vecc_hv;
float * veca_d, * vecb_d, * vecc_d;
size_t count;
//Loading vector-A and -B from file for calculation on GPU
//Loading vector-C from file for verifying the results from GPU
CHECK( binReadAsArrayNP<float>("vecA.bin", NULL, &veca_h, &count));
CHECK( binReadAsArrayNP<float>("vecB.bin", NULL, &vecb_h, &count));
CHECK( binReadAsArrayNP<float>("vecC.bin", NULL, &vecc_hv, &count));
vecc_h = new float [count];
//Allocate memory on GPU for vectors (WBB)
CUCHECK( hipMalloc(&veca_d, count * sizeof(float)));
CUCHECK( hipMalloc(&vecb_d, count * sizeof(float)));
CUCHECK( hipMalloc(&vecc_d, count * sizeof(float)));
//Copy vector-A and -B from the host to the device (WBB)
CUCHECK( hipMemcpy(veca_d, veca_h, count * sizeof(float), hipMemcpyHostToDevice));
CUCHECK( hipMemcpy(vecb_d, vecb_h, count * sizeof(float), hipMemcpyHostToDevice));
CUCHECK( hipMemset(vecc_d, 0, count * sizeof(float)));
//Perform the vector addition by calling the kernel
//Play with scaling and configuration (WBB)
Timer compTimer;
compTimer.Start();
hipLaunchKernelGGL(( vectorAddB), dim3(128), dim3(256), 0, 0, veca_d, vecb_d, vecc_d, count);
hipDeviceSynchronize();
compTimer.Stop();
Timer::Duration d = compTimer.GetDuration();
printf("Computation time: %.2f us\n", d.raw);
//Copy vector-C which is the result back from GPU
CUCHECK( hipMemcpy(vecc_h, vecc_d, count * sizeof(float), hipMemcpyDeviceToHost));
//Verification
for (size_t idx=0;idx<count;idx++) {
if (vecc_h[idx] != vecc_hv[idx]) {
printf("Verification: FAILED (%ld)\n", idx);
exit(-1);
}
}
printf("Verification: PASSED\n");
//Release resources after completed calculation (WBB)
CHECK( binDiscardArrayNP(veca_h));
CHECK( binDiscardArrayNP(vecb_h));
CHECK( binDiscardArrayNP(vecc_hv));
CUCHECK( hipFree(veca_d));
CUCHECK( hipFree(vecb_d));
CUCHECK( hipFree(vecc_d));
return 0;
}
| f9222fede194c06789de47e7a7f8b205129864db.cu | #include <stdio.h>
#include <stdlib.h>
#include "bin_reader.h"
#include "timer.h"
#include "cuda.h"
#define CHECK(exp) \
do { \
if ((exp) != 0) { \
printf("Runtime error at line %d\n", __LINE__); \
exit(-1); \
} \
} while(0)
#define CUCHECK(exp) \
do { \
if ((exp) != cudaSuccess) { \
printf("CUDA runtime error at line %d\n", __LINE__); \
exit(-1); \
} \
} while(0)
__global__ void vectorAddA(float * vectorA, float * vectorB, float * vectorC, unsigned int count) {
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
if (threadId < count)
vectorC[threadId] = vectorA[threadId] + vectorB[threadId];
}
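// vectorAddB: each thread processes ceil(count / totalThreads) elements spaced totalThreads apart,
// so the whole vector is covered regardless of the launch configuration.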
__global__ void vectorAddB(float * vectorA, float * vectorB, float * vectorC, unsigned int count) {
//Play with various versions of kernels (depending on threads and vice versa WBB)
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
int stride = count / totalThreads;
int remainder = count % totalThreads;
stride += (remainder) ? 1 : 0;
for (int i=0;i<stride;i++) {
unsigned long idx = (i * totalThreads) + threadId;
if (idx < count)
vectorC[idx] = vectorA[idx] + vectorB[idx];
}
}
int main(int argc, char ** argv) {
float * veca_h, * vecb_h, * vecc_h, * vecc_hv;
float * veca_d, * vecb_d, * vecc_d;
size_t count;
//Loading vector-A and -B from file for calculation on GPU
//Loading vector-C from file for verifying the results from GPU
CHECK( binReadAsArrayNP<float>("vecA.bin", NULL, &veca_h, &count));
CHECK( binReadAsArrayNP<float>("vecB.bin", NULL, &vecb_h, &count));
CHECK( binReadAsArrayNP<float>("vecC.bin", NULL, &vecc_hv, &count));
vecc_h = new float [count];
//Allocate memory on GPU for vectors (WBB)
CUCHECK( cudaMalloc(&veca_d, count * sizeof(float)));
CUCHECK( cudaMalloc(&vecb_d, count * sizeof(float)));
CUCHECK( cudaMalloc(&vecc_d, count * sizeof(float)));
//Copy vector-A and -B from the host to the device (WBB)
CUCHECK( cudaMemcpy(veca_d, veca_h, count * sizeof(float), cudaMemcpyHostToDevice));
CUCHECK( cudaMemcpy(vecb_d, vecb_h, count * sizeof(float), cudaMemcpyHostToDevice));
CUCHECK( cudaMemset(vecc_d, 0, count * sizeof(float)));
//Perform the vector addition by calling the kernel
//Play with scaling and configuration (WBB)
Timer compTimer;
compTimer.Start();
vectorAddB<<<128, 256>>>(veca_d, vecb_d, vecc_d, count);
cudaDeviceSynchronize();
compTimer.Stop();
Timer::Duration d = compTimer.GetDuration();
printf("Computation time: %.2f us\n", d.raw);
//Copy vector-C which is the result back from GPU
CUCHECK( cudaMemcpy(vecc_h, vecc_d, count * sizeof(float), cudaMemcpyDeviceToHost));
//Verification
for (size_t idx=0;idx<count;idx++) {
if (vecc_h[idx] != vecc_hv[idx]) {
printf("Verification: FAILED (%ld)\n", idx);
exit(-1);
}
}
printf("Verification: PASSED\n");
//Release resources after completed calculation (WBB)
CHECK( binDiscardArrayNP(veca_h));
CHECK( binDiscardArrayNP(vecb_h));
CHECK( binDiscardArrayNP(vecc_hv));
CUCHECK( cudaFree(veca_d));
CUCHECK( cudaFree(vecb_d));
CUCHECK( cudaFree(vecc_d));
return 0;
}
|
7052c4f42a75f24d93af9f7d115067c2023f4f51.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
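// Computes one 16x16x16 wmma tile D = A*B + C via inline PTX: A is row-major f16, B is col-major f16,
// C and D are row-major f32. With A's first row holding eight values of 4096 (half 0x6c00), B all ones,
// and C[0] = 32768 (float 0x47000000), D[0] is 8*4096 + 32768 = 65536, printed in hex as 47800000.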
__global__ void test(float* dst, __half* a, __half* b, float* c){
asm volatile(
"ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
".reg .b32 a<8>, b<8>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.f16 {a0, a1, a2, a3, a4, a5, a6, a7}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.f16 {b0, b1, b2, b3, b4, b5, b6, b7}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3, a4, a5, a6, a7}, {b0, b1, b2, b3, b4, b5, b6, b7}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
hipMalloc((void**)(&device_a), sizeof(__half) * size);
hipMalloc((void**)(&device_b), sizeof(__half) * size);
hipMalloc((void**)(&device_c), sizeof(float) * size);
hipMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
FP16 fp16;
fp16.i = 0x6c00; host_a[0]=fp16.f;
fp16.i = 0x6c00; host_a[1]=fp16.f;
fp16.i = 0x6c00; host_a[2]=fp16.f;
fp16.i = 0x6c00; host_a[3]=fp16.f;
fp16.i = 0x6c00; host_a[4]=fp16.f;
fp16.i = 0x6c00; host_a[5]=fp16.f;
fp16.i = 0x6c00; host_a[6]=fp16.f;
fp16.i = 0x6c00; host_a[7]=fp16.f;
FP32 fp32;
fp32.i = 0x47000000; host_c[0]=fp32.f;
hipMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(32), 0, 0, device_d, device_a, device_b, device_c);
hipDeviceSynchronize();
hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
| 7052c4f42a75f24d93af9f7d115067c2023f4f51.cu | #include <iostream>
#include <cuda.h>
#include <cuda_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
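// Computes one 16x16x16 wmma tile D = A*B + C via inline PTX: A is row-major f16, B is col-major f16,
// C and D are row-major f32. With A's first row holding eight values of 4096 (half 0x6c00), B all ones,
// and C[0] = 32768 (float 0x47000000), D[0] is 8*4096 + 32768 = 65536, printed in hex as 47800000.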
__global__ void test(float* dst, __half* a, __half* b, float* c){
asm volatile(
"ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
".reg .b32 a<8>, b<8>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.f16 {a0, a1, a2, a3, a4, a5, a6, a7}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.f16 {b0, b1, b2, b3, b4, b5, b6, b7}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3, a4, a5, a6, a7}, {b0, b1, b2, b3, b4, b5, b6, b7}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
cudaMalloc((void**)(&device_a), sizeof(__half) * size);
cudaMalloc((void**)(&device_b), sizeof(__half) * size);
cudaMalloc((void**)(&device_c), sizeof(float) * size);
cudaMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
FP16 fp16;
fp16.i = 0x6c00; host_a[0]=fp16.f;
fp16.i = 0x6c00; host_a[1]=fp16.f;
fp16.i = 0x6c00; host_a[2]=fp16.f;
fp16.i = 0x6c00; host_a[3]=fp16.f;
fp16.i = 0x6c00; host_a[4]=fp16.f;
fp16.i = 0x6c00; host_a[5]=fp16.f;
fp16.i = 0x6c00; host_a[6]=fp16.f;
fp16.i = 0x6c00; host_a[7]=fp16.f;
FP32 fp32;
fp32.i = 0x47000000; host_c[0]=fp32.f;
cudaMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, cudaMemcpyHostToDevice);
test<<<1,32>>>(device_d, device_a, device_b, device_c);
cudaDeviceSynchronize();
cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
7bebbf784ac0bc9ab4ed9e7f8a2255433184b684.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include "compute_hist_loop_one_stat.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/libs/cuda_wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
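// TPointHistOneByte accumulates per-warp histograms in shared memory: each warp owns a 1024-float
// slice laid out as 32 x (4 features x 8 slots). With 5-bit bins all 8 slots are replicas spread
// across the warp to limit write conflicts; each extra bin bit halves the replication and uses the
// slot index for the high bin bits instead. Reduce() folds the warp slices and replicas back into
// one histogram per feature and fold.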
template <int Bits,
int BlockSize>
struct TPointHistOneByte {
const int InnerHistBitsCount = Bits - 5;
float* Histogram;
static constexpr int GetHistSize() {
return BlockSize * 32;
}
static constexpr int AddPointsBatchSize() {
return TLoadSize<LoadSize()>::Size();
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 700
const int NN = 2;
#else
const int NN = 4;
#endif
return NN;
}
static constexpr int GetBlockSize() {
return BlockSize;
}
static constexpr ELoadSize LoadSize() {
#if __CUDA_ARCH__ < 500
return ELoadSize::OneElement;
#else
return ELoadSize::FourElements;
// return ELoadSize::TwoElements;
#endif
}
static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) {
return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType);
}
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* hist) {
static_assert(Bits >= 5, "Error: this hist is for 5-8 bits");
const int histSize = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
hist[i] = 0;
}
Histogram = hist + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = (threadIdx.x + i) & 3;
int bin = (ci >> (24 - 8 * f)) & 255;
// int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> Bits) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Histogram[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Histogram[offset] += statToAdd;
}
}
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + i) & 3;
int bins[N];
float stats[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = (ci[k] >> (24 - 8 * f)) & 255;
// bins[k] = bfe(ci[k], 24 - 8 * f, 8);
stats[k] = (bins[k] >> Bits) == 0 ? t[k] : 0.0f;
}
int offsets[N];
int higherBin[N];
const int mask = (1 << InnerHistBitsCount) - 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
higherBin[k] = (bins[k] >> 5) & mask;
offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5);
}
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
if (pass == higherBin[j]) {
Histogram[offsets[j]] += stats[j];
}
}
}
} else {
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
Histogram[offsets[j]] += stats[j];
}
}
}
}
template <int N>
__forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) {
const int NN = AddPointsBatchSize();
static_assert(N % NN == 0, "Error: incorrect stripe size");
#pragma unroll
for (int k = 0; k < N; k += NN) {
AddPointsImpl<NN>(ci + k, t + k);
}
}
__forceinline__ __device__ void Reduce() {
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[warpHistSize + start] = sum;
}
}
__syncthreads();
//now we have only 1024 entries hist
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Histogram[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
TFeatureInBlock group = features[fid];
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup;
if (fold < features[fid].Folds) {
const float val = Histogram[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
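// Host-side dispatch: 5-7 bit features go through the two-statistic kernels (ComputeHist2OneByteBits),
// preceded by a single-statistic TPointHistOneByte pass when numStats is odd; 8-bit features always use
// the one-statistic kernel above.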
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
/*
* Single part
*/
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
}
| 7bebbf784ac0bc9ab4ed9e7f8a2255433184b684.cu | #include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include "compute_hist_loop_one_stat.cuh"
#include <cooperative_groups.h>
#include <catboost/libs/cuda_wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int Bits,
int BlockSize>
struct TPointHistOneByte {
const int InnerHistBitsCount = Bits - 5;
float* Histogram;
static constexpr int GetHistSize() {
return BlockSize * 32;
}
static constexpr int AddPointsBatchSize() {
return TLoadSize<LoadSize()>::Size();
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 700
const int NN = 2;
#else
const int NN = 4;
#endif
return NN;
}
static constexpr int GetBlockSize() {
return BlockSize;
}
static constexpr ELoadSize LoadSize() {
#if __CUDA_ARCH__ < 500
return ELoadSize::OneElement;
#else
return ELoadSize::FourElements;
// return ELoadSize::TwoElements;
#endif
}
static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) {
return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType);
}
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* hist) {
static_assert(Bits >= 5, "Error: this hist is for 5-8 bits");
const int histSize = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
hist[i] = 0;
}
Histogram = hist + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = (threadIdx.x + i) & 3;
int bin = (ci >> (24 - 8 * f)) & 255;
// int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> Bits) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Histogram[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Histogram[offset] += statToAdd;
}
}
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + i) & 3;
int bins[N];
float stats[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = (ci[k] >> (24 - 8 * f)) & 255;
// bins[k] = bfe(ci[k], 24 - 8 * f, 8);
stats[k] = (bins[k] >> Bits) == 0 ? t[k] : 0.0f;
}
int offsets[N];
int higherBin[N];
const int mask = (1 << InnerHistBitsCount) - 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
higherBin[k] = (bins[k] >> 5) & mask;
offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5);
}
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
if (pass == higherBin[j]) {
Histogram[offsets[j]] += stats[j];
}
}
}
} else {
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
Histogram[offsets[j]] += stats[j];
}
}
}
}
template <int N>
__forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) {
const int NN = AddPointsBatchSize();
static_assert(N % NN == 0, "Error: incorrect stripe size");
#pragma unroll
for (int k = 0; k < N; k += NN) {
AddPointsImpl<NN>(ci + k, t + k);
}
}
__forceinline__ __device__ void Reduce() {
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[warpHistSize + start] = sum;
}
}
__syncthreads();
//now we have only 1024 entries hist
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Histogram[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
TFeatureInBlock group = features[fid];
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup;
if (fold < features[fid].Folds) {
const float val = Histogram[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
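    /* A minimal host-side sketch (not part of the kernel above) of the bin
       decomposition used by AddPoint/AddPointsImpl: the lower 5 bits of a bin
       select the fold inside a 32-entry slice and the remaining
       InnerHistBitsCount bits select the sub-histogram. Values below are
       arbitrary examples, assuming Bits = 6 (maxBins <= 64). */
#if 0
    constexpr int Bits = 6;
    constexpr int InnerHistBitsCount = Bits - 5;           // 1 extra bit
    constexpr int mask = (1 << InnerHistBitsCount) - 1;    // 0x1
    int bin = 45;                                          // example bin value
    int f = 2;                                             // feature slot 0..3
    int higherBin = (bin >> 5) & mask;                     // 1
    int offset = 4 * higherBin + f + ((bin & 31) << 5);    // 4 + 2 + (13 << 5) = 422
#endif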
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
/*
* Single part
*/
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
}
|
0239acbb93ac0637dacc8126ee1e0914027ec804.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nms.h"
#include "utils.h"
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include <vector>
#include <cmath>
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh>
namespace def_retinanet {
namespace cuda {
__global__ void nms_kernel(
const int num_per_thread, const float threshold, const int num_detections,
const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++) {
for (int n = 0; n < num_per_thread; n++) {
int i = threadIdx.x * num_per_thread + n;
if (i < num_detections && m < i && scores[m] > 0.0f) {
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
if (mcls == icls) {
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1 + 1);
float h = max(0.0f, y2 - y1 + 1);
float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1);
float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1);
float inter = w * h;
float overlap = inter / (iarea + marea - inter);
if (overlap > threshold) {
scores[i] = 0.0f;
}
}
}
}
// Sync discarded detections
__syncthreads();
}
}
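/* Worked example of the suppression rule above (numbers are illustrative):
   for two identical same-class boxes (0, 0, 9, 9):
       w = h = 10, inter = 100, iarea = marea = 100
       overlap = 100 / (100 + 100 - 100) = 1.0
   so the lower-scored duplicate is zeroed for any threshold below 1. */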
int nms(int batch_size,
const void *const *inputs, void **outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, hipStream_t stream) {
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<bool>(count); // flags
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(count),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, count);
size_t temp_size_sort = 0;
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, count);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto on_stream = thrust::hip::par.on(stream);
auto flags = get_next_ptr<bool>(count, workspace, workspace_size);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores = get_next_ptr<float>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
// Discard null scores
thrust::transform(on_stream, in_scores, in_scores + count,
flags, thrust::placeholders::_1 > 0.0f);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, count, stream);
hipStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Sort scores and corresponding indices
thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores);
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream);
// Launch actual NMS kernel - 1 block with each thread handling n detections
const int max_threads = 1024;
int num_per_thread = ceil((float)num_detections / max_threads);
hipLaunchKernelGGL(( nms_kernel), dim3(1), dim3(max_threads), 0, stream, num_per_thread, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores, indices_sorted, indices, num_detections, 0, sizeof(*scores)*8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
hipMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, hipMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im) {
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
}
return 0;
}
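/* Hedged usage sketch (not from the original file) of the two-phase workspace
   pattern implemented above: a first call with a null workspace only returns
   the required scratch size, cub style; a second call runs the actual NMS.
   The buffer names below are placeholders. */
#if 0
size_t ws_size = nms(batch_size, inputs, outputs, count, detections_per_im,
                     nms_thresh, nullptr, 0, stream);   // query scratch size only
void* ws = nullptr;
hipMalloc(&ws, ws_size);                                // caller-owned scratch buffer
nms(batch_size, inputs, outputs, count, detections_per_im,
    nms_thresh, ws, ws_size, stream);                   // actual NMS
hipFree(ws);
#endif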
}
}
| 0239acbb93ac0637dacc8126ee1e0914027ec804.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nms.h"
#include "utils.h"
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include <vector>
#include <cmath>
#include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh>
#include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh>
namespace def_retinanet {
namespace cuda {
__global__ void nms_kernel(
const int num_per_thread, const float threshold, const int num_detections,
const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++) {
for (int n = 0; n < num_per_thread; n++) {
int i = threadIdx.x * num_per_thread + n;
if (i < num_detections && m < i && scores[m] > 0.0f) {
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
if (mcls == icls) {
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1 + 1);
float h = max(0.0f, y2 - y1 + 1);
float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1);
float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1);
float inter = w * h;
float overlap = inter / (iarea + marea - inter);
if (overlap > threshold) {
scores[i] = 0.0f;
}
}
}
}
// Sync discarded detections
__syncthreads();
}
}
int nms(int batch_size,
const void *const *inputs, void **outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, cudaStream_t stream) {
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<bool>(count); // flags
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::cub::CountingInputIterator<int>(count),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, count);
size_t temp_size_sort = 0;
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, count);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto on_stream = thrust::cuda::par.on(stream);
auto flags = get_next_ptr<bool>(count, workspace, workspace_size);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores = get_next_ptr<float>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
// Discard null scores
thrust::transform(on_stream, in_scores, in_scores + count,
flags, thrust::placeholders::_1 > 0.0f);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::cub::CountingInputIterator<int>(0),
flags, indices, num_selected, count, stream);
cudaStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Sort scores and corresponding indices
thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores);
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream);
// Launch actual NMS kernel - 1 block with each thread handling n detections
const int max_threads = 1024;
int num_per_thread = ceil((float)num_detections / max_threads);
nms_kernel<<<1, max_threads, 0, stream>>>(num_per_thread, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores, indices_sorted, indices, num_detections, 0, sizeof(*scores)*8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
cudaMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, cudaMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im) {
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
}
return 0;
}
}
}
|
6ce4874f6716d3273e6c3e2061e5f4dc170750fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ======================================================================== //
// Copyright 2018-2019 Ingo Wald //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include <optix_device.h>
#include "LaunchParams.h"
using namespace osc;
namespace osc {
/*! launch parameters in constant memory, filled in by optix upon
optixLaunch (this gets filled in from the buffer we pass to
optixLaunch) */
extern "C" __constant__ LaunchParams optixLaunchParams;
// for this simple example, we have a single ray type
enum { SURFACE_RAY_TYPE=0, RAY_TYPE_COUNT };
static __forceinline__ __device__
void *unpackPointer( uint32_t i0, uint32_t i1 )
{
const uint64_t uptr = static_cast<uint64_t>( i0 ) << 32 | i1;
void* ptr = reinterpret_cast<void*>( uptr );
return ptr;
}
static __forceinline__ __device__
void packPointer( void* ptr, uint32_t& i0, uint32_t& i1 )
{
const uint64_t uptr = reinterpret_cast<uint64_t>( ptr );
i0 = uptr >> 32;
i1 = uptr & 0x00000000ffffffff;
}
template<typename T>
static __forceinline__ __device__ T *getPRD()
{
const uint32_t u0 = optixGetPayload_0();
const uint32_t u1 = optixGetPayload_1();
return reinterpret_cast<T*>( unpackPointer( u0, u1 ) );
}
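  /* Illustrative round-trip (not part of the original example) showing how the
     64-bit per-ray-data pointer is carried through the two 32-bit optixTrace
     payload registers via packPointer/unpackPointer defined above. */
#if 0
  vec3f prd;                       // per-ray data living on the raygen stack
  uint32_t p0, p1;
  packPointer(&prd, p0, p1);       // split the address into two payload words
  // ...p0/p1 travel through optixTrace; hit/miss programs reassemble them:
  vec3f *samePtr = reinterpret_cast<vec3f*>(unpackPointer(p0, p1));
#endif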
//------------------------------------------------------------------------------
// closest hit and anyhit programs for radiance-type rays.
//
  // Note eventually we will have to create one pair of those for each
  // ray type and each geometry type we want to render; this example
  // uses only a single radiance ray type and a single triangle-mesh
  // geometry type, so one set of them is enough to set up the SBT.
//------------------------------------------------------------------------------
extern "C" __global__ void __closesthit__radiance()
{
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// compute normal:
const int primID = optixGetPrimitiveIndex();
const vec3i index = sbtData.index[primID];
const vec3f &A = sbtData.vertex[index.x];
const vec3f &B = sbtData.vertex[index.y];
const vec3f &C = sbtData.vertex[index.z];
const vec3f Ng = normalize(cross(B-A,C-A));
const vec3f rayDir = optixGetWorldRayDirection();
const float cosDN = 0.2f + .8f*fabsf(dot(rayDir,Ng));
vec3f &prd = *(vec3f*)getPRD<vec3f>();
prd = cosDN * sbtData.color;
}
extern "C" __global__ void __anyhit__radiance()
{ /*! for this simple example, this will remain empty */ }
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
//
// as with the anyhit/closest hit programs, in this example we only
// need to have _some_ dummy function to set up a valid SBT
// ------------------------------------------------------------------------------
extern "C" __global__ void __miss__radiance()
{
vec3f &prd = *(vec3f*)getPRD<vec3f>();
// set to constant white as background color
prd = vec3f(1.f);
}
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__renderFrame()
{
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const auto &camera = optixLaunchParams.camera;
// our per-ray data for this example. what we initialize it to
// won't matter, since this value will be overwritten by either
// the miss or hit program, anyway
vec3f pixelColorPRD = vec3f(0.f);
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer( &pixelColorPRD, u0, u1 );
// normalized screen plane position, in [0,1]^2
const vec2f screen(vec2f(ix+.5f,iy+.5f)
/ vec2f(optixLaunchParams.frame.size));
// generate ray direction
vec3f rayDir = normalize(camera.direction
+ (screen.x - 0.5f) * camera.horizontal
+ (screen.y - 0.5f) * camera.vertical);
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_DISABLE_ANYHIT,//OPTIX_RAY_FLAG_NONE,
SURFACE_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
SURFACE_RAY_TYPE, // missSBTIndex
u0, u1 );
const int r = int(255.99f*pixelColorPRD.x);
const int g = int(255.99f*pixelColorPRD.y);
const int b = int(255.99f*pixelColorPRD.z);
    // convert to 32-bit rgba value (we explicitly set alpha to 0xff
    // to make stb_image_write happy ...)
const uint32_t rgba = 0xff000000
| (r<<0) | (g<<8) | (b<<16);
// and write to frame buffer ...
const uint32_t fbIndex = ix+iy*optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
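  /* Worked pixel example for the ray setup above (numbers are illustrative):
     for a 1200x800 launch, pixel (ix, iy) = (599, 399) gives
         screen = (599.5 / 1200, 399.5 / 800) ~ (0.4996, 0.4994)
     so (screen - 0.5) is ~0 and rayDir is essentially camera.direction,
     i.e. the centre of the image looks straight along the view direction. */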
} // ::osc
| 6ce4874f6716d3273e6c3e2061e5f4dc170750fa.cu | // ======================================================================== //
// Copyright 2018-2019 Ingo Wald //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include <optix_device.h>
#include "LaunchParams.h"
using namespace osc;
namespace osc {
/*! launch parameters in constant memory, filled in by optix upon
optixLaunch (this gets filled in from the buffer we pass to
optixLaunch) */
extern "C" __constant__ LaunchParams optixLaunchParams;
// for this simple example, we have a single ray type
enum { SURFACE_RAY_TYPE=0, RAY_TYPE_COUNT };
static __forceinline__ __device__
void *unpackPointer( uint32_t i0, uint32_t i1 )
{
const uint64_t uptr = static_cast<uint64_t>( i0 ) << 32 | i1;
void* ptr = reinterpret_cast<void*>( uptr );
return ptr;
}
static __forceinline__ __device__
void packPointer( void* ptr, uint32_t& i0, uint32_t& i1 )
{
const uint64_t uptr = reinterpret_cast<uint64_t>( ptr );
i0 = uptr >> 32;
i1 = uptr & 0x00000000ffffffff;
}
template<typename T>
static __forceinline__ __device__ T *getPRD()
{
const uint32_t u0 = optixGetPayload_0();
const uint32_t u1 = optixGetPayload_1();
return reinterpret_cast<T*>( unpackPointer( u0, u1 ) );
}
//------------------------------------------------------------------------------
// closest hit and anyhit programs for radiance-type rays.
//
  // Note eventually we will have to create one pair of those for each
  // ray type and each geometry type we want to render; this example
  // uses only a single radiance ray type and a single triangle-mesh
  // geometry type, so one set of them is enough to set up the SBT.
//------------------------------------------------------------------------------
extern "C" __global__ void __closesthit__radiance()
{
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// compute normal:
const int primID = optixGetPrimitiveIndex();
const vec3i index = sbtData.index[primID];
const vec3f &A = sbtData.vertex[index.x];
const vec3f &B = sbtData.vertex[index.y];
const vec3f &C = sbtData.vertex[index.z];
const vec3f Ng = normalize(cross(B-A,C-A));
const vec3f rayDir = optixGetWorldRayDirection();
const float cosDN = 0.2f + .8f*fabsf(dot(rayDir,Ng));
vec3f &prd = *(vec3f*)getPRD<vec3f>();
prd = cosDN * sbtData.color;
}
extern "C" __global__ void __anyhit__radiance()
{ /*! for this simple example, this will remain empty */ }
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
//
// as with the anyhit/closest hit programs, in this example we only
// need to have _some_ dummy function to set up a valid SBT
// ------------------------------------------------------------------------------
extern "C" __global__ void __miss__radiance()
{
vec3f &prd = *(vec3f*)getPRD<vec3f>();
// set to constant white as background color
prd = vec3f(1.f);
}
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__renderFrame()
{
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const auto &camera = optixLaunchParams.camera;
// our per-ray data for this example. what we initialize it to
// won't matter, since this value will be overwritten by either
// the miss or hit program, anyway
vec3f pixelColorPRD = vec3f(0.f);
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer( &pixelColorPRD, u0, u1 );
// normalized screen plane position, in [0,1]^2
const vec2f screen(vec2f(ix+.5f,iy+.5f)
/ vec2f(optixLaunchParams.frame.size));
// generate ray direction
vec3f rayDir = normalize(camera.direction
+ (screen.x - 0.5f) * camera.horizontal
+ (screen.y - 0.5f) * camera.vertical);
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_DISABLE_ANYHIT,//OPTIX_RAY_FLAG_NONE,
SURFACE_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
SURFACE_RAY_TYPE, // missSBTIndex
u0, u1 );
const int r = int(255.99f*pixelColorPRD.x);
const int g = int(255.99f*pixelColorPRD.y);
const int b = int(255.99f*pixelColorPRD.z);
    // convert to 32-bit rgba value (we explicitly set alpha to 0xff
    // to make stb_image_write happy ...)
const uint32_t rgba = 0xff000000
| (r<<0) | (g<<8) | (b<<16);
// and write to frame buffer ...
const uint32_t fbIndex = ix+iy*optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
} // ::osc
|
43a861c676e1a571ebdd9d8d109103fe59624aba.hip | // !!! This is a file automatically generated by hipify!!!
#include "config.h"
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
texture<float,1,hipReadModeElementType> tex_mx;
texture<float,1,hipReadModeElementType> tex_my;
texture<float,1,hipReadModeElementType> tex_mz;
texture<float,1,hipReadModeElementType> tex_energy;
texture<float,1,hipReadModeElementType> tex_density;
texture<int,1,hipReadModeElementType> tex_neighbor;
texture<float,1,hipReadModeElementType> tex_normals;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
    //create non-uniform data sharing,
    //but randomize the block schedule so that tasks sharing the same data are not neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
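/* Arithmetic sketch of the super-block locality above (BLOCK_SIZE comes from the
   build configuration; the numbers here are assumptions for illustration only):
   every chosen neighbor lies in the same super block as the atom itself. */
#if 0
const int superBlockSz = BLOCK_SIZE * cfd_nBlksPerCluster; // e.g. 256 * 16 = 4096 if BLOCK_SIZE is 256
const int i = 10000;                                       // example atom index
const int start = i - i % superBlockSz;                    // first atom of i's super block
// every candidate j = start + rand() % superBlockSz satisfies
// start <= j < start + superBlockSz, i.e. i and j share one super block of data.
#endif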
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
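/* For reference, restating the helpers above rather than adding new physics:
   with velocity v = momentum / density, the quantities used here are
       pressure       p = (GAMMA - 1) * (density_energy - 0.5 * density * |v|^2)
       speed of sound a = sqrt(GAMMA * p / density)
   and compute_flux_contribution fills the columns of the inviscid Euler flux,
   e.g. the x-column is (v.x * m.x + p, v.x * m.y, v.x * m.z) for momentum and
   v.x * (density_energy + p) for the energy equation. */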
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
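/* Index sketch derived from the loads above (indices are examples only): the
   normals array is grouped by dimension, i.e. all X slots for the 8 neighbors,
   then all Y, then all Z, each strided by nelr across cells, so for cell i,
   neighbor j and dimension d in {0, 1, 2} the component sits at
   normals[i + (j + d * cfd_maxNeighbors) * nelr]. */
#if 0
int idx_x = i + (j + 0 * cfd_maxNeighbors) * nelr;  // normal.x of neighbor j
int idx_y = i + (j + 1 * cfd_maxNeighbors) * nelr;  // normal.y of neighbor j
int idx_z = i + (j + 2 * cfd_maxNeighbors) * nelr;  // normal.z of neighbor j
#endif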
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float*
normals, const float* __restrict__ density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);//mx[i];
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i); //mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = tex1Dfetch(tex_mx,nb);//mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
hipSetDeviceFlags(hipDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
hipBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int));
hipBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
for(int i = 0; i <5; i++)
{
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes);
}
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
for(int i = 0; i <ITERATIONS; i++)
{
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
//check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| 43a861c676e1a571ebdd9d8d109103fe59624aba.cu | #include "config.h"
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
texture<float,1,cudaReadModeElementType> tex_mx;
texture<float,1,cudaReadModeElementType> tex_my;
texture<float,1,cudaReadModeElementType> tex_mz;
texture<float,1,cudaReadModeElementType> tex_energy;
texture<float,1,cudaReadModeElementType> tex_density;
texture<int,1,cudaReadModeElementType> tex_neighbor;
texture<float,1,cudaReadModeElementType> tex_normals;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing,
//but use randomization to avoid making tasks that share the same data neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float*
normals, const float* __restrict__ density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);//mx[i];
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i); //mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = tex1Dfetch(tex_mx,nb);//mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
// Copy data to GPU
cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);
cudaSetDeviceFlags(cudaDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
cudaBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int));
cudaBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
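// Untimed warm-up launches of the kernel; the timed loop follows below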
for(int i = 0; i <5; i++)
{
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
for(int i = 0; i <ITERATIONS; i++)
{
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
//check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
|
47bb6ae15f991aeb059e84142c6173fc9b6c4129.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
//
#include "saiga/core/time/all.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/reduce.h"
#include "saiga/vision/features/Features.h"
#include "saiga/vision/features/OrbPattern.h"
#include "OrbDescriptors.h"
namespace Saiga
{
namespace CUDA
{
const int HALF_PATCH_SIZE = 15;
__constant__ unsigned char c_pattern[sizeof(int2) * 512];
__constant__ int c_u_max[32];
ORB::ORB()
{
auto pattern = Saiga::ORBPattern::DescriptorPattern();
static_assert(sizeof(Saiga::ivec2) == 2 * sizeof(int), "Saiga::ivec2 must be exactly two ints");
CHECK_CUDA_ERROR(hipMemcpyToSymbol(c_pattern, pattern.data(), sizeof(Saiga::ivec2) * pattern.size()));
auto u_max = Saiga::ORBPattern::AngleUmax();
CHECK_CUDA_ERROR(hipMemcpyToSymbol(c_u_max, u_max.data(), u_max.size() * sizeof(int)));
}
__global__ void calcOrb_kernel(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints,
Saiga::ArrayView<Saiga::DescriptorORB> descriptors)
{
int id = blockIdx.x;
int tid = threadIdx.x;
if (id >= keypoints.size()) return;
__shared__ unsigned char result[32];
const auto& kpt = keypoints[id];
float2 loc = {kpt.point(0), kpt.point(1)};
const auto* pattern = ((int2*)c_pattern) + 16 * tid;
unsigned char* desc = (unsigned char*)&descriptors[id];
const float factorPI = (float)(M_PI / 180.f);
float angle = (float)kpt.angle * factorPI;
float a = (float)cosf(angle), b = (float)sinf(angle);
int t0, t1, val;
auto GET_VALUE = [&](int idx) -> int {
int2 pat = pattern[idx];
float fx = loc.x + (pat.x * a - pat.y * b);
float fy = loc.y + (pat.x * b + pat.y * a);
// int x = __float2int_rn(fx);
// int y = __float2int_rn(fy);
// image.mirrorToEdge(y, x);
// CUDA_ASSERT(image.inImage(y, x));
// return image(y, x);
return tex2D<unsigned char>(tex, fx + 0.5, fy + 0.5);
};
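// Each of the 32 threads compares 8 point pairs from its 16-entry slice of the rotated pattern,
// producing 8 bits of the 256-bit ORB descriptor (32 threads x 8 bits = 32 bytes).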
t0 = GET_VALUE(0);
t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2);
t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4);
t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6);
t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8);
t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10);
t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12);
t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14);
t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
result[threadIdx.x] = (unsigned char)val;
if (threadIdx.x < 8)
{
auto data_int = (int*)result;
((int*)desc)[threadIdx.x] = data_int[threadIdx.x];
}
}
void ORB::ComputeDescriptors(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints,
Saiga::ArrayView<Saiga::DescriptorORB> _descriptors, hipStream_t stream)
{
if (_keypoints.empty())
{
return;
}
SAIGA_ASSERT(_keypoints.size() == _descriptors.size());
hipLaunchKernelGGL(( calcOrb_kernel), dim3(_keypoints.size()), dim3(32), 0, stream, tex, image, _keypoints, _descriptors);
}
__global__ void IC_Angle_kernel(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints)
{
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx >= keypoints.size()) return;
int m_01 = 0, m_10 = 0;
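// Patch image moments; the keypoint orientation is atan2(m_01, m_10) (intensity-centroid method, see below)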
const int2 loc = make_int2(keypoints[ptidx].point(0), keypoints[ptidx].point(1));
// Treat the center line differently, v=0
for (int u = threadIdx.x - HALF_PATCH_SIZE; u <= HALF_PATCH_SIZE; u += blockDim.x)
{
m_10 += u * tex2D<unsigned char>(tex, loc.x + u, loc.y);
}
m_10 = Saiga::CUDA::warpReduceSum<int, 32, false>(m_10);
for (int v = 1; v <= HALF_PATCH_SIZE; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int m_sum = 0;
const int d = c_u_max[v];
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
{
// int val_plus = image(loc.y + v, loc.x + u);
// int val_minus = image(loc.y - v, loc.x + u);
int val_plus = tex2D<unsigned char>(tex, loc.x + u, loc.y + v);
int val_minus = tex2D<unsigned char>(tex, loc.x + u, loc.y - v);
v_sum += (val_plus - val_minus);
m_sum += u * (val_plus + val_minus);
}
m_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(m_sum);
v_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(v_sum);
m_10 += m_sum;
m_01 += v * v_sum;
}
if (threadIdx.x == 0)
{
float kp_dir = atan2((float)m_01, (float)m_10);
kp_dir += (kp_dir < 0) * (2.0f * float(M_PI));
kp_dir *= 180.0f / float(M_PI);
keypoints[ptidx].angle = kp_dir;
}
}
__global__ void addBorder_kernel(Saiga::KeyPoint<float>* keypoints, int npoints, int minBorderX, int minBorderY,
int octave, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= npoints)
{
return;
}
keypoints[tid].point(0) += minBorderX;
keypoints[tid].point(1) += minBorderY;
keypoints[tid].octave = octave;
keypoints[tid].size = size;
}
void ORB::ComputeAngles(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints, int minBorderX, int minBorderY, int octave,
int size, hipStream_t stream)
{
if (_keypoints.empty())
{
return;
}
{
dim3 block(256);
dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.x));
hipLaunchKernelGGL(( addBorder_kernel), dim3(grid), dim3(block), 0, stream, _keypoints.data(), _keypoints.size(), minBorderX, minBorderY,
octave, size);
}
{
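// One 32-thread warp per keypoint (threadIdx.x), 8 keypoints per block (threadIdx.y)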
dim3 block(32, 8);
dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.y));
hipLaunchKernelGGL(( IC_Angle_kernel), dim3(grid), dim3(block), 0, stream, tex, image, _keypoints);
}
}
} // namespace CUDA
} // namespace Saiga
| 47bb6ae15f991aeb059e84142c6173fc9b6c4129.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
//
#include "saiga/core/time/all.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/reduce.h"
#include "saiga/vision/features/Features.h"
#include "saiga/vision/features/OrbPattern.h"
#include "OrbDescriptors.h"
namespace Saiga
{
namespace CUDA
{
const int HALF_PATCH_SIZE = 15;
__constant__ unsigned char c_pattern[sizeof(int2) * 512];
__constant__ int c_u_max[32];
ORB::ORB()
{
auto pattern = Saiga::ORBPattern::DescriptorPattern();
static_assert(sizeof(Saiga::ivec2) == 2 * sizeof(int), "Saiga::ivec2 must be exactly two ints");
CHECK_CUDA_ERROR(cudaMemcpyToSymbol(c_pattern, pattern.data(), sizeof(Saiga::ivec2) * pattern.size()));
auto u_max = Saiga::ORBPattern::AngleUmax();
CHECK_CUDA_ERROR(cudaMemcpyToSymbol(c_u_max, u_max.data(), u_max.size() * sizeof(int)));
}
__global__ void calcOrb_kernel(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints,
Saiga::ArrayView<Saiga::DescriptorORB> descriptors)
{
int id = blockIdx.x;
int tid = threadIdx.x;
if (id >= keypoints.size()) return;
__shared__ unsigned char result[32];
const auto& kpt = keypoints[id];
float2 loc = {kpt.point(0), kpt.point(1)};
const auto* pattern = ((int2*)c_pattern) + 16 * tid;
unsigned char* desc = (unsigned char*)&descriptors[id];
const float factorPI = (float)(M_PI / 180.f);
float angle = (float)kpt.angle * factorPI;
float a = (float)cosf(angle), b = (float)sinf(angle);
int t0, t1, val;
auto GET_VALUE = [&](int idx) -> int {
int2 pat = pattern[idx];
float fx = loc.x + (pat.x * a - pat.y * b);
float fy = loc.y + (pat.x * b + pat.y * a);
// int x = __float2int_rn(fx);
// int y = __float2int_rn(fy);
// image.mirrorToEdge(y, x);
// CUDA_ASSERT(image.inImage(y, x));
// return image(y, x);
return tex2D<unsigned char>(tex, fx + 0.5, fy + 0.5);
};
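// Each of the 32 threads compares 8 point pairs from its 16-entry slice of the rotated pattern,
// producing 8 bits of the 256-bit ORB descriptor (32 threads x 8 bits = 32 bytes).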
t0 = GET_VALUE(0);
t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2);
t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4);
t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6);
t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8);
t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10);
t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12);
t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14);
t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
result[threadIdx.x] = (unsigned char)val;
if (threadIdx.x < 8)
{
auto data_int = (int*)result;
((int*)desc)[threadIdx.x] = data_int[threadIdx.x];
}
}
void ORB::ComputeDescriptors(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints,
Saiga::ArrayView<Saiga::DescriptorORB> _descriptors, cudaStream_t stream)
{
if (_keypoints.empty())
{
return;
}
SAIGA_ASSERT(_keypoints.size() == _descriptors.size());
calcOrb_kernel<<<_keypoints.size(), 32, 0, stream>>>(tex, image, _keypoints, _descriptors);
}
__global__ void IC_Angle_kernel(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints)
{
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx >= keypoints.size()) return;
int m_01 = 0, m_10 = 0;
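// Patch image moments; the keypoint orientation is atan2(m_01, m_10) (intensity-centroid method, see below)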
const int2 loc = make_int2(keypoints[ptidx].point(0), keypoints[ptidx].point(1));
// Treat the center line differently, v=0
for (int u = threadIdx.x - HALF_PATCH_SIZE; u <= HALF_PATCH_SIZE; u += blockDim.x)
{
m_10 += u * tex2D<unsigned char>(tex, loc.x + u, loc.y);
}
m_10 = Saiga::CUDA::warpReduceSum<int, 32, false>(m_10);
for (int v = 1; v <= HALF_PATCH_SIZE; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int m_sum = 0;
const int d = c_u_max[v];
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
{
// int val_plus = image(loc.y + v, loc.x + u);
// int val_minus = image(loc.y - v, loc.x + u);
int val_plus = tex2D<unsigned char>(tex, loc.x + u, loc.y + v);
int val_minus = tex2D<unsigned char>(tex, loc.x + u, loc.y - v);
v_sum += (val_plus - val_minus);
m_sum += u * (val_plus + val_minus);
}
m_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(m_sum);
v_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(v_sum);
m_10 += m_sum;
m_01 += v * v_sum;
}
if (threadIdx.x == 0)
{
float kp_dir = atan2((float)m_01, (float)m_10);
kp_dir += (kp_dir < 0) * (2.0f * float(M_PI));
kp_dir *= 180.0f / float(M_PI);
keypoints[ptidx].angle = kp_dir;
}
}
__global__ void addBorder_kernel(Saiga::KeyPoint<float>* keypoints, int npoints, int minBorderX, int minBorderY,
int octave, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= npoints)
{
return;
}
keypoints[tid].point(0) += minBorderX;
keypoints[tid].point(1) += minBorderY;
keypoints[tid].octave = octave;
keypoints[tid].size = size;
}
void ORB::ComputeAngles(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints, int minBorderX, int minBorderY, int octave,
int size, cudaStream_t stream)
{
if (_keypoints.empty())
{
return;
}
{
dim3 block(256);
dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.x));
addBorder_kernel<<<grid, block, 0, stream>>>(_keypoints.data(), _keypoints.size(), minBorderX, minBorderY,
octave, size);
}
{
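// One 32-thread warp per keypoint (threadIdx.x), 8 keypoints per block (threadIdx.y)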
dim3 block(32, 8);
dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.y));
IC_Angle_kernel<<<grid, block, 0, stream>>>(tex, image, _keypoints);
}
}
} // namespace CUDA
} // namespace Saiga
|
d6ccd79415b66ce67aaede52380d6a45ca3ad697.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <c10/hip/HIPException.h>
int safeDeviceCount() {
int count;
hipError_t err = hipGetDeviceCount(&count);
if (err == hipErrorInsufficientDriver || err == hipErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)
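// The shared test body is included twice: this first expansion turns each C10_DEFINE_TEST into a
// __global__ kernel plus a gtest that launches it on the device; the second expansion below
// produces plain host-side tests.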
#define C10_DEFINE_TEST(a, b) \
__global__ void CUDA##a##b(); \
TEST(a##Device, b) { \
SKIP_IF_NO_GPU(); \
hipDeviceSynchronize(); \
hipLaunchKernelGGL(( CUDA##a##b), dim3(1), dim3(1), 0, 0, ); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
hipDeviceSynchronize(); \
ASSERT_EQ(hipGetLastError(), hipSuccess); \
} \
__global__ void CUDA##a##b()
#include <c10/test/util/complex_math_test_common.h>
#undef C10_DEFINE_TEST
#undef C10_ASSERT_NEAR
#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)
#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)
#include <c10/test/util/complex_math_test_common.h>
| d6ccd79415b66ce67aaede52380d6a45ca3ad697.cu | #include <gtest/gtest.h>
#include <c10/cuda/CUDAException.h>
int safeDeviceCount() {
int count;
cudaError_t err = cudaGetDeviceCount(&count);
if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)
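// The shared test body is included twice: this first expansion turns each C10_DEFINE_TEST into a
// __global__ kernel plus a gtest that launches it on the device; the second expansion below
// produces plain host-side tests.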
#define C10_DEFINE_TEST(a, b) \
__global__ void CUDA##a##b(); \
TEST(a##Device, b) { \
SKIP_IF_NO_GPU(); \
cudaDeviceSynchronize(); \
CUDA##a##b<<<1, 1>>>(); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
cudaDeviceSynchronize(); \
ASSERT_EQ(cudaGetLastError(), cudaSuccess); \
} \
__global__ void CUDA##a##b()
#include <c10/test/util/complex_math_test_common.h>
#undef C10_DEFINE_TEST
#undef C10_ASSERT_NEAR
#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)
#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)
#include <c10/test/util/complex_math_test_common.h>
|
59dface004c1b00d865c9b903538d0b97d1f0218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void add(int n, float *x, float *y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
| 59dface004c1b00d865c9b903538d0b97d1f0218.cu | __global__
void add(int n, float *x, float *y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
|
13ecfac6c2ed10fa57dd184446f54ccf02e51451.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 1
#include <wb.h>
//
// in1 = d_A
// in2 = d_B
// out = C
// len = n
//
//
//
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i<len){ // Don't add when we're in the last entries in the last block that are extra
out[i] = in1[i]+in2[i];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 =
(float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 =
(float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void **) &deviceInput1, inputLength*sizeof(float));
hipMalloc((void **) &deviceInput2, inputLength*sizeof(float));
hipMalloc((void **) &deviceOutput, inputLength*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput1, hostInput1, inputLength*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2, inputLength*sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( vecAdd), dim3(ceil(inputLength/256.0)), dim3(256), 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
| 13ecfac6c2ed10fa57dd184446f54ccf02e51451.cu | // MP 1
#include <wb.h>
//
// in1 = d_A
// in2 = d_B
// out = C
// len = n
//
//
//
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i<len){ // Don't add when we're in the last entries in the last block that are extra
out[i] = in1[i]+in2[i];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 =
(float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 =
(float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void **) &deviceInput1, inputLength*sizeof(float));
cudaMalloc((void **) &deviceInput2, inputLength*sizeof(float));
cudaMalloc((void **) &deviceOutput, inputLength*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput1, hostInput1, inputLength*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2, inputLength*sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
vecAdd<<<ceil(inputLength/256.0), 256>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
7a3bc0ce8e55fc3e59a8c489dec481e637160569.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Two kernels, no shared memory, manual laplacian, 2D malloc */
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
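// Accessor macros for 2D arrays allocated with hipMallocPitch: consecutive rows are one pitch
// (in bytes) apart; WF and SA additionally offset by whole batches of rows (index b).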
#define M(a, z, x) (*((float *) ((char *)a + z * model_pitch) + x))
#define WF(a, b, z, x) (*((float *) ((char *)a + b * nz * model_pitch + (z) * model_pitch) + x))
#define SA(a, b, s, t) (*((float *) ((char *)a + b * ns * source_amplitude_pitch + s * source_amplitude_pitch) + t))
#define SL(a, b, s) (*((int *) ((char *)a + b * sources_loc_pitch) + s))
__constant__ float fd_d[3];
size_t model_pitch_h;
// Device code
__global__ void step_d(const float *const model,
float *wfc,
float *wfp,
const int nb, const int nz, const int nx,
const size_t model_pitch)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int z = blockDim.y * blockIdx.y + threadIdx.y;
int b = blockDim.z * blockIdx.z + threadIdx.z;
float lap;
bool in_domain = (x > 1) && (x < nx - 2)
&& (z > 1) && (z < nz - 2)
&& (b < nb);
if (in_domain)
{
/* Laplacian */
lap = (fd_d[0] * WF(wfc, b, z, x) +
fd_d[1] *
(WF(wfc, b, z, x + 1) +
WF(wfc, b, z, x - 1) +
WF(wfc, b, z + 1, x) +
WF(wfc, b, z - 1, x)) +
fd_d[2] *
(WF(wfc, b, z, x + 2) +
WF(wfc, b, z, x - 2) +
WF(wfc, b, z + 2, x) +
WF(wfc, b, z - 2, x)));
/* Main evolution equation */
WF(wfp, b, z, x) = M(model, z, x) * lap + 2 * WF(wfc, b, z, x)
- WF(wfp, b, z, x);
}
}
__global__ void add_sources_d(const float *const model,
float *wfp,
const float *const source_amplitude,
const int *const sources_z,
const int *const sources_x,
const int nz, const int nx,
const int nt, const int ns, const int it,
const size_t model_pitch, const size_t source_amplitude_pitch,
const size_t sources_loc_pitch)
{
int x = threadIdx.x;
int b = blockIdx.x;
int sz = SL(sources_z, b, x);
int sx = SL(sources_x, b, x);
WF(wfp, b, sz, sx) += SA(source_amplitude, b, x, it) * M(model, sz, sx);
}
// Host code
extern "C"
void setup(int nb, int nz, int nx, float dx, float *model_h,
float **model_d, float **wfc_d, float **wfp_d)
{
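// Coefficients of the 4th-order 2D Laplacian stencil: centre point, +/-1 neighbours, +/-2 neighbours (all divided by dx^2)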
float fd[3] = {
-10.0f / 2 / (dx * dx),
4.0f / 3 / (dx * dx),
-1.0f / 12 / (dx * dx)
};
gpuErrchk(hipMemcpyToSymbol(fd_d, fd, 3*sizeof(float)));
gpuErrchk(hipMallocPitch(model_d, &model_pitch_h, nx * sizeof(float),
nz));
gpuErrchk(hipMemcpy2D(*model_d, model_pitch_h, model_h,
nx * sizeof(float), nx * sizeof(float),
nz, hipMemcpyHostToDevice));
gpuErrchk(hipMallocPitch(wfc_d, &model_pitch_h, nx * sizeof(float),
nb * nz));
gpuErrchk(hipMemset2D(*wfc_d, model_pitch_h, 0, nx * sizeof(float),
nb * nz));
gpuErrchk(hipMallocPitch(wfp_d, &model_pitch_h, nx * sizeof(float),
nb * nz));
gpuErrchk(hipMemset2D(*wfp_d, model_pitch_h, 0, nx * sizeof(float),
nb * nz));
}
extern "C"
void step(int nb, int nz, int nx, int nt, int ns,
float *model_d, float *wfc_d, float *wfp_d,
float *source_amplitude_h,
int *sources_z_h, int *sources_x_h, float *wfc_h)
{
size_t source_amplitude_pitch;
size_t sources_loc_pitch;
float *source_amplitude_d;
gpuErrchk(hipMallocPitch(&source_amplitude_d,
&source_amplitude_pitch,
nt * sizeof(float), nb * ns));
gpuErrchk(hipMemcpy2D(source_amplitude_d, source_amplitude_pitch,
source_amplitude_h,
nt * sizeof(float), nt * sizeof(float),
nb * ns, hipMemcpyHostToDevice));
int *sources_z_d;
gpuErrchk(hipMallocPitch(&sources_z_d, &sources_loc_pitch,
ns * sizeof(int), nb));
gpuErrchk(hipMemcpy2D(sources_z_d, sources_loc_pitch, sources_z_h,
ns * sizeof(int), ns * sizeof(int),
nb, hipMemcpyHostToDevice));
int *sources_x_d;
gpuErrchk(hipMallocPitch(&sources_x_d, &sources_loc_pitch,
ns * sizeof(int), nb));
gpuErrchk(hipMemcpy2D(sources_x_d, sources_loc_pitch, sources_x_h,
ns * sizeof(int), ns * sizeof(int),
nb, hipMemcpyHostToDevice));
dim3 dimBlock(32, 32, 1);
int gridx = (nx + dimBlock.x - 1) / dimBlock.x;
int gridz = (nz + dimBlock.y - 1) / dimBlock.y;
int gridb = (nb + dimBlock.z - 1) / dimBlock.z;
dim3 dimGrid(gridx, gridz, gridb);
int it;
float *tmp;
for (it = 0; it < nt; it++)
{
hipLaunchKernelGGL(( step_d), dim3(dimGrid), dim3(dimBlock), 0, 0, model_d, wfc_d, wfp_d,
nb, nz, nx, model_pitch_h);
gpuErrchk( hipPeekAtLastError() );
hipLaunchKernelGGL(( add_sources_d), dim3(nb), dim3(ns), 0, 0, model_d, wfp_d,
source_amplitude_d, sources_z_d, sources_x_d,
nz, nx, nt, ns, it, model_pitch_h,
source_amplitude_pitch, sources_loc_pitch);
gpuErrchk( hipPeekAtLastError() );
tmp = wfc_d;
wfc_d = wfp_d;
wfp_d = tmp;
}
gpuErrchk(hipMemcpy2D(wfc_h, nx * sizeof(float), wfc_d,
model_pitch_h, nx * sizeof(float),
nb * nz, hipMemcpyDeviceToHost));
gpuErrchk(hipFree(source_amplitude_d));
gpuErrchk(hipFree(sources_z_d));
gpuErrchk(hipFree(sources_x_d));
}
extern "C"
void finalise(float *model_d, float *wfc_d, float *wfp_d)
{
gpuErrchk(hipFree(model_d));
gpuErrchk(hipFree(wfc_d));
gpuErrchk(hipFree(wfp_d));
}
| 7a3bc0ce8e55fc3e59a8c489dec481e637160569.cu | /* Two kernels, no shared memory, manual laplacian, 2D malloc */
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
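// Accessor macros for 2D arrays allocated with cudaMallocPitch: consecutive rows are one pitch
// (in bytes) apart; WF and SA additionally offset by whole batches of rows (index b).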
#define M(a, z, x) (*((float *) ((char *)a + z * model_pitch) + x))
#define WF(a, b, z, x) (*((float *) ((char *)a + b * nz * model_pitch + (z) * model_pitch) + x))
#define SA(a, b, s, t) (*((float *) ((char *)a + b * ns * source_amplitude_pitch + s * source_amplitude_pitch) + t))
#define SL(a, b, s) (*((int *) ((char *)a + b * sources_loc_pitch) + s))
__constant__ float fd_d[3];
size_t model_pitch_h;
// Device code
__global__ void step_d(const float *const model,
float *wfc,
float *wfp,
const int nb, const int nz, const int nx,
const size_t model_pitch)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int z = blockDim.y * blockIdx.y + threadIdx.y;
int b = blockDim.z * blockIdx.z + threadIdx.z;
float lap;
bool in_domain = (x > 1) && (x < nx - 2)
&& (z > 1) && (z < nz - 2)
&& (b < nb);
if (in_domain)
{
/* Laplacian */
lap = (fd_d[0] * WF(wfc, b, z, x) +
fd_d[1] *
(WF(wfc, b, z, x + 1) +
WF(wfc, b, z, x - 1) +
WF(wfc, b, z + 1, x) +
WF(wfc, b, z - 1, x)) +
fd_d[2] *
(WF(wfc, b, z, x + 2) +
WF(wfc, b, z, x - 2) +
WF(wfc, b, z + 2, x) +
WF(wfc, b, z - 2, x)));
/* Main evolution equation */
WF(wfp, b, z, x) = M(model, z, x) * lap + 2 * WF(wfc, b, z, x)
- WF(wfp, b, z, x);
}
}
__global__ void add_sources_d(const float *const model,
float *wfp,
const float *const source_amplitude,
const int *const sources_z,
const int *const sources_x,
const int nz, const int nx,
const int nt, const int ns, const int it,
const size_t model_pitch, const size_t source_amplitude_pitch,
const size_t sources_loc_pitch)
{
int x = threadIdx.x;
int b = blockIdx.x;
int sz = SL(sources_z, b, x);
int sx = SL(sources_x, b, x);
WF(wfp, b, sz, sx) += SA(source_amplitude, b, x, it) * M(model, sz, sx);
}
// Host code
extern "C"
void setup(int nb, int nz, int nx, float dx, float *model_h,
float **model_d, float **wfc_d, float **wfp_d)
{
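// Coefficients of the 4th-order 2D Laplacian stencil: centre point, +/-1 neighbours, +/-2 neighbours (all divided by dx^2)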
float fd[3] = {
-10.0f / 2 / (dx * dx),
4.0f / 3 / (dx * dx),
-1.0f / 12 / (dx * dx)
};
gpuErrchk(cudaMemcpyToSymbol(fd_d, fd, 3*sizeof(float)));
gpuErrchk(cudaMallocPitch(model_d, &model_pitch_h, nx * sizeof(float),
nz));
gpuErrchk(cudaMemcpy2D(*model_d, model_pitch_h, model_h,
nx * sizeof(float), nx * sizeof(float),
nz, cudaMemcpyHostToDevice));
gpuErrchk(cudaMallocPitch(wfc_d, &model_pitch_h, nx * sizeof(float),
nb * nz));
gpuErrchk(cudaMemset2D(*wfc_d, model_pitch_h, 0, nx * sizeof(float),
nb * nz));
gpuErrchk(cudaMallocPitch(wfp_d, &model_pitch_h, nx * sizeof(float),
nb * nz));
gpuErrchk(cudaMemset2D(*wfp_d, model_pitch_h, 0, nx * sizeof(float),
nb * nz));
}
extern "C"
void step(int nb, int nz, int nx, int nt, int ns,
float *model_d, float *wfc_d, float *wfp_d,
float *source_amplitude_h,
int *sources_z_h, int *sources_x_h, float *wfc_h)
{
size_t source_amplitude_pitch;
size_t sources_loc_pitch;
float *source_amplitude_d;
gpuErrchk(cudaMallocPitch(&source_amplitude_d,
&source_amplitude_pitch,
nt * sizeof(float), nb * ns));
gpuErrchk(cudaMemcpy2D(source_amplitude_d, source_amplitude_pitch,
source_amplitude_h,
nt * sizeof(float), nt * sizeof(float),
nb * ns, cudaMemcpyHostToDevice));
int *sources_z_d;
gpuErrchk(cudaMallocPitch(&sources_z_d, &sources_loc_pitch,
ns * sizeof(int), nb));
gpuErrchk(cudaMemcpy2D(sources_z_d, sources_loc_pitch, sources_z_h,
ns * sizeof(int), ns * sizeof(int),
nb, cudaMemcpyHostToDevice));
int *sources_x_d;
gpuErrchk(cudaMallocPitch(&sources_x_d, &sources_loc_pitch,
ns * sizeof(int), nb));
gpuErrchk(cudaMemcpy2D(sources_x_d, sources_loc_pitch, sources_x_h,
ns * sizeof(int), ns * sizeof(int),
nb, cudaMemcpyHostToDevice));
dim3 dimBlock(32, 32, 1);
int gridx = (nx + dimBlock.x - 1) / dimBlock.x;
int gridz = (nz + dimBlock.y - 1) / dimBlock.y;
int gridb = (nb + dimBlock.z - 1) / dimBlock.z;
dim3 dimGrid(gridx, gridz, gridb);
int it;
float *tmp;
for (it = 0; it < nt; it++)
{
step_d<<<dimGrid, dimBlock>>>(model_d, wfc_d, wfp_d,
nb, nz, nx, model_pitch_h);
gpuErrchk( cudaPeekAtLastError() );
add_sources_d<<<nb, ns>>>(model_d, wfp_d,
source_amplitude_d, sources_z_d, sources_x_d,
nz, nx, nt, ns, it, model_pitch_h,
source_amplitude_pitch, sources_loc_pitch);
gpuErrchk( cudaPeekAtLastError() );
tmp = wfc_d;
wfc_d = wfp_d;
wfp_d = tmp;
}
gpuErrchk(cudaMemcpy2D(wfc_h, nx * sizeof(float), wfc_d,
model_pitch_h, nx * sizeof(float),
nb * nz, cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(source_amplitude_d));
gpuErrchk(cudaFree(sources_z_d));
gpuErrchk(cudaFree(sources_x_d));
}
extern "C"
void finalise(float *model_d, float *wfc_d, float *wfp_d)
{
gpuErrchk(cudaFree(model_d));
gpuErrchk(cudaFree(wfc_d));
gpuErrchk(cudaFree(wfp_d));
}
|
1882f6b40f0353b652227eb8a457cfe559914994.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
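// Bitwise modulo: computes x % y only when y is a power of two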
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
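// The generated kernel below pipelines four applications of a 3x3 weighted-average
// stencil. Each thread block sweeps down the rows in chunks of FORMA_BLOCKDIM_Y,
// ping-ponging intermediate results between the two shared tiles
// (__tilevar_0__ / __tilevar_1__) and writing the fourth stage to __var_1__.
// Adjacent blocks overlap by 8 columns because each stencil stage shrinks the
// valid interior by one cell on every side.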
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_0__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_35__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
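  // The grid is sized with an effective block width of (blockDim.x - 8) to account
  // for the 8-column overlap between neighbouring blocks (matching __iter_0__ in
  // the kernel above).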
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| 1882f6b40f0353b652227eb8a457cfe559914994.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_0__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_35__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
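  // Note: the device's maximum shared memory per block is queried above but never
  // compared against __SMemSize___kernel___forma_kernel__0__; an oversized tile
  // would presumably only surface as a kernel launch failure.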
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
fe763533c46b820e1a8b812d84dc3cfddce9c00d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _TNT_KERNEL_H_
#define _TNT_KERNEL_H_
#include <stdio.h>
//DB information
__constant__ int const_numDB1 = 169; //total number of DB1 chewbacks
__constant__ char const_d_DB1[1448]; //constant memory allocation for DB chewbacks minus full chew back
__constant__ int const_d_DB1_base[169]; //constant memory contains location of each starting sequence in d_DB1
__constant__ int const_d_numOccurrenceDB1[169]; //Number of ways for particular DB1 chewback
__constant__ int const_d_numUniqueCharDB1[169]; //number of characters in a unique occurrence of DB1
//V information
__constant__ int const_numV = 342; //total number of V sequences in all V files
__constant__ char const_d_V[3107]; //holds all V chewback sequences
__constant__ int const_d_V_base[342]; //contains the starting index of each V sequence
__constant__ int const_d_numUniqueCharV[342]; //number of characters in a unique occurrence of V
//J information
__constant__ int const_numJ = 271; //total number of J sequences in all J files
__constant__ char const_d_J[3210]; //holds all J chewback sequences
__constant__ int const_d_J_base[271]; //contains the starting index of each J sequence
__constant__ int const_d_numUniqueCharJ[271]; //number of characters in a unique occurrence of J
__constant__ int c_DB_Full_Chew_Occur; //current V sequence
__constant__ int c_Vnum; //current V sequence
__constant__ int c_Dnum; //current D sequence
__constant__ int c_Jnum; //current J sequence
__constant__ int c_n; //current n value
__constant__ int c_V_Begin; //Base index for V sequences
__constant__ int c_V_End; //End index for V sequences
__constant__ int c_J_Begin; //Base index for J sequences
__constant__ int c_J_End; //End index for J sequences
__constant__ int const_d_VJ_Pairs[NUM_V_FILES*NUM_J_FILES];
__constant__ int const_VJ_Pair_Base[NUM_V_FILES*NUM_J_FILES];
__constant__ int c_NUM_V_FILES = 20;
__constant__ int c_NUM_J_FILES = 12;
/////////////////////////////////////////////////
//kernel for 64 threads or less
/////////////////////////////////////////////////
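// Each thread in the grid tests one specific combination of inserted n bases.
// For every stored in-vivo sequence of the current V/J pair, the kernel tries to
// reproduce it as V + n + D + n + J (allowing V, D and/or J to be fully chewed
// back), accumulating the number of ways each match can occur, and finally
// combines the per-thread counts (a block-level reduction when blockDim.x > 1).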
__global__ void
TNT_kernel_InVivo64(unsigned int* d_Results, char* d_InVivo_cp64)
{
volatile __shared__ char iterSeq_sm[64]; //the thread block size we will use for this kernel is 64
volatile __shared__ int result_sm[512]; //the max thread-block size
//The four possible bases
char base[4] = {'A', 'T', 'G', 'C'};
char nSeq[12]; //will hold a single n combination
int Vnum = c_Vnum; //current V file
int Jnum = c_Jnum; //current J file
int sh_index; //used as a shared memory index
int sum; //holds an iterative sum for result
char tmpChar; //used to temporarily hold a character
//obtain a unique global index for each thread in the grid
unsigned int g_tid = blockIdx.x*blockDim.x + threadIdx.x;
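	//Derive this thread's candidate n-insertion string from its global index.
	//The arithmetic below assigns a distinct combination of bases to every
	//g_tid in [0, 4^c_n), so the insertion space is covered exactly once
	//provided the launch supplies 4^c_n threads. Only the first c_n entries are used.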
if(c_n){
nSeq[0] = base[g_tid%4]; //n = 1
nSeq[1] = base[(g_tid+(g_tid/4))%4]; //n = 2
nSeq[2] = base[(g_tid+(g_tid/16))%4]; //n = 3
nSeq[3] = base[(g_tid+(g_tid/64))%4]; //n = 4
nSeq[4] = base[(g_tid+(g_tid/256))%4]; //n = 5
nSeq[5] = base[(g_tid+(g_tid/1024))%4]; //n = 6
nSeq[6] = base[(g_tid+(g_tid/4096))%4]; //n = 7
nSeq[7] = base[(g_tid+(g_tid/16384))%4]; //n = 8
nSeq[8] = base[(g_tid+(g_tid/65536))%4]; //n = 9
nSeq[9] = base[(g_tid+(g_tid/262144))%4]; //n = 10
nSeq[10] = base[(g_tid+(g_tid/1048576))%4]; //n = 11
nSeq[11] = base[(g_tid+(g_tid/4194304))%4]; //n = 12
}
//get the number of InVivo VJ sequences we need to go through
int num_Seqs = const_d_VJ_Pairs[Vnum*12 + Jnum]; //multiply by 12. Number of J files.
//int whichSeq; //which sequence is our current thread-block working on in the scope of current VJ
int seqLen; //length of our current sequence
int pairBase = const_VJ_Pair_Base[Vnum*12 + Jnum] * 64; //The base address for a given VJ pair
//iterate through all InVivo combinations for current VJ pair
for(int i = 0; i < num_Seqs; i++){
result_sm[threadIdx.x] = 0;
sum = 0; //reset our result
__syncthreads();
		//store an InVivo combination into the shared memory "iterSeq_sm[]"
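		//(records are padded to 64 bytes each: byte 2 holds the sequence length
		// and the sequence characters start at byte 3)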
if(blockDim.x < 64){ //iter through VJ seq if block dim < 64. There's only 1 block
for(int j = 0; j < (64 / blockDim.x); j++){ //iterations = sequence allocation / block size
int k = j*blockDim.x+threadIdx.x; //create local SM index
int gl_index = (pairBase + i*64) + k; //create global memory index
iterSeq_sm[k] = d_InVivo_cp64[gl_index]; //read the current InVivo sequence from the global memory
}
}
else{ //only threads < 64 will read inVivo data
if(threadIdx.x < 64){
int gl_index = (pairBase + i*64) + threadIdx.x; //create global memory index
iterSeq_sm[threadIdx.x] = d_InVivo_cp64[gl_index];//read the current InVivo sequence from the global memory
}
}
//if(blockDim.x > 1)
__syncthreads();
//get the length of current sequence for all threads in current thread-block
seqLen = (int)iterSeq_sm[2];
//set our shared memory index to the base of the sequence characters in shared array
sh_index = 3;
int n_p1 = c_n + 1;
int n_cnt, k;
int length; //length of each sequence we generate
bool Vmatch = 1; //Is there a V sequence match?
bool seqMatch = 1; //Is the entire sequence a match?
//////////////////////////////////////////////////////////////////////////////////
//First compare our InVivo Sequences to VnDnJ combinations with no full chewbacks
//////////////////////////////////////////////////////////////////////////////////
		for(int Vindx = c_V_Begin; Vindx < c_V_End; Vindx++){ //go through relevant V sequences
Vmatch = 1; //assume V is a match before we check it
seqMatch = 1;
/////////////////////////////////////////////////////////
//Compare InVivo Sequence to Vn comb with D and J chewed
/////////////////////////////////////////////////////////
length = const_d_numUniqueCharV[Vindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen == length){
sh_index = 3; //reset our shared memory index
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){Vmatch = 0; break;} //End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0) continue; //check the next V sequence
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if (seqMatch)
sum += c_DB_Full_Chew_Occur; //sum++; //if we've made it this far, the sequences match.
}
if(Vmatch == 0) continue;
///////////////////////////////////
//check D and J combinations
///////////////////////////////////
for(int Dindx = 0; Dindx < const_numDB1 && Vmatch; Dindx++){ //go through all D sequences
/////////////////////////////////////////
//compare VnDn with J fully chewed back
/////////////////////////////////////////
length = const_d_numUniqueCharV[Vindx] + const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen == length){
for(int j = 0; j < n_p1 && Vmatch; j++){ //go through each n addition (n + 1)
seqMatch = 1; //Assume initially a sequence match
sh_index = 3; //reset our shared memory index
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){Vmatch = 0; break;}//End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_DB1[k]; //store V character in shared memory
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
n_cnt--;
sum += const_d_numOccurrenceDB1[Dindx]; //if we've made it this far, the sequences match.
} //end iterating through n sequences
} //end checking VnDn comparisons
///////////////////////////////////
//check D J combinations
///////////////////////////////////
				for(int Jindx = c_J_Begin; Jindx < c_J_End && Vmatch; Jindx++){ //go through relevant J sequences
length = const_d_numUniqueCharV[Vindx] + const_d_numUniqueCharJ[Jindx] + const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
for(int j = 0; j < n_p1; j++){ //go through each n addition (n + 1)
seqMatch = 1; //Assume initially a sequence match
sh_index = 3; //reset our shared memory index
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
							if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; Vmatch = 0; break;} //End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0)break; //exit current v comparisons if V is not a match
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_DB1[k]; //store V character in shared memory
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current J sequence
k = const_d_J_base[Jindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
n_cnt--;
sum += const_d_numOccurrenceDB1[Dindx]; //if we've made it this far, the sequences match.
} //end iterating through n sequences
} //end iterating through j sequences
} //end iterating through d sequences
} //end iterating through v sequences
//---------------------------------------------------------------------------------
//Compare our InVivo Sequences to VnJ with D full chewback
//---------------------------------------------------------------------------------
		for(int Vindx = c_V_Begin; Vindx < c_V_End; Vindx++){ //go through relevant V sequences
Vmatch = 1;
			for(int Jindx = c_J_Begin; Jindx < c_J_End && Vmatch; Jindx++){ //go through relevant J sequences
length = const_d_numUniqueCharV[Vindx] + const_d_numUniqueCharJ[Jindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
sh_index = 3; //reset our shared memory index
seqMatch = 1;
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){Vmatch = 0; break;} //End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current J sequence
k = const_d_J_base[Jindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]){seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
sum += c_DB_Full_Chew_Occur; //if we've made it this far, the sequences match.
} //end iterating through j sequences
} //end iterating through v sequences
//---------------------------------------------------------------------------------
//Compare our InVivo Sequences to nDn combinations with V and J fully chewed
//---------------------------------------------------------------------------------
for(int Dindx = 0; Dindx < const_numDB1; Dindx++){ //go through all D sequences
length = const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
for(int j = 0; j < n_p1; j++){ //go through each n addition (n + 1)
sh_index = 3; //reset our shared memory index
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDn;} //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_DB1[k]; //store V character in shared memory
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDn;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDn;} //jump to next iteration if a character does not match
sh_index++;
}
}
n_cnt--;
sum += const_d_numOccurrenceDB1[Dindx]; //if we've made it this far, the sequences match.
nDn: continue; //if there is no match go to next n variance
} //end iterating through n sequences
} //end iterating through d sequences
//-----------------------------------------------------------------------------------
//Compare our InVivo Sequences to nJ combinations with V and D chewed
//-----------------------------------------------------------------------------------
		for(int Jindx = c_J_Begin; Jindx < c_J_End; Jindx++){ //go through relevant J sequences
length = const_d_numUniqueCharJ[Jindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
sh_index = 3; //reset our shared memory index
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]) goto nJ; //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current J sequence
k = const_d_J_base[Jindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]) goto nJ; //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
//sum++; //if we've made it this far, the sequences match.
sum += c_DB_Full_Chew_Occur;
nJ: continue; //if there is no match go to next n variance
} //end iterating through j sequences
//----------------------------------------------------------------------------------
//First compare our InVivo Sequences to nDnJ combinations with no full chewbacks
//----------------------------------------------------------------------------------
for(int Dindx = 0; Dindx < const_numDB1; Dindx++){ //go through all D sequences
			for(int Jindx = c_J_Begin; Jindx < c_J_End; Jindx++){ //go through relevant J sequences
length = const_d_numUniqueCharJ[Jindx] + const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
for(int j = 0; j < n_p1; j++){ //go through each n addition (n + 1)
sh_index = 3; //reset our shared memory index
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_DB1[k]; //store V character in shared memory
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current J sequence
k = const_d_J_base[Jindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
n_cnt--;
//sum++; //if we've made it this far, the sequences match.
sum += const_d_numOccurrenceDB1[Dindx];
nDnJ: continue; //if there is no match go to next n variance
} //end iterating through n sequences
} //end iterating through j sequences
} //end iterating through d sequences
//---------------------------------------------------------------------------------
//Compare our InVivo Sequences to n with all full chewbacks
//---------------------------------------------------------------------------------
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != c_n) goto n_only_done;
////////////////////////////////////////////////////
//begin generating sequences with just n
////////////////////////////////////////////////////
sh_index = 3; //reset our shared memory index
//add n combination
for(int m = 0; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]) goto n_only_done; //jump to next iteration if a character does not match
sh_index++;
}
//sum++; //if we've made it this far, the sequences match.
sum += c_DB_Full_Chew_Occur;
n_only_done:
//-------------------------------------------------------------------------------------------------------
//If only 1 thread-block, then we can write results to RAM using just InVivo sequence number
//-------------------------------------------------------------------------------------------------------
if(blockDim.x == 1){ //if there is only 1 thread per block, just use i as global memory index. No need for reduction
d_Results[i] = sum;
}
//-------------------------------------------------------------------------------------------------------
// Perform Reduction of Results if more than 1 thread-block
//-------------------------------------------------------------------------------------------------------
//reduction for current InVivo sequence in shared memory
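		//(tree reduction in shared memory; correct only when blockDim.x is a power of two)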
if(blockDim.x > 1){
result_sm[threadIdx.x] = sum; //write a threads sum to the shared memory
__syncthreads(); //make sure all sums have been written before proceeding
int half = blockDim.x/2;
while(1){ //how many reductions we need
if(threadIdx.x < half){ //only certain threads perform reduction
result_sm[threadIdx.x] += result_sm[threadIdx.x + half];
}
__syncthreads();
if(half == 1) break;
half = half/2;
}
__syncthreads();
//write results to the global memory. Each thread-block writes 1 result for each InVivo Sequence i
if(threadIdx.x == 0){ //we need only 1 thread in the thread block to write its result
d_Results[i*gridDim.x + blockIdx.x] = result_sm[0]; //write our consolidated result into the global memory
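				//each block leaves one partial sum per in-vivo sequence; these still
				//need to be summed across blocks once the kernel has finished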
}
} //end result reduction
} //end iterating through InVivo Sequences
return;
} //kernel done
#endif // #ifndef _TNT_KERNEL_H_
| fe763533c46b820e1a8b812d84dc3cfddce9c00d.cu | #ifndef _TNT_KERNEL_H_
#define _TNT_KERNEL_H_
#include <stdio.h>
//DB information
__constant__ int const_numDB1 = 169; //total number of DB1 chewbacks
__constant__ char const_d_DB1[1448]; //constant memory allocation for DB chewbacks minus full chew back
__constant__ int const_d_DB1_base[169]; //constant memory contains location of each starting sequence in d_DB1
__constant__ int const_d_numOccurrenceDB1[169]; //Number of ways for particular DB1 chewback
__constant__ int const_d_numUniqueCharDB1[169]; //number of characters in a unique occurence of DB1
//V information
__constant__ int const_numV = 342; //total number of V sequences in all V files
__constant__ char const_d_V[3107]; //holds all V chewback sequences
__constant__ int const_d_V_base[342]; //contains the starting index of each V sequence
__constant__ int const_d_numUniqueCharV[342]; //number of characters in a unique occurrence of V
//J information
__constant__ int const_numJ = 271; //total number of J sequences in all J files
__constant__ char const_d_J[3210]; //holds all J chewback sequences
__constant__ int const_d_J_base[271]; //contains the starting index of each J sequence
__constant__ int const_d_numUniqueCharJ[271]; //number of characters in a unique occurrence of J
__constant__ int c_DB_Full_Chew_Occur; //current V sequence
__constant__ int c_Vnum; //current V sequence
__constant__ int c_Dnum; //current D sequence
__constant__ int c_Jnum; //current J sequence
__constant__ int c_n; //current n value
__constant__ int c_V_Begin; //Base index for V sequences
__constant__ int c_V_End; //End index for V sequences
__constant__ int c_J_Begin; //Base index for J sequences
__constant__ int c_J_End; //End index for J sequences
__constant__ int const_d_VJ_Pairs[NUM_V_FILES*NUM_J_FILES];
__constant__ int const_VJ_Pair_Base[NUM_V_FILES*NUM_J_FILES];
__constant__ int c_NUM_V_FILES = 20;
__constant__ int c_NUM_J_FILES = 12;
/////////////////////////////////////////////////
//kernel for 64 threads or less
/////////////////////////////////////////////////
__global__ void
TNT_kernel_InVivo64(unsigned int* d_Results, char* d_InVivo_cp64)
{
volatile __shared__ char iterSeq_sm[64]; //the thread block size we will use for this kernel is 64
volatile __shared__ int result_sm[512]; //the max thread-block size
//The four possible bases
char base[4] = {'A', 'T', 'G', 'C'};
char nSeq[12]; //will hold a single n combination
int Vnum = c_Vnum; //current V file
int Jnum = c_Jnum; //current J file
int sh_index; //used as a shared memory index
int sum; //holds an iterative sum for result
char tmpChar; //used to temporarily hold a character
//obtain a unique global index for each thread in the grid
unsigned int g_tid = blockIdx.x*blockDim.x + threadIdx.x;
if(c_n){
nSeq[0] = base[g_tid%4]; //n = 1
nSeq[1] = base[(g_tid+(g_tid/4))%4]; //n = 2
nSeq[2] = base[(g_tid+(g_tid/16))%4]; //n = 3
nSeq[3] = base[(g_tid+(g_tid/64))%4]; //n = 4
nSeq[4] = base[(g_tid+(g_tid/256))%4]; //n = 5
nSeq[5] = base[(g_tid+(g_tid/1024))%4]; //n = 6
nSeq[6] = base[(g_tid+(g_tid/4096))%4]; //n = 7
nSeq[7] = base[(g_tid+(g_tid/16384))%4]; //n = 8
nSeq[8] = base[(g_tid+(g_tid/65536))%4]; //n = 9
nSeq[9] = base[(g_tid+(g_tid/262144))%4]; //n = 10
nSeq[10] = base[(g_tid+(g_tid/1048576))%4]; //n = 11
nSeq[11] = base[(g_tid+(g_tid/4194304))%4]; //n = 12
}
//get the number of InVivo VJ sequences we need to go through
int num_Seqs = const_d_VJ_Pairs[Vnum*12 + Jnum]; //multiply by 12. Number of J files.
//int whichSeq; //which sequence is our current thread-block working on in the scope of current VJ
int seqLen; //length of our current sequence
int pairBase = const_VJ_Pair_Base[Vnum*12 + Jnum] * 64; //The base address for a given VJ pair
//iterate through all InVivo combinations for current VJ pair
for(int i = 0; i < num_Seqs; i++){
result_sm[threadIdx.x] = 0;
sum = 0; //reset our result
__syncthreads();
		//store an InVivo combination into the shared memory "iterSeq_sm[]"
if(blockDim.x < 64){ //iter through VJ seq if block dim < 64. There's only 1 block
for(int j = 0; j < (64 / blockDim.x); j++){ //iterations = sequence allocation / block size
int k = j*blockDim.x+threadIdx.x; //create local SM index
int gl_index = (pairBase + i*64) + k; //create global memory index
iterSeq_sm[k] = d_InVivo_cp64[gl_index]; //read the current InVivo sequence from the global memory
}
}
else{ //only threads < 64 will read inVivo data
if(threadIdx.x < 64){
int gl_index = (pairBase + i*64) + threadIdx.x; //create global memory index
iterSeq_sm[threadIdx.x] = d_InVivo_cp64[gl_index];//read the current InVivo sequence from the global memory
}
}
//if(blockDim.x > 1)
__syncthreads();
//get the length of current sequence for all threads in current thread-block
seqLen = (int)iterSeq_sm[2];
//set our shared memory index to the base of the sequence characters in shared array
sh_index = 3;
int n_p1 = c_n + 1;
int n_cnt, k;
int length; //length of each sequence we generate
bool Vmatch = 1; //Is there a V sequence match?
bool seqMatch = 1; //Is the entire sequence a match?
//////////////////////////////////////////////////////////////////////////////////
//First compare our InVivo Sequences to VnDnJ combinations with no full chewbacks
//////////////////////////////////////////////////////////////////////////////////
		for(int Vindx = c_V_Begin; Vindx < c_V_End; Vindx++){ //go through relevant V sequences
Vmatch = 1; //assume V is a match before we check it
seqMatch = 1;
/////////////////////////////////////////////////////////
//Compare InVivo Sequence to Vn comb with D and J chewed
/////////////////////////////////////////////////////////
length = const_d_numUniqueCharV[Vindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen == length){
sh_index = 3; //reset our shared memory index
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){Vmatch = 0; break;} //End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0) continue; //check the next V sequence
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if (seqMatch)
sum += c_DB_Full_Chew_Occur; //sum++; //if we've made it this far, the sequences match.
}
if(Vmatch == 0) continue;
///////////////////////////////////
//check D and J combinations
///////////////////////////////////
for(int Dindx = 0; Dindx < const_numDB1 && Vmatch; Dindx++){ //go through all D sequences
/////////////////////////////////////////
//compare VnDn with J fully chewed back
/////////////////////////////////////////
length = const_d_numUniqueCharV[Vindx] + const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen == length){
for(int j = 0; j < n_p1 && Vmatch; j++){ //go through each n addition (n + 1)
seqMatch = 1; //Assume initially a sequence match
sh_index = 3; //reset our shared memory index
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){Vmatch = 0; break;}//End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_DB1[k]; //store V character in shared memory
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
n_cnt--;
sum += const_d_numOccurrenceDB1[Dindx]; //if we've made it this far, the sequences match.
} //end iterating through n sequences
} //end checking VnDn comparisons
///////////////////////////////////
//check D J combinations
///////////////////////////////////
				for(int Jindx = c_J_Begin; Jindx < c_J_End && Vmatch; Jindx++){ //go through relevant J sequences
length = const_d_numUniqueCharV[Vindx] + const_d_numUniqueCharJ[Jindx] + const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
for(int j = 0; j < n_p1; j++){ //go through each n addition (n + 1)
seqMatch = 1; //Assume initially a sequence match
sh_index = 3; //reset our shared memory index
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
							if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; Vmatch = 0; break;}	//End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0)break; //exit current v comparisons if V is not a match
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
						for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){	//go through each character in current D sequence
							tmpChar = const_d_DB1[k];	//load a D character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current J sequence
						k = const_d_J_base[Jindx];	//starting address of J sequence
						for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){	//go through each character in current J sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
n_cnt--;
sum += const_d_numOccurrenceDB1[Dindx]; //if we've made it this far, the sequences match.
} //end iterating through n sequences
} //end iterating through j sequences
} //end iterating through d sequences
} //end iterating through v sequences
//---------------------------------------------------------------------------------
//Compare our InVivo Sequences to VnJ with D full chewback
//---------------------------------------------------------------------------------
		for(int Vindx = c_V_Begin; Vindx < c_V_End; Vindx++){	//go through relevant V sequences
Vmatch = 1;
			for(int Jindx = c_J_Begin; Jindx < c_J_End && Vmatch; Jindx++){	//go through relevant J sequences
length = const_d_numUniqueCharV[Vindx] + const_d_numUniqueCharJ[Jindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
sh_index = 3; //reset our shared memory index
seqMatch = 1;
k = const_d_V_base[Vindx]; //starting address of V sequence
for(int m = 0; m < const_d_numUniqueCharV[Vindx]; m++){ //go through each character in current V sequence
tmpChar = const_d_V[k]; //load a V character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){Vmatch = 0; break;} //End V comparisons
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(Vmatch == 0) continue;
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++;
}
}
if(seqMatch == 0) continue;
//glue current J sequence
				k = const_d_J_base[Jindx];	//starting address of J sequence
				for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){	//go through each character in current J sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]){seqMatch = 0; break;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(seqMatch == 0) continue;
sum += c_DB_Full_Chew_Occur; //if we've made it this far, the sequences match.
} //end iterating through j sequences
} //end iterating through v sequences
//---------------------------------------------------------------------------------
//Compare our InVivo Sequences to nDn combinations with V and J fully chewed
//---------------------------------------------------------------------------------
for(int Dindx = 0; Dindx < const_numDB1; Dindx++){ //go through all D sequences
length = const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
for(int j = 0; j < n_p1; j++){ //go through each n addition (n + 1)
sh_index = 3; //reset our shared memory index
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDn;} //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
				for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){	//go through each character in current D sequence
					tmpChar = const_d_DB1[k];	//load a D character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDn;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDn;} //jump to next iteration if a character does not match
sh_index++;
}
}
n_cnt--;
sum += const_d_numOccurrenceDB1[Dindx]; //if we've made it this far, the sequences match.
nDn: continue; //if there is no match go to next n variance
} //end iterating through n sequences
} //end iterating through d sequences
//-----------------------------------------------------------------------------------
//Compare our InVivo Sequences to nJ combinations with V and D chewed
//-----------------------------------------------------------------------------------
		for(int Jindx = c_J_Begin; Jindx < c_J_End; Jindx++){	//go through relevant J sequences
length = const_d_numUniqueCharJ[Jindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
sh_index = 3; //reset our shared memory index
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]) goto nJ; //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current J sequence
			k = const_d_J_base[Jindx];	//starting address of J sequence
			for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){	//go through each character in current J sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]) goto nJ; //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
//sum++; //if we've made it this far, the sequences match.
sum += c_DB_Full_Chew_Occur;
nJ: continue; //if there is no match go to next n variance
} //end iterating through j sequences
//----------------------------------------------------------------------------------
//First compare our InVivo Sequences to nDnJ combinations with no full chewbacks
//----------------------------------------------------------------------------------
for(int Dindx = 0; Dindx < const_numDB1; Dindx++){ //go through all D sequences
			for(int Jindx = c_J_Begin; Jindx < c_J_End; Jindx++){	//go through relevant J sequences
length = const_d_numUniqueCharJ[Jindx] + const_d_numUniqueCharDB1[Dindx] + c_n;
n_cnt = c_n;
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != length) continue;
////////////////////////////////////////////////////
//begin generating sequences with no full chewbacks
////////////////////////////////////////////////////
for(int j = 0; j < n_p1; j++){ //go through each n addition (n + 1)
sh_index = 3; //reset our shared memory index
if(c_n != 0){
//add n combination
for(int m = 0; m < n_cnt; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current D sequence
k = const_d_DB1_base[Dindx]; //starting address of D sequence
				for(int m = 0; m < const_d_numUniqueCharDB1[Dindx]; m++){	//go through each character in current D sequence
					tmpChar = const_d_DB1[k];	//load a D character into a temp variable
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
if(c_n != 0){
//add n combination
for(int m = n_cnt; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++;
}
}
//glue current J sequence
				k = const_d_J_base[Jindx];	//starting address of J sequence
				for(int m = 0; m < const_d_numUniqueCharJ[Jindx]; m++){	//go through each character in current J sequence
tmpChar = const_d_J[k];
if(tmpChar != iterSeq_sm[sh_index]){n_cnt--; goto nDnJ;} //jump to next iteration if a character does not match
sh_index++; //increment shared memory index
k++; //increment for next character
}
n_cnt--;
//sum++; //if we've made it this far, the sequences match.
sum += const_d_numOccurrenceDB1[Dindx];
nDnJ: continue; //if there is no match go to next n variance
} //end iterating through n sequences
} //end iterating through j sequences
} //end iterating through d sequences
//---------------------------------------------------------------------------------
//Compare our InVivo Sequences to n with all full chewbacks
//---------------------------------------------------------------------------------
//check to see if sequence to create is the same length as the InVivo sequence
if(seqLen != c_n) goto n_only_done;
////////////////////////////////////////////////////
//begin generating sequences with just n
////////////////////////////////////////////////////
sh_index = 3; //reset our shared memory index
//add n combination
for(int m = 0; m < c_n; m++){
tmpChar = nSeq[m];
if(tmpChar != iterSeq_sm[sh_index]) goto n_only_done; //jump to next iteration if a character does not match
sh_index++;
}
//sum++; //if we've made it this far, the sequences match.
sum += c_DB_Full_Chew_Occur;
n_only_done:
//-------------------------------------------------------------------------------------------------------
	//If only one thread per block, then we can write results to RAM using just the InVivo sequence number
//-------------------------------------------------------------------------------------------------------
if(blockDim.x == 1){ //if there is only 1 thread per block, just use i as global memory index. No need for reduction
d_Results[i] = sum;
}
//-------------------------------------------------------------------------------------------------------
	// Perform Reduction of Results if more than one thread per block
//-------------------------------------------------------------------------------------------------------
//reduction for current InVivo sequence in shared memory
if(blockDim.x > 1){
		result_sm[threadIdx.x] = sum;	//write this thread's sum to shared memory
__syncthreads(); //make sure all sums have been written before proceeding
int half = blockDim.x/2;
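		//note: this tree reduction assumes blockDim.x is a power of two; otherwise
		//some threads' partial sums are never folded into result_sm[0]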
		while(1){		//tree-reduce until a single partial sum remains
if(threadIdx.x < half){ //only certain threads perform reduction
result_sm[threadIdx.x] += result_sm[threadIdx.x + half];
}
__syncthreads();
if(half == 1) break;
half = half/2;
}
__syncthreads();
//write results to the global memory. Each thread-block writes 1 result for each InVivo Sequence i
if(threadIdx.x == 0){ //we need only 1 thread in the thread block to write its result
d_Results[i*gridDim.x + blockIdx.x] = result_sm[0]; //write our consolidated result into the global memory
}
} //end result reduction
} //end iterating through InVivo Sequences
return;
} //kernel done
#endif // #ifndef _TNT_KERNEL_H_
|
268667c0aef789e4cf9e8ab5c76570cf7173bd03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** Kernels for convUp, convDown, convOutp, maxpool, avgpool, maxpoolundo,
* avgpoolundo.
* These kernels are 10-20% slower than cuda-convnet2, but have no constraints
* on number of channels and support rectangular images and rectangular kernels.
* They use hipblasSgemm for convUp, convDown, convOutp.
* Data layout : Column-major
* data : (num_images, image_size_x, image_size_y, num_input_channels)
* filters : (num_output_channels, kernel_size_x, kernel_size_y, num_input_channels)
*/
#include "cudamat_conv_gemm.cuh"
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
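// Illustrative sketch (not used by the kernels below; the helper name is ours):
// with the column-major layout described in the header comment, element
// (im, x, y, c) of an image tensor lives at flat offset
//   im + num_images * (x + image_size_x * (y + image_size_y * c))
// which is the arithmetic kExpand/kContract inline by hand (kPool and the
// pool-undo kernels extend it with an extra time dimension).
static inline __host__ __device__ long ImageOffsetSketch(
    int im, int x, int y, int c,
    int num_images, int image_size_x, int image_size_y) {
  return im + (long)num_images *
              (x + (long)image_size_x * (y + (long)image_size_y * c));
}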
size_t free_space_ = 0;
void EstimateFreeSpace() {
/*
size_t total;
hipMemGetInfo(&free_space_, &total);
//free_space_ >>= 1;
  if (free_space_ > 1<<20) {
free_space_ -= 1 << 20; // Just remove 1 MB. This seems to work!
}
*/
// How to get free contiguous space ?
free_space_ = MAX_MEMORY_BYTES;
}
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void _Scale(cudamat* mat, float scale) {
if (scale == 0) {
hipMemset(mat->data_device, 0, sizeof(float) * mat->size[0] * mat->size[1]);
} else if (scale != 1) {
hipblasSscal(mat->size[0] * mat->size[1], scale, mat->data_device, 1);
}
}
class AvgPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() const {
return 0;
}
__device__ inline float output(const float a, const int regionSize) const {
return a / regionSize;
}
};
class MaxPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fmaxf(a, b);
}
__device__ inline float getBaseValue() const {
return -2e38;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
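// The poolers above are passed by value to kPool<Pooler>, so the reduction op
// (max vs. running sum) and the final output() normalization are chosen at
// compile time instead of being branched on inside the inner loops.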
__global__ void kExpand(float *images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
int module_id_x = src_module_id % num_modules_x;
int module_id_y = src_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * (dst_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
target_id = num_images * num_modules_batch * (x + kernel_size_x * y);
source_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = 0;
}
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = images[source_id + im];
}
}
__syncthreads();
}
}
}
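// kExpand is the im2col-style gather used by the GEMM-based convolutions below:
// for a batch of output locations it builds a matrix of shape
//   (num_images * num_modules_batch) x (kernel_size_x * kernel_size_y * num_input_channels)
// so convUp reduces to a single hipblasSgemm against the
//   num_output_channels x (kernel_size_x * kernel_size_y * num_input_channels)
// filter matrix (see _convUpGemm). Out-of-bounds (padding) taps are written as zeros.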
template <class Pooler>
__global__ void kPool(float* images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x, int image_size_t,
int num_modules_y, int num_modules_x, int num_modules_t,
int kernel_size_y, int kernel_size_x, int kernel_size_t,
int padding_y, int padding_x, int padding_t,
int stride_y, int stride_x, int stride_t,
float scaleOutput, Pooler pooler) {
const int color = blockIdx.y;
const int num_colors = gridDim.y;
const int num_modules = num_modules_y * num_modules_x * num_modules_t;
long source_id, target_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * num_modules_x * num_modules_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = (module_id / num_modules_x) % num_modules_y;
int module_id_t = (module_id / num_modules_x) / num_modules_y;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int startT = module_id_t * stride_t + padding_t;
int endX = startX + kernel_size_x;
int endY = startY + kernel_size_y;
int endT = startT + kernel_size_t;
target_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t));
startX = MAX(startX, 0);
startY = MAX(startY, 0);
startT = MAX(startT, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
endT = MIN(endT , image_size_t);
int regionSize = (endX - startX) * (endY - startY) * (endT - startT);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = pooler.getBaseValue();
for (int T = startT; T < endT; T++) {
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
source_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T));
val = pooler(val, images[source_id + im]);
}
}
}
targets[target_id + im] = scaleOutput * pooler.output(val, regionSize);
}
}
__syncthreads();
}
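// Note: regionSize counts only the in-bounds taps of the pooling window, so at
// image borders AvgPooler divides by the clipped window size rather than by the
// full kernel_size_y * kernel_size_x * kernel_size_t volume.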
__global__ void kAvgPoolUndo(float *derivs, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x, int image_size_t,
int num_modules_y, int num_modules_x, int num_modules_t,
int kernel_size_y, int kernel_size_x, int kernel_size_t,
int padding_y, int padding_x, int padding_t,
int stride_y, int stride_x, int stride_t, float scaleOutput) {
const int color = blockIdx.y;
const int num_colors = gridDim.y;
const int num_modules = num_modules_y * num_modules_x * num_modules_t;
long source_id, target_id;
derivs += num_images * num_modules_x * num_modules_y * color;
targets += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = (module_id / num_modules_x) % num_modules_y;
int module_id_t = (module_id / num_modules_x) / num_modules_y;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int startT = module_id_t * stride_t + padding_t;
int endX = startX + kernel_size_x;
int endY = startY + kernel_size_y;
int endT = startT + kernel_size_t;
source_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t));
startX = MAX(startX, 0);
startY = MAX(startY, 0);
startT = MAX(startT, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
endT = MIN(endT , image_size_t);
int regionSize = (endX - startX) * (endY - startY) * (endT - startT);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im] / regionSize;
for (int T = startT; T < endT; T++) {
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
target_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)) + im;
atomicAdd(&targets[target_id], val);
__syncthreads();
}
}
}
}
}
}
__global__ void kMaxPoolUndo(float * images, float *derivs, float* maxes, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x, int image_size_t,
int num_modules_y, int num_modules_x, int num_modules_t,
int kernel_size_y, int kernel_size_x, int kernel_size_t,
int padding_y, int padding_x, int padding_t,
int stride_y, int stride_x, int stride_t, float scaleOutput) {
const int color = blockIdx.y;
const int num_colors = gridDim.y;
const int num_modules = num_modules_y * num_modules_x * num_modules_t;
long source_id, target_id;
derivs += num_images * num_modules_x * num_modules_y * color;
maxes += num_images * num_modules_x * num_modules_y * color;
targets += num_images * image_size_x * image_size_y * color;
images += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = (module_id / num_modules_x) % num_modules_y;
int module_id_t = (module_id / num_modules_x) / num_modules_y;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int startT = module_id_t * stride_t + padding_t;
int endX = startX + kernel_size_x;
int endY = startY + kernel_size_y;
int endT = startT + kernel_size_t;
source_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t));
startX = MAX(startX, 0);
startY = MAX(startY, 0);
startT = MAX(startT, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
endT = MIN(endT , image_size_t);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im];
for (int T = startT; T < endT; T++) {
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
target_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)) + im;
if (images[target_id] == maxes[source_id + im]) {
atomicAdd(&targets[target_id], val);
}
__syncthreads();
}
}
}
}
}
}
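// Gradient routing: every input position whose value equals the pooled maximum
// receives the full scaled derivative via atomicAdd, so ties are not split
// between the tying positions.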
__global__ void kContract(float *expanded_data, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int dst_module_id = module_id_offset + blockIdx.x;
int src_module_id = blockIdx.x;
int module_id_x = dst_module_id % num_modules_x;
int module_id_y = dst_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
targets += num_images * image_size_x * image_size_y * color;
expanded_data += num_images * (src_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
source_id = num_images * num_modules_batch * (x + kernel_size_x * y);
target_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
// do nothing.
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
atomicAdd(&targets[target_id + im], expanded_data[source_id + im]);
__syncthreads();
}
}
}
}
}
__global__ void kWriteRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = beta * data[im];
}
}
__global__ void kReadRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset) {
int c = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
data += num_images * (src_module_id + c * num_modules);
target += num_images * (dst_module_id + c * num_modules_batch);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = data[im];
}
}
__global__ void kWriteRowsMult(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float alpha, float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = alpha * target[im] + beta * data[im];
}
}
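// kReadRows / kWriteRows / kWriteRowsMult shuttle per-module rows between the
// full (num_images x num_modules x channels) buffers in global memory and the
// compact temporaries holding num_modules_batch modules around each GEMM call;
// kWriteRowsMult additionally blends into the existing target (alpha*old + beta*new).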
__global__ void kCrossMapDenoms(float* data, float* denoms,
int num_locs, int batch_locs, int batch_offset, float addScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : -k/2 + j;
int end = MIN(num_filters, start + k);
start = MAX(0, start);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
denoms[j * batch_locs] = 1 + addScale * sum;
}
}
}
__global__ void kCrossMapRNorm(float* data, float* target,
int num_locs, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += loc_id;
target += loc_id;
if (loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : -k/2 + j;
int end = MIN(num_filters, start + k);
start = MAX(0, start);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
target[j * num_locs] = data[j * num_locs] * __powf(1 + addScale * sum, -powScale);
}
}
}
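// For each location and filter j this computes
//   target[j] = data[j] * (1 + addScale * sum_{i in window(j)} data[i]^2)^(-powScale)
// where window(j) spans k (= sizeF) consecutive filters, either in fixed blocks
// of k (blocked == true) or roughly centered on j (blocked == false).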
__global__ void kCrossMapRNormUndo(float* data, float* deriv, float* denoms, float* target,
int num_locs, int batch_locs, int batch_offset, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
target += batch_offset + loc_id;
deriv += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : -k + k/2 + j + 1;
int end = MIN(num_filters, start + k);
start = MAX(0, start);
for (int i = start; i < end; i++) {
sum += deriv[i * num_locs] * data[i * num_locs] * __powf(denoms[i * batch_locs], -powScale - 1);
}
target[j * num_locs] = deriv[j * num_locs] * __powf(denoms[j * batch_locs], -powScale) -
2 * addScale * powScale * data[j * num_locs] * sum;
}
}
}
void _convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D images_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK);
float *expanded_images = NULL, *expanded_target = NULL;
int num_modules_batch;
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X);
//max_batch_size = MAX(max_batch_size, 1);
//printf("Free space %ld max batch size %d\n", free_space, max_batch_size);
hipError_t err1, err2;
err1 = hipMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&expanded_target, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Could not allocate memory.\n");
/*
if (hipSuccess == err1) hipFree(expanded_images);
if (hipSuccess == err2) hipFree(expanded_target);
err1 = hipMalloc((void**)&expanded_images, input_memory_size);
err2 = hipMalloc((void**)&expanded_target, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
*/
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
int module_id_start = 0;
float* w = filters->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 threads(num_threads_x);
dim3 blocks = dim3(this_num_modules_batch, num_input_channels);
hipLaunchKernelGGL(( kExpand), dim3(blocks), dim3(threads), 0, 0, images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
hipblasSgemm('n', 't',
num_images * this_num_modules_batch, num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
1, expanded_images, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
dim3 blocks2 = dim3(this_num_modules_batch, num_output_channels);
if (scaleTargets == 0) {
hipLaunchKernelGGL(( kWriteRows), dim3(blocks2), dim3(threads), 0, 0, expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleOutput);
} else {
hipLaunchKernelGGL(( kWriteRowsMult), dim3(blocks2), dim3(threads), 0, 0, expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleTargets, scaleOutput);
}
module_id_start += this_num_modules_batch;
}
hipFree(expanded_images);
hipFree(expanded_target);
getLastCudaError("convUpGemm: kernel execution failed");
}
void _convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D derivs_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK);
float *expanded_target = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_target, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X);
max_batch_size = MAX(max_batch_size, 1);
hipError_t err1, err2;
err1 = hipMalloc((void**)&expanded_target, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory\n");
/*
if (hipSuccess == err1) hipFree(expanded_target);
if (hipSuccess == err2) hipFree(expanded_derivs);
err1 = hipMalloc((void**)&expanded_target, input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
*/
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
_Scale(targets, scaleTargets);
int module_id_start = 0;
float* w = filters->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
dim3 threads(num_threads_x);
hipLaunchKernelGGL(( kReadRows), dim3(blocks), dim3(threads), 0, 0, derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
hipblasSgemm('n', 'n',
num_images * this_num_modules_batch, kernel_size_x * kernel_size_y * num_input_channels,
num_output_channels,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
hipLaunchKernelGGL(( kContract), dim3(blocks2), dim3(threads), 0, 0, expanded_target, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
module_id_start += this_num_modules_batch;
}
hipFree(expanded_derivs);
hipFree(expanded_target);
getLastCudaError("convDownGemm: kernel execution failed");
}
void _convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D images_shape, Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3Mult = targets_shape.shape[3];
int kernel_size_y2 = targets_shape.shape[2];
int kernel_size_x2 = targets_shape.shape[1];
int num_output_channels3 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels * filterModuleMult == num_input_channels3Mult);
assert (num_images == images->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels3Mult == targets->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK);
float *expanded_images = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_images, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X);
max_batch_size = MAX(max_batch_size, 1);
hipError_t err1, err2;
err1 = hipMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory.\n");
/*
if (hipSuccess == err1) hipFree(expanded_images);
if (hipSuccess == err2) hipFree(expanded_derivs);
err1 = hipMalloc((void**)&expanded_images, input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
*/
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
_Scale(targets, scaleTargets);
int module_id_start = 0;
dim3 threads(num_threads_x);
float* dw = targets->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
hipLaunchKernelGGL(( kReadRows), dim3(blocks), dim3(threads), 0, 0, derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
hipLaunchKernelGGL(( kExpand), dim3(blocks2), dim3(threads), 0, 0, images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) dw += num_output_channels * input_size;
hipblasSgemm('t', 'n',
num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
num_images * this_num_modules_batch,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
expanded_images, num_images * this_num_modules_batch,
1, dw, num_output_channels);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
module_id_start += this_num_modules_batch;
}
hipFree(expanded_derivs);
hipFree(expanded_images);
getLastCudaError("convOutpGemm: kernel execution failed");
}
template <class Pooler>
void _convPoolGemm(cudamat* images, cudamat* targets,
Shape4D images_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, Pooler pooler) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int kernel_size_t = conv_desc.kernel_size_t;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int stride_t = conv_desc.stride_t;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int padding_t = conv_desc.padding_t;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int image_size_t = num_input_channels2 / num_input_channels;
int num_modules_t = num_output_channels2 / num_output_channels;
int num_modules = num_modules_y * num_modules_x * num_modules_t;
// Consistency checks.
assert (num_images == num_images2);
assert (num_input_channels2 % image_size_t == 0);
assert (num_input_channels == num_input_channels2 / image_size_t);
assert (num_output_channels2 % num_modules_t == 0);
assert (num_output_channels == num_output_channels2 / num_modules_t);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_t * image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
_Scale(targets, scaleTargets);
dim3 threads(NUM_THREADS_PER_BLOCK);
int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
hipLaunchKernelGGL(( kPool), dim3(blocks), dim3(threads), 0, 0, images->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x, image_size_t,
num_modules_y, num_modules_x, num_modules_t,
kernel_size_y, kernel_size_x, kernel_size_t,
padding_y, padding_x, padding_t,
stride_y, stride_x, stride_t, scaleOutput,
pooler);
getLastCudaError("convLocalPool: kernel execution failed");
}
void _avgPoolUndoGemm(cudamat* derivs, cudamat* targets,
Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int kernel_size_t = conv_desc.kernel_size_t;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int stride_t = conv_desc.stride_t;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int padding_t = conv_desc.padding_t;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int image_size_t = num_input_channels2 / num_input_channels;
int num_modules_t = num_output_channels2 / num_output_channels;
int num_modules = num_modules_y * num_modules_x * num_modules_t;
// Consistency checks.
assert (num_images == num_images2);
assert (num_input_channels2 % image_size_t == 0);
assert (num_input_channels == num_input_channels2 / image_size_t);
assert (num_output_channels2 % num_modules_t == 0);
assert (num_output_channels == num_output_channels2 / num_modules_t);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (image_size_t * image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
_Scale(targets, scaleTargets);
dim3 threads(NUM_THREADS_PER_BLOCK);
int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
hipLaunchKernelGGL(( kAvgPoolUndo), dim3(blocks), dim3(threads), 0, 0, derivs->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x, image_size_t,
num_modules_y, num_modules_x, num_modules_t,
kernel_size_y, kernel_size_x, kernel_size_t,
padding_y, padding_x, padding_t,
stride_y, stride_x, stride_t, scaleOutput);
getLastCudaError("avgPoolUndo: kernel execution failed");
}
void _maxPoolUndoGemm(cudamat* images, cudamat* derivs, cudamat* maxes,
cudamat* targets, Shape4D targets_shape,
Shape4D derivs_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int kernel_size_t = conv_desc.kernel_size_t;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int stride_t = conv_desc.stride_t;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int padding_t = conv_desc.padding_t;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int image_size_t = num_input_channels2 / num_input_channels;
int num_modules_t = num_output_channels2 / num_output_channels;
int num_modules = num_modules_y * num_modules_x * num_modules_t;
// Consistency checks.
assert (num_images == num_images2);
assert (num_input_channels2 % image_size_t == 0);
assert (num_input_channels == num_input_channels2 / image_size_t);
assert (num_output_channels2 % num_modules_t == 0);
assert (num_output_channels == num_output_channels2 / num_modules_t);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (image_size_t * image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
_Scale(targets, scaleTargets);
dim3 threads(NUM_THREADS_PER_BLOCK);
int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
hipLaunchKernelGGL(( kMaxPoolUndo), dim3(blocks), dim3(threads), 0, 0, images->data_device, derivs->data_device,
maxes->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x, image_size_t,
num_modules_y, num_modules_x, num_modules_t,
kernel_size_y, kernel_size_x, kernel_size_t,
padding_y, padding_x, padding_t,
stride_y, stride_x, stride_t, scaleOutput);
getLastCudaError("maxPoolUndo: kernel execution failed");
}
void _CrossMapRNorm(cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int num_blocks = DIVUP(num_locs, NUM_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kCrossMapRNorm), dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, 0, images->data_device, targets->data_device,
num_locs, addScale, powScale, num_filters, sizeF, blocked);
getLastCudaError("_CrossMapRNorm: kernel execution failed");
}
void _CrossMapRNormUndo(cudamat* outGrads, cudamat* images, cudamat* targets,
int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int batch_offset = 0;
float *denoms;
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (sizeof(float) * num_filters);
max_batch_size = MIN(num_locs, max_batch_size);
max_batch_size = MIN(num_locs, MAX_BLOCKS_X);
hipError_t err;
err = hipMalloc((void**)&denoms, max_batch_size * num_filters * sizeof(float));
if (hipSuccess != err) {
printf("Out of memory on GPU!\n");
}
int num_batches = DIVUP(num_locs, max_batch_size);
for (int i = 0; i < num_batches; i++) {
int batch_size = MIN(max_batch_size, num_locs - batch_offset);
int num_blocks = DIVUP(batch_size, NUM_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kCrossMapDenoms), dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, 0, images->data_device, denoms, num_locs, batch_size,
batch_offset, addScale, num_filters, sizeF, blocked);
hipLaunchKernelGGL(( kCrossMapRNormUndo), dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, 0, images->data_device, outGrads->data_device, denoms,
targets->data_device, num_locs, batch_size, batch_offset,
addScale, powScale, num_filters, sizeF, blocked);
batch_offset += batch_size;
}
hipFree(denoms);
getLastCudaError("_CrossMapRNormUndo: kernel execution failed");
}
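// The undo pass first recomputes the (1 + addScale * sum) denominators with
// kCrossMapDenoms for a batch of locations, then applies the derivative in
// kCrossMapRNormUndo; batching keeps the temporary `denoms` buffer at
// max_batch_size * num_filters floats regardless of the full input size.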
#ifdef __cplusplus
extern "C" {
#endif
void convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
/*
printf("image shape %d %d %d %d\n", images_shape->shape[0], images_shape->shape[1], images_shape->shape[2], images_shape->shape[3]);
printf("filters shape %d %d %d %d\n", filters_shape->shape[0], filters_shape->shape[1], filters_shape->shape[2], filters_shape->shape[3]);
printf("targets shape %d %d %d %d\n", targets_shape->shape[0], targets_shape->shape[1], targets_shape->shape[2], targets_shape->shape[3]);
printf("Convolution : kernel_size_y %d kernel_size_x %d stride_y %d stride_x %d padding_y %d padding_x %d num_input_channels %d num_output_channels %d num_groups %d\n",
conv_desc.kernel_size_y, conv_desc.kernel_size_x, conv_desc.stride_x, conv_desc.stride_y, conv_desc.padding_y, conv_desc.padding_x, conv_desc.num_input_channels,
conv_desc.num_output_channels, conv_desc.num_groups);
*/
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, true);
}
void localUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, false);
}
void MaxPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
MaxPooler pooler;
_convPoolGemm<MaxPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void AvgPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
AvgPooler pooler;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void MaxPoolUndoGemm(cudamat* images, cudamat* maxGrads, cudamat* maxActs,
cudamat* targets, Shape4D* images_shape, Shape4D* maxGrads_shape,
ConvDesc conv_desc, float scaleTargets) {
_maxPoolUndoGemm(images, maxGrads, maxActs, targets, *images_shape,
*maxGrads_shape, conv_desc, scaleTargets, 1);
}
void AvgPoolUndoGemm(cudamat* avgGrads, cudamat* targets, Shape4D* avgGrads_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_avgPoolUndoGemm(avgGrads, targets, *avgGrads_shape, *targets_shape, conv_desc,
scaleTargets, 1);
}
void UpSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, int factor, float scaleTargets) {
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_avgPoolUndoGemm(images, targets, *images_shape, *targets_shape, conv_desc,
scaleTargets, factor * factor);
}
void DownSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, int factor) {
AvgPooler pooler = AvgPooler();
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, 0, 1, pooler);
}
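// UpSampleGemm and DownSampleGemm reuse the pooling kernels: downsampling by
// `factor` is average pooling with a factor x factor kernel and stride, and
// upsampling is the matching avg-pool undo scaled by factor * factor, which
// (away from clipped borders) replicates each input value over its block.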
void ResponseNormCrossMapGemm(
cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
_CrossMapRNorm(images, targets, num_filters, sizeF, addScale, powScale, blocked);
}
void ResponseNormCrossMapUndoGemm(
cudamat* outGrads, cudamat* inputs, cudamat* targets, int num_filters,
int sizeF, float addScale, float powScale, bool blocked) {
_CrossMapRNormUndo(outGrads, inputs, targets, num_filters, sizeF, addScale,
powScale, blocked);
}
void Scale(cudamat* mat, float scale) {
_Scale(mat, scale);
}
#ifdef __cplusplus
}
#endif
| 268667c0aef789e4cf9e8ab5c76570cf7173bd03.cu | /** Kernels for convUp, convDown, convOutp, maxpool, avgpool, maxpoolundo,
* avgpoolundo.
* These kernels are 10-20% slower than cuda-convnet2, but have no constraints
* on number of channels and support rectangular images and rectangular kernels.
* They use cublasSgemm for convUp, convDown, convOutp.
* Data layout : Column-major
* data : (num_images, image_size_x, image_size_y, num_input_channels)
* filters : (num_output_channels, kernel_size_x, kernel_size_y, num_input_channels)
*/
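//
// A minimal host-side usage sketch for the convolution entry points below.
// This is an illustration only: it assumes cudamat objects (images, filters,
// targets) that are already allocated on the GPU, and the Shape4D index
// meanings are read off the consistency checks in _convUpGemm. Padding follows
// the "start offset of the first module" convention used by kExpand, so it is
// typically zero or negative.
//
//   Shape4D images_shape, filters_shape, targets_shape;
//   images_shape.shape[0]  = num_images;           // images : (num_images, x, y, in_channels)
//   images_shape.shape[1]  = image_size_x;
//   images_shape.shape[2]  = image_size_y;
//   images_shape.shape[3]  = num_input_channels;
//   filters_shape.shape[0] = num_output_channels;  // filters: (out_channels, kx, ky, in_channels)
//   filters_shape.shape[1] = kernel_size_x;
//   filters_shape.shape[2] = kernel_size_y;
//   filters_shape.shape[3] = num_input_channels;
//   targets_shape.shape[0] = num_images;           // targets: (num_images, mod_x, mod_y, out_channels)
//   targets_shape.shape[1] = num_modules_x;
//   targets_shape.shape[2] = num_modules_y;
//   targets_shape.shape[3] = num_output_channels;
//
//   ConvDesc conv_desc;
//   conv_desc.kernel_size_y = kernel_size_y;
//   conv_desc.kernel_size_x = kernel_size_x;
//   conv_desc.stride_y = stride_y;
//   conv_desc.stride_x = stride_x;
//   conv_desc.padding_y = -pad;                    // start offset of the first module
//   conv_desc.padding_x = -pad;
//   conv_desc.num_input_channels  = num_input_channels;
//   conv_desc.num_output_channels = num_output_channels;
//   conv_desc.num_groups = 1;                      // only num_groups == 1 is supported
//
//   // scaleTargets == 0 overwrites targets; the wrapper fixes scaleOutput to 1.0.
//   convUpGemm(&images, &filters, &targets, &images_shape, &filters_shape,
//              &targets_shape, conv_desc, 0.0f);
//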
#include "cudamat_conv_gemm.cuh"
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
size_t free_space_ = 0;
void EstimateFreeSpace() {
/*
size_t total;
cudaMemGetInfo(&free_space_, &total);
//free_space_ >>= 1;
  if (free_space_ > 1<<20) {
free_space_ -= 1 << 20; // Just remove 1 MB. This seems to work!
}
*/
// How to get free contiguous space ?
free_space_ = MAX_MEMORY_BYTES;
}
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void _Scale(cudamat* mat, float scale) {
if (scale == 0) {
cudaMemset(mat->data_device, 0, sizeof(float) * mat->size[0] * mat->size[1]);
} else if (scale != 1) {
cublasSscal(mat->size[0] * mat->size[1], scale, mat->data_device, 1);
}
}
class AvgPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() const {
return 0;
}
__device__ inline float output(const float a, const int regionSize) const {
return a / regionSize;
}
};
class MaxPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fmaxf(a, b);
}
__device__ inline float getBaseValue() const {
return -2e38;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
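// kExpand performs an im2col-style gather for one batch of output modules:
// for every (module, kernel_x, kernel_y, input_channel) it copies the
// corresponding num_images input pixels contiguously, writing zeros for taps
// that fall outside the image. In column-major index form:
//
//   expanded[im + num_images * (module +
//            num_modules_batch * (x + kernel_size_x * (y + kernel_size_y * c)))]
//     = images[im + num_images * (X + image_size_x * (Y + image_size_y * c))]
//
// with X = module_x * stride_x + padding_x + x and
//      Y = module_y * stride_y + padding_y + y.
// The result is a (num_images * num_modules_batch) x
// (kernel_size_x * kernel_size_y * num_input_channels) matrix that
// _convUpGemm multiplies against the filter matrix with cublasSgemm.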
__global__ void kExpand(float *images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
int module_id_x = src_module_id % num_modules_x;
int module_id_y = src_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * (dst_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
target_id = num_images * num_modules_batch * (x + kernel_size_x * y);
source_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = 0;
}
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = images[source_id + im];
}
}
__syncthreads();
}
}
}
template <class Pooler>
__global__ void kPool(float* images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x, int image_size_t,
int num_modules_y, int num_modules_x, int num_modules_t,
int kernel_size_y, int kernel_size_x, int kernel_size_t,
int padding_y, int padding_x, int padding_t,
int stride_y, int stride_x, int stride_t,
float scaleOutput, Pooler pooler) {
const int color = blockIdx.y;
const int num_colors = gridDim.y;
const int num_modules = num_modules_y * num_modules_x * num_modules_t;
long source_id, target_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * num_modules_x * num_modules_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = (module_id / num_modules_x) % num_modules_y;
int module_id_t = (module_id / num_modules_x) / num_modules_y;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int startT = module_id_t * stride_t + padding_t;
int endX = startX + kernel_size_x;
int endY = startY + kernel_size_y;
int endT = startT + kernel_size_t;
target_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t));
startX = MAX(startX, 0);
startY = MAX(startY, 0);
startT = MAX(startT, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
endT = MIN(endT , image_size_t);
int regionSize = (endX - startX) * (endY - startY) * (endT - startT);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = pooler.getBaseValue();
for (int T = startT; T < endT; T++) {
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
source_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T));
val = pooler(val, images[source_id + im]);
}
}
}
targets[target_id + im] = scaleOutput * pooler.output(val, regionSize);
}
}
__syncthreads();
}
__global__ void kAvgPoolUndo(float *derivs, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x, int image_size_t,
int num_modules_y, int num_modules_x, int num_modules_t,
int kernel_size_y, int kernel_size_x, int kernel_size_t,
int padding_y, int padding_x, int padding_t,
int stride_y, int stride_x, int stride_t, float scaleOutput) {
const int color = blockIdx.y;
const int num_colors = gridDim.y;
const int num_modules = num_modules_y * num_modules_x * num_modules_t;
long source_id, target_id;
derivs += num_images * num_modules_x * num_modules_y * color;
targets += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = (module_id / num_modules_x) % num_modules_y;
int module_id_t = (module_id / num_modules_x) / num_modules_y;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int startT = module_id_t * stride_t + padding_t;
int endX = startX + kernel_size_x;
int endY = startY + kernel_size_y;
int endT = startT + kernel_size_t;
source_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t));
startX = MAX(startX, 0);
startY = MAX(startY, 0);
startT = MAX(startT, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
endT = MIN(endT , image_size_t);
int regionSize = (endX - startX) * (endY - startY) * (endT - startT);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im] / regionSize;
for (int T = startT; T < endT; T++) {
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
target_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)) + im;
atomicAdd(&targets[target_id], val);
__syncthreads();
}
}
}
}
}
}
__global__ void kMaxPoolUndo(float * images, float *derivs, float* maxes, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x, int image_size_t,
int num_modules_y, int num_modules_x, int num_modules_t,
int kernel_size_y, int kernel_size_x, int kernel_size_t,
int padding_y, int padding_x, int padding_t,
int stride_y, int stride_x, int stride_t, float scaleOutput) {
const int color = blockIdx.y;
const int num_colors = gridDim.y;
const int num_modules = num_modules_y * num_modules_x * num_modules_t;
long source_id, target_id;
derivs += num_images * num_modules_x * num_modules_y * color;
maxes += num_images * num_modules_x * num_modules_y * color;
targets += num_images * image_size_x * image_size_y * color;
images += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = (module_id / num_modules_x) % num_modules_y;
int module_id_t = (module_id / num_modules_x) / num_modules_y;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int startT = module_id_t * stride_t + padding_t;
int endX = startX + kernel_size_x;
int endY = startY + kernel_size_y;
int endT = startT + kernel_size_t;
source_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t));
startX = MAX(startX, 0);
startY = MAX(startY, 0);
startT = MAX(startT, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
endT = MIN(endT , image_size_t);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im];
for (int T = startT; T < endT; T++) {
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
target_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)) + im;
if (images[target_id] == maxes[source_id + im]) {
atomicAdd(&targets[target_id], val);
}
__syncthreads();
}
}
}
}
}
}
__global__ void kContract(float *expanded_data, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int dst_module_id = module_id_offset + blockIdx.x;
int src_module_id = blockIdx.x;
int module_id_x = dst_module_id % num_modules_x;
int module_id_y = dst_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
targets += num_images * image_size_x * image_size_y * color;
expanded_data += num_images * (src_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
source_id = num_images * num_modules_batch * (x + kernel_size_x * y);
target_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
// do nothing.
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
atomicAdd(&targets[target_id + im], expanded_data[source_id + im]);
__syncthreads();
}
}
}
}
}
__global__ void kWriteRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = beta * data[im];
}
}
__global__ void kReadRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset) {
int c = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
data += num_images * (src_module_id + c * num_modules);
target += num_images * (dst_module_id + c * num_modules_batch);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = data[im];
}
}
__global__ void kWriteRowsMult(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float alpha, float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = alpha * target[im] + beta * data[im];
}
}
__global__ void kCrossMapDenoms(float* data, float* denoms,
int num_locs, int batch_locs, int batch_offset, float addScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : -k/2 + j;
int end = MIN(num_filters, start + k);
start = MAX(0, start);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
denoms[j * batch_locs] = 1 + addScale * sum;
}
}
}
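// kCrossMapDenoms and kCrossMapRNorm implement cross-map response
// normalization over k adjacent filter maps. For filter j at a given
// location, with F = num_filters and window start
//   s = blocked ? (j/k)*k : j - k/2,
// the forward pass computes
//   target_j = data_j * (1 + addScale * sum_{i = max(0,s)}^{min(s+k,F)-1} data_i^2)^(-powScale).
// kCrossMapDenoms caches the (1 + addScale * sum) term per location so that
// kCrossMapRNormUndo can form the gradient in batches.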
__global__ void kCrossMapRNorm(float* data, float* target,
int num_locs, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += loc_id;
target += loc_id;
if (loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : -k/2 + j;
int end = MIN(num_filters, start + k);
start = MAX(0, start);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
target[j * num_locs] = data[j * num_locs] * __powf(1 + addScale * sum, -powScale);
}
}
}
__global__ void kCrossMapRNormUndo(float* data, float* deriv, float* denoms, float* target,
int num_locs, int batch_locs, int batch_offset, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
target += batch_offset + loc_id;
deriv += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : -k + k/2 + j + 1;
int end = MIN(num_filters, start + k);
start = MAX(0, start);
for (int i = start; i < end; i++) {
sum += deriv[i * num_locs] * data[i * num_locs] * __powf(denoms[i * batch_locs], -powScale - 1);
}
target[j * num_locs] = deriv[j * num_locs] * __powf(denoms[j * batch_locs], -powScale) -
2 * addScale * powScale * data[j * num_locs] * sum;
}
}
}
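// _convUpGemm computes the forward pass as expand + GEMM + scatter: modules
// are processed in batches sized to the free-memory estimate, kExpand builds
// the im2col matrix A of shape
//   (num_images * batch) x (kernel_size_x * kernel_size_y * num_input_channels),
// cublasSgemm('n','t') multiplies A by the transposed filter matrix of shape
//   num_output_channels x (kernel_size_x * kernel_size_y * num_input_channels),
// and kWriteRows / kWriteRowsMult scatter the resulting
// (num_images * batch) x num_output_channels product into the target layout,
// scaling by scaleOutput and optionally blending with scaleTargets.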
void _convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D images_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK);
float *expanded_images = NULL, *expanded_target = NULL;
int num_modules_batch;
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X);
//max_batch_size = MAX(max_batch_size, 1);
//printf("Free space %ld max batch size %d\n", free_space, max_batch_size);
cudaError_t err1, err2;
err1 = cudaMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = cudaMalloc((void**)&expanded_target, max_batch_size * output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Could not allocate memory.\n");
/*
if (cudaSuccess == err1) cudaFree(expanded_images);
if (cudaSuccess == err2) cudaFree(expanded_target);
err1 = cudaMalloc((void**)&expanded_images, input_memory_size);
err2 = cudaMalloc((void**)&expanded_target, output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
}
*/
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
int module_id_start = 0;
float* w = filters->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 threads(num_threads_x);
dim3 blocks = dim3(this_num_modules_batch, num_input_channels);
kExpand<<<blocks, threads>>>(images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
cublasSgemm('n', 't',
num_images * this_num_modules_batch, num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
1, expanded_images, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
dim3 blocks2 = dim3(this_num_modules_batch, num_output_channels);
if (scaleTargets == 0) {
kWriteRows<<<blocks2, threads>>>(expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleOutput);
} else {
kWriteRowsMult<<<blocks2, threads>>>(expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleTargets, scaleOutput);
}
module_id_start += this_num_modules_batch;
}
cudaFree(expanded_images);
cudaFree(expanded_target);
getLastCudaError("convUpGemm: kernel execution failed");
}
void _convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D derivs_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK);
float *expanded_target = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_target, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X);
max_batch_size = MAX(max_batch_size, 1);
cudaError_t err1, err2;
err1 = cudaMalloc((void**)&expanded_target, max_batch_size * input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory\n");
/*
if (cudaSuccess == err1) cudaFree(expanded_target);
if (cudaSuccess == err2) cudaFree(expanded_derivs);
err1 = cudaMalloc((void**)&expanded_target, input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
}
*/
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
_Scale(targets, scaleTargets);
int module_id_start = 0;
float* w = filters->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
dim3 threads(num_threads_x);
kReadRows<<<blocks, threads>>>(derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
cublasSgemm('n', 'n',
num_images * this_num_modules_batch, kernel_size_x * kernel_size_y * num_input_channels,
num_output_channels,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
kContract<<<blocks2, threads>>>(expanded_target, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
module_id_start += this_num_modules_batch;
}
cudaFree(expanded_derivs);
cudaFree(expanded_target);
getLastCudaError("convDownGemm: kernel execution failed");
}
void _convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D images_shape, Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3Mult = targets_shape.shape[3];
int kernel_size_y2 = targets_shape.shape[2];
int kernel_size_x2 = targets_shape.shape[1];
int num_output_channels3 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels * filterModuleMult == num_input_channels3Mult);
assert (num_images == images->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels3Mult == targets->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK);
float *expanded_images = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_images, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X);
max_batch_size = MAX(max_batch_size, 1);
cudaError_t err1, err2;
err1 = cudaMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory.\n");
/*
if (cudaSuccess == err1) cudaFree(expanded_images);
if (cudaSuccess == err2) cudaFree(expanded_derivs);
err1 = cudaMalloc((void**)&expanded_images, input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
}
*/
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
_Scale(targets, scaleTargets);
int module_id_start = 0;
dim3 threads(num_threads_x);
float* dw = targets->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
kReadRows<<<blocks, threads>>>(derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
kExpand<<<blocks2, threads>>>(images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) dw += num_output_channels * input_size;
cublasSgemm('t', 'n',
num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
num_images * this_num_modules_batch,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
expanded_images, num_images * this_num_modules_batch,
1, dw, num_output_channels);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
module_id_start += this_num_modules_batch;
}
cudaFree(expanded_derivs);
cudaFree(expanded_images);
getLastCudaError("convOutpGemm: kernel execution failed");
}
template <class Pooler>
void _convPoolGemm(cudamat* images, cudamat* targets,
Shape4D images_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, Pooler pooler) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int kernel_size_t = conv_desc.kernel_size_t;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int stride_t = conv_desc.stride_t;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int padding_t = conv_desc.padding_t;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int image_size_t = num_input_channels2 / num_input_channels;
int num_modules_t = num_output_channels2 / num_output_channels;
int num_modules = num_modules_y * num_modules_x * num_modules_t;
// Consistency checks.
assert (num_images == num_images2);
assert (num_input_channels2 % image_size_t == 0);
assert (num_input_channels == num_input_channels2 / image_size_t);
assert (num_output_channels2 % num_modules_t == 0);
assert (num_output_channels == num_output_channels2 / num_modules_t);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_t * image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
_Scale(targets, scaleTargets);
dim3 threads(NUM_THREADS_PER_BLOCK);
int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
kPool<<<blocks, threads>>>(images->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x, image_size_t,
num_modules_y, num_modules_x, num_modules_t,
kernel_size_y, kernel_size_x, kernel_size_t,
padding_y, padding_x, padding_t,
stride_y, stride_x, stride_t, scaleOutput,
pooler);
getLastCudaError("convLocalPool: kernel execution failed");
}
void _avgPoolUndoGemm(cudamat* derivs, cudamat* targets,
Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int kernel_size_t = conv_desc.kernel_size_t;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int stride_t = conv_desc.stride_t;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int padding_t = conv_desc.padding_t;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int image_size_t = num_input_channels2 / num_input_channels;
int num_modules_t = num_output_channels2 / num_output_channels;
int num_modules = num_modules_y * num_modules_x * num_modules_t;
// Consistency checks.
assert (num_images == num_images2);
assert (num_input_channels2 % image_size_t == 0);
assert (num_input_channels == num_input_channels2 / image_size_t);
assert (num_output_channels2 % num_modules_t == 0);
assert (num_output_channels == num_output_channels2 / num_modules_t);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (image_size_t * image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
_Scale(targets, scaleTargets);
dim3 threads(NUM_THREADS_PER_BLOCK);
int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
kAvgPoolUndo<<<blocks, threads>>>(derivs->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x, image_size_t,
num_modules_y, num_modules_x, num_modules_t,
kernel_size_y, kernel_size_x, kernel_size_t,
padding_y, padding_x, padding_t,
stride_y, stride_x, stride_t, scaleOutput);
getLastCudaError("avgPoolUndo: kernel execution failed");
}
void _maxPoolUndoGemm(cudamat* images, cudamat* derivs, cudamat* maxes,
cudamat* targets, Shape4D targets_shape,
Shape4D derivs_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int kernel_size_t = conv_desc.kernel_size_t;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int stride_t = conv_desc.stride_t;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int padding_t = conv_desc.padding_t;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int image_size_t = num_input_channels2 / num_input_channels;
int num_modules_t = num_output_channels2 / num_output_channels;
int num_modules = num_modules_y * num_modules_x * num_modules_t;
// Consistency checks.
assert (num_images == num_images2);
assert (num_input_channels2 % image_size_t == 0);
assert (num_input_channels == num_input_channels2 / image_size_t);
assert (num_output_channels2 % num_modules_t == 0);
assert (num_output_channels == num_output_channels2 / num_modules_t);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (image_size_t * image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
_Scale(targets, scaleTargets);
dim3 threads(NUM_THREADS_PER_BLOCK);
int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
kMaxPoolUndo<<<blocks, threads>>>(images->data_device, derivs->data_device,
maxes->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x, image_size_t,
num_modules_y, num_modules_x, num_modules_t,
kernel_size_y, kernel_size_x, kernel_size_t,
padding_y, padding_x, padding_t,
stride_y, stride_x, stride_t, scaleOutput);
getLastCudaError("maxPoolUndo: kernel execution failed");
}
void _CrossMapRNorm(cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int num_blocks = DIVUP(num_locs, NUM_THREADS_PER_BLOCK);
kCrossMapRNorm<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(images->data_device, targets->data_device,
num_locs, addScale, powScale, num_filters, sizeF, blocked);
getLastCudaError("_CrossMapRNorm: kernel execution failed");
}
void _CrossMapRNormUndo(cudamat* outGrads, cudamat* images, cudamat* targets,
int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int batch_offset = 0;
float *denoms;
if (free_space_ == 0) EstimateFreeSpace();
int max_batch_size = free_space_ / (sizeof(float) * num_filters);
max_batch_size = MIN(num_locs, max_batch_size);
max_batch_size = MIN(num_locs, MAX_BLOCKS_X);
cudaError_t err;
err = cudaMalloc((void**)&denoms, max_batch_size * num_filters * sizeof(float));
if (cudaSuccess != err) {
printf("Out of memory on GPU!\n");
}
int num_batches = DIVUP(num_locs, max_batch_size);
for (int i = 0; i < num_batches; i++) {
int batch_size = MIN(max_batch_size, num_locs - batch_offset);
int num_blocks = DIVUP(batch_size, NUM_THREADS_PER_BLOCK);
kCrossMapDenoms<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(images->data_device, denoms, num_locs, batch_size,
batch_offset, addScale, num_filters, sizeF, blocked);
kCrossMapRNormUndo<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(images->data_device, outGrads->data_device, denoms,
targets->data_device, num_locs, batch_size, batch_offset,
addScale, powScale, num_filters, sizeF, blocked);
batch_offset += batch_size;
}
cudaFree(denoms);
getLastCudaError("_CrossMapRNormUndo: kernel execution failed");
}
#ifdef __cplusplus
extern "C" {
#endif
void convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
/*
printf("image shape %d %d %d %d\n", images_shape->shape[0], images_shape->shape[1], images_shape->shape[2], images_shape->shape[3]);
printf("filters shape %d %d %d %d\n", filters_shape->shape[0], filters_shape->shape[1], filters_shape->shape[2], filters_shape->shape[3]);
printf("targets shape %d %d %d %d\n", targets_shape->shape[0], targets_shape->shape[1], targets_shape->shape[2], targets_shape->shape[3]);
printf("Convolution : kernel_size_y %d kernel_size_x %d stride_y %d stride_x %d padding_y %d padding_x %d num_input_channels %d num_output_channels %d num_groups %d\n",
conv_desc.kernel_size_y, conv_desc.kernel_size_x, conv_desc.stride_x, conv_desc.stride_y, conv_desc.padding_y, conv_desc.padding_x, conv_desc.num_input_channels,
conv_desc.num_output_channels, conv_desc.num_groups);
*/
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, true);
}
void localUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, false);
}
void MaxPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
MaxPooler pooler;
_convPoolGemm<MaxPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void AvgPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
AvgPooler pooler;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void MaxPoolUndoGemm(cudamat* images, cudamat* maxGrads, cudamat* maxActs,
cudamat* targets, Shape4D* images_shape, Shape4D* maxGrads_shape,
ConvDesc conv_desc, float scaleTargets) {
_maxPoolUndoGemm(images, maxGrads, maxActs, targets, *images_shape,
*maxGrads_shape, conv_desc, scaleTargets, 1);
}
void AvgPoolUndoGemm(cudamat* avgGrads, cudamat* targets, Shape4D* avgGrads_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_avgPoolUndoGemm(avgGrads, targets, *avgGrads_shape, *targets_shape, conv_desc,
scaleTargets, 1);
}
void UpSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, int factor, float scaleTargets) {
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_avgPoolUndoGemm(images, targets, *images_shape, *targets_shape, conv_desc,
scaleTargets, factor * factor);
}
void DownSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, int factor) {
AvgPooler pooler = AvgPooler();
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, 0, 1, pooler);
}
void ResponseNormCrossMapGemm(
cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
_CrossMapRNorm(images, targets, num_filters, sizeF, addScale, powScale, blocked);
}
void ResponseNormCrossMapUndoGemm(
cudamat* outGrads, cudamat* inputs, cudamat* targets, int num_filters,
int sizeF, float addScale, float powScale, bool blocked) {
_CrossMapRNormUndo(outGrads, inputs, targets, num_filters, sizeF, addScale,
powScale, blocked);
}
void Scale(cudamat* mat, float scale) {
_Scale(mat, scale);
}
#ifdef __cplusplus
}
#endif
|
2ead027a20258c7b83876a6b893f70dad17901f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include "include/mpz.h"
#include <assert.h>
#include <unistd.h>
static char usage[] = "usage: %s -n n_samples -t n_div_thread_limit\n";
#define N 32
#include <time.h>
#include <sys/time.h>
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
__global__
void loop(long long int* a, int n, long long int* t) {
long long int t1, t2;
uint64_t i;
//uint64_t temp;
t[threadIdx.x] = threadIdx.x;
a[threadIdx.x] = 0;
for(i=0;i<10000000;i++){
a[threadIdx.x] += 88;
if(n==3333333){ //never true, to make sure line above is executed
a[threadIdx.x] = 11111;//powf(float(a[threadIdx.x]),float(n));
}
}
t1 = clock64();
//temp = powf(float(a[threadIdx.x]),float(n));
if(threadIdx.x >= n){ // branch that causes divergence
// for(uint64_t j=0;j<10000;j++){
//a[threadIdx.x] += powf(float(a[threadIdx.x]),float(n));
//asm("sub.s64 %rd20, %rd19, 77777;")
t[threadIdx.x] += 22;
// }
//t[threadIdx.x] = 99;
//for(j=0;j<77777;j++){
// if(n==5555555){ //never true, to make sure line above is executed
// a[threadIdx.x] = 6666;//powf(float(a[threadIdx.x]),float(n));
// }
//}
//a[threadIdx.x] -= temp - 777;
} else {
//t[threadIdx.x] *= 33;
//asm("add.s64 %rd32, %rd33, %rd34;");
//a[threadIdx.x] += powf(float(a[threadIdx.x]),float(n));
//asm("add.s64 %rd20, %rd19, 77777;");
}
t2 = clock64();
a[threadIdx.x] = t2 - t1;
t[threadIdx.x] = threadIdx.x;
}
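// The host code below launches this kernel twice per sample: once with n=31,
// where only thread 31 of the 32-thread block takes the "if" branch and the
// warp diverges, and once with n=100, where no thread takes it and the warp
// stays converged. a[0], printed after each launch, holds the clock64() delta
// measured around the branch, so the two groups of samples can be compared to
// estimate the cost of the divergent branch.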
int main(int argc, char ** argv) {
  int c;  // getopt() returns int, so the -1 sentinel compares correctly
int n_samples = -1;
int div_threads = -1;
while ((c = getopt(argc, argv, "t:n:")) != -1)
switch (c) {
case 'n':
n_samples = atoi(optarg);
break;
case 't':
div_threads = atoi(optarg);
break;
case '?':
fprintf(stderr, usage, argv[0]);
exit(1);
break;
}
if(n_samples <= 0 || div_threads <= 0){
fprintf(stderr, "Wrong parameters! Must specify -n and -t\n");
exit(1);
}
//allocate
uint64_t *probes;
probes = (uint64_t*) malloc(n_samples * sizeof(uint64_t));
assert(probes);
long long int* a, *t;
long long int* a_d, *t_d;
hipMalloc(&a_d, sizeof(long long int) * N);
hipMalloc(&t_d, sizeof(long long int) * N);
a = (long long int *) malloc(sizeof(long long int) * N);
t = (long long int *) malloc(sizeof(long long int) * N);
//hipMemset(a_d, 333, N);
//hipEvent_t start, stop;
//float time;
//hipEventCreate(&start);
//hipEventCreate(&stop);
for(int i=0;i<n_samples;i++){
//*a = 1;
//hipEventRecord(start, 0);
hipLaunchKernelGGL(( loop), dim3(1), dim3(N), 0, 0, a_d, 31, t_d);
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
hipDeviceSynchronize();
hipMemcpy(a, a_d, sizeof(long long int) * N, hipMemcpyDefault);
hipMemcpy(t, t_d, sizeof(long long int) * N, hipMemcpyDefault);
//hipEventElapsedTime(&time, start, stop);
printf ("%lld\n", *a);
}
printf("---------\n");
for(int i=0;i<n_samples;i++){
//*a = 1;
//hipEventRecord(start, 0);
hipLaunchKernelGGL(( loop), dim3(1), dim3(N), 0, 0, a_d, 100, t_d);
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
hipDeviceSynchronize();
hipMemcpy(a, a_d, sizeof(long long int) * N, hipMemcpyDefault);
hipMemcpy(t, t_d, sizeof(long long int) * N, hipMemcpyDefault);
//hipEventElapsedTime(&time, start, stop);
printf ("%lld\n", *a);
}
// // printf("a: %d\n", *a);
// // for (int j=0;j<N;j++){
// // printf("%d:%d\t", j, t[j]);
// // }
// // printf("\n");
  hipFree(a_d);
  hipFree(t_d);
free(a);
free(t);
// Retrieve result from device and store it in host array
return 0;
}
| 2ead027a20258c7b83876a6b893f70dad17901f3.cu | //#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include "include/mpz.h"
#include <assert.h>
#include <unistd.h>
static char usage[] = "usage: %s -n n_samples -t n_div_thread_limit\n";
#define N 32
#include <time.h>
#include <sys/time.h>
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
__global__
void loop(long long int* a, int n, long long int* t) {
long long int t1, t2;
uint64_t i;
//uint64_t temp;
t[threadIdx.x] = threadIdx.x;
a[threadIdx.x] = 0;
for(i=0;i<10000000;i++){
a[threadIdx.x] += 88;
if(n==3333333){ //never true, to make sure line above is executed
a[threadIdx.x] = 11111;//powf(float(a[threadIdx.x]),float(n));
}
}
t1 = clock64();
//temp = powf(float(a[threadIdx.x]),float(n));
if(threadIdx.x >= n){ // branch that causes divergence
// for(uint64_t j=0;j<10000;j++){
//a[threadIdx.x] += powf(float(a[threadIdx.x]),float(n));
//asm("sub.s64 %rd20, %rd19, 77777;")
t[threadIdx.x] += 22;
// }
//t[threadIdx.x] = 99;
//for(j=0;j<77777;j++){
// if(n==5555555){ //never true, to make sure line above is executed
// a[threadIdx.x] = 6666;//powf(float(a[threadIdx.x]),float(n));
// }
//}
//a[threadIdx.x] -= temp - 777;
} else {
//t[threadIdx.x] *= 33;
//asm("add.s64 %rd32, %rd33, %rd34;");
//a[threadIdx.x] += powf(float(a[threadIdx.x]),float(n));
//asm("add.s64 %rd20, %rd19, 77777;");
}
t2 = clock64();
a[threadIdx.x] = t2 - t1;
t[threadIdx.x] = threadIdx.x;
}
int main(int argc, char ** argv) {
  int c;  // getopt() returns int, so the -1 sentinel compares correctly
int n_samples = -1;
int div_threads = -1;
while ((c = getopt(argc, argv, "t:n:")) != -1)
switch (c) {
case 'n':
n_samples = atoi(optarg);
break;
case 't':
div_threads = atoi(optarg);
break;
case '?':
fprintf(stderr, usage, argv[0]);
exit(1);
break;
}
if(n_samples <= 0 || div_threads <= 0){
fprintf(stderr, "Wrong parameters! Must specify -n and -t\n");
exit(1);
}
//allocate
uint64_t *probes;
probes = (uint64_t*) malloc(n_samples * sizeof(uint64_t));
assert(probes);
long long int* a, *t;
long long int* a_d, *t_d;
cudaMalloc(&a_d, sizeof(long long int) * N);
cudaMalloc(&t_d, sizeof(long long int) * N);
a = (long long int *) malloc(sizeof(long long int) * N);
t = (long long int *) malloc(sizeof(long long int) * N);
//cudaMemset(a_d, 333, N);
//cudaEvent_t start, stop;
//float time;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
for(int i=0;i<n_samples;i++){
//*a = 1;
//cudaEventRecord(start, 0);
loop<<<1, N>>>(a_d, 31, t_d);
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
cudaDeviceSynchronize();
cudaMemcpy(a, a_d, sizeof(long long int) * N, cudaMemcpyDefault);
cudaMemcpy(t, t_d, sizeof(long long int) * N, cudaMemcpyDefault);
//cudaEventElapsedTime(&time, start, stop);
printf ("%lld\n", *a);
}
printf("---------\n");
for(int i=0;i<n_samples;i++){
//*a = 1;
//cudaEventRecord(start, 0);
loop<<<1, N>>>(a_d, 100, t_d);
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
cudaDeviceSynchronize();
cudaMemcpy(a, a_d, sizeof(long long int) * N, cudaMemcpyDefault);
cudaMemcpy(t, t_d, sizeof(long long int) * N, cudaMemcpyDefault);
//cudaEventElapsedTime(&time, start, stop);
printf ("%lld\n", *a);
}
// // printf("a: %d\n", *a);
// // for (int j=0;j<N;j++){
// // printf("%d:%d\t", j, t[j]);
// // }
// // printf("\n");
  cudaFree(a_d);
  cudaFree(t_d);
free(a);
free(t);
// Retrieve result from device and store it in host array
return 0;
}
|
d77e51fe5c4b72c189ec3af2075028c72dc38249.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define DATATYPE int
#define SMEMSIZE 1024
#define REP 128
//#define conflictnum 8
__global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its, int conflictnum)
{
__shared__ DATATYPE smem1[SMEMSIZE];
__shared__ DATATYPE smem2[SMEMSIZE];
unsigned int tid=threadIdx.x;
while(tid<SMEMSIZE)
{
smem1[tid]=in1[tid];
smem2[tid]=in2[tid];
tid+=blockDim.x;
}
DATATYPE p,q=(threadIdx.x/conflictnum*conflictnum);
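	// q is threadIdx.x rounded down to a multiple of conflictnum, so every
	// group of conflictnum consecutive threads starts the dependent
	// smem1/smem2 pointer chase at the same index. The host fills both
	// tables with the identity permutation (init_order), so the chased
	// index never changes and conflictnum controls how many threads of a
	// warp read the same shared-memory word on every iteration.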
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=smem1[q];
q=smem2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2, int conflictnum)
{
int its=30;
DATATYPE *d_in1,*d_in2;
hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
hipMalloc((void**)&d_time,sizeof(double)*blocks*threads);
hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
hipLaunchKernelGGL(( shared_model_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_in1,d_in1,d_out,its,conflictnum);
hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt);
hipFree(d_time);
hipFree(d_out);
hipFree(d_in1);
hipFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("%s <conflictnum> \n", argv[0]);
} else {
int value = atoi(argv[1]);
DATATYPE *h_in1;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
int blocks = 1;
for (int j = 0; j <= 512; j += 32) {
int threads = (j == 0 ? 1 : j);
main_test(blocks, threads, h_in1, h_in1, value);
}
free(h_in1);
}
return 0;
} | d77e51fe5c4b72c189ec3af2075028c72dc38249.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define DATATYPE int
#define SMEMSIZE 1024
#define REP 128
//#define conflictnum 8
__global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its, int conflictnum)
{
__shared__ DATATYPE smem1[SMEMSIZE];
__shared__ DATATYPE smem2[SMEMSIZE];
unsigned int tid=threadIdx.x;
while(tid<SMEMSIZE)
{
smem1[tid]=in1[tid];
smem2[tid]=in2[tid];
tid+=blockDim.x;
}
DATATYPE p,q=(threadIdx.x/conflictnum*conflictnum);
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=smem1[q];
q=smem2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2, int conflictnum)
{
int its=30;
DATATYPE *d_in1,*d_in2;
cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
shared_model_1<<<blocks,threads>>>(d_time,d_in1,d_in1,d_out,its,conflictnum);
cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt);
cudaFree(d_time);
cudaFree(d_out);
cudaFree(d_in1);
cudaFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("%s <conflictnum> \n", argv[0]);
} else {
int value = atoi(argv[1]);
DATATYPE *h_in1;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
int blocks = 1;
for (int j = 0; j <= 512; j += 32) {
int threads = (j == 0 ? 1 : j);
main_test(blocks, threads, h_in1, h_in1, value);
}
free(h_in1);
}
return 0;
} |
ad152ba51ded7028dd020edade107c0004eaee56.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/cudf_test_utils.cuh>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/device_vector.h>
#include <gtest/gtest.h>
template <class ColumnType>
struct DigitizeTest : public GdfTest {
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
std::vector<ColumnType> col_in_data;
std::vector<ColumnType> bins_data;
std::vector<gdf_index_type> out_data;
gdf_col_pointer col_in;
gdf_col_pointer bins;
DigitizeTest(){
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~DigitizeTest(){}
void initialize_data(size_t column_length, size_t column_range,
size_t bins_length, size_t bins_range)
{
initialize_vector(col_in_data, column_length, column_range, false);
col_in = create_gdf_column(col_in_data);
initialize_vector(bins_data, bins_length, bins_range, true);
bins = create_gdf_column(bins_data);
}
gdf_error digitize(bool right) {
rmm::device_vector<gdf_index_type> out_indices_dev(col_in->size);
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
out_data.resize(out_indices_dev.size());
hipMemcpy(out_data.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(gdf_index_type),
hipMemcpyDeviceToHost);
return result;
}
};
typedef ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double> ValidGdfTypes;
TYPED_TEST_CASE(DigitizeTest, ValidGdfTypes);
TYPED_TEST(DigitizeTest, UpperBound)
{
this->initialize_data(1000, 56, 4, 100);
gdf_error result = this->digitize(true);
EXPECT_EQ(result, GDF_SUCCESS);
}
TYPED_TEST(DigitizeTest, LowerBound)
{
this->initialize_data(10000, 60, 10, 100);
gdf_error result = this->digitize(false);
EXPECT_EQ(result, GDF_SUCCESS);
}
void digitize_detail(bool right, const std::vector<int32_t>& expected) {
std::vector<double> bins_data{0, 2, 5, 7, 8};
gdf_col_pointer bins = create_gdf_column(bins_data);
std::vector<double> col_in_data{-10, 0, 1, 2, 3, 8, 9};
gdf_col_pointer col_in = create_gdf_column(col_in_data);
rmm::device_vector<gdf_index_type> out_indices_dev(col_in_data.size());
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
std::vector<gdf_index_type> out_indices(out_indices_dev.size());
hipMemcpy(out_indices.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(gdf_index_type),
hipMemcpyDeviceToHost);
EXPECT_EQ(result, GDF_SUCCESS);
const size_t num_rows = col_in_data.size();
for (unsigned int i = 0; i < num_rows; ++i) {
EXPECT_EQ(expected[i], out_indices[i]);
}
}
TEST(DigitizeTest, UpperBoundDetail) {
std::vector<int32_t> expected{0, 0, 1, 1, 2, 4, 5};
digitize_detail(true, expected);
}
TEST(DigitizeTest, LowerBoundDetail) {
std::vector<int32_t> expected{0, 1, 1, 2, 2, 5, 5};
digitize_detail(false, expected);
}
| ad152ba51ded7028dd020edade107c0004eaee56.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/cudf_test_utils.cuh>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/device_vector.h>
#include <gtest/gtest.h>
template <class ColumnType>
struct DigitizeTest : public GdfTest {
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
std::vector<ColumnType> col_in_data;
std::vector<ColumnType> bins_data;
std::vector<gdf_index_type> out_data;
gdf_col_pointer col_in;
gdf_col_pointer bins;
DigitizeTest(){
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~DigitizeTest(){}
void initialize_data(size_t column_length, size_t column_range,
size_t bins_length, size_t bins_range)
{
initialize_vector(col_in_data, column_length, column_range, false);
col_in = create_gdf_column(col_in_data);
initialize_vector(bins_data, bins_length, bins_range, true);
bins = create_gdf_column(bins_data);
}
gdf_error digitize(bool right) {
rmm::device_vector<gdf_index_type> out_indices_dev(col_in->size);
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
out_data.resize(out_indices_dev.size());
cudaMemcpy(out_data.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(gdf_index_type),
cudaMemcpyDeviceToHost);
return result;
}
};
typedef ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double> ValidGdfTypes;
TYPED_TEST_CASE(DigitizeTest, ValidGdfTypes);
TYPED_TEST(DigitizeTest, UpperBound)
{
this->initialize_data(1000, 56, 4, 100);
gdf_error result = this->digitize(true);
EXPECT_EQ(result, GDF_SUCCESS);
}
TYPED_TEST(DigitizeTest, LowerBound)
{
this->initialize_data(10000, 60, 10, 100);
gdf_error result = this->digitize(false);
EXPECT_EQ(result, GDF_SUCCESS);
}
void digitize_detail(bool right, const std::vector<int32_t>& expected) {
std::vector<double> bins_data{0, 2, 5, 7, 8};
gdf_col_pointer bins = create_gdf_column(bins_data);
std::vector<double> col_in_data{-10, 0, 1, 2, 3, 8, 9};
gdf_col_pointer col_in = create_gdf_column(col_in_data);
rmm::device_vector<gdf_index_type> out_indices_dev(col_in_data.size());
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
std::vector<gdf_index_type> out_indices(out_indices_dev.size());
cudaMemcpy(out_indices.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(gdf_index_type),
cudaMemcpyDeviceToHost);
EXPECT_EQ(result, GDF_SUCCESS);
const size_t num_rows = col_in_data.size();
for (unsigned int i = 0; i < num_rows; ++i) {
EXPECT_EQ(expected[i], out_indices[i]);
}
}
TEST(DigitizeTest, UpperBoundDetail) {
std::vector<int32_t> expected{0, 0, 1, 1, 2, 4, 5};
digitize_detail(true, expected);
}
TEST(DigitizeTest, LowerBoundDetail) {
std::vector<int32_t> expected{0, 1, 1, 2, 2, 5, 5};
digitize_detail(false, expected);
}
|
d3d7119fea5df883baac6cbdf2a8f519fe1fec05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#undef _GLIBCXX_USE_INT128
#include "cuda_sort2.h"
#if 0
#include <thrust/scan.h>
__constant__ __device__ int dirs[9][9][2];
__constant__ __device__ int dirs_inv[9][15];
// 0 - 8 is the 3x3 block of surrounding cells, 9 has additional particles being added
#define S_NEW (9)
#define STRIDE (10)
#define S_OOB (10)
#include <b40c/radixsort_reduction_kernel.h>
#include <b40c/radixsort_spine_kernel.h>
#include <b40c/radixsort_scanscatter_kernel3.h>
using namespace b40c_thrust;
typedef unsigned int K;
typedef unsigned int V;
static const int RADIX_BITS = 4;
//static const int RADIX_DIGITS = 1 << RADIX_BITS;
// blockIdx to rel offset
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
struct xPreShiftFunctor {
__device__ __host__ __forceinline__ void operator()(K &converted_key) {
if (converted_key == NBLOCKS_X * NBLOCKS_Y * NBLOCKS_Z) {
converted_key = S_OOB;
} else {
int b_diff = blockIdx.x - converted_key + NBLOCKS_Y + 1;
int d1 = b_diff % NBLOCKS_Y;
int d2 = b_diff / NBLOCKS_Y;
converted_key = d2 * 3 + d1;
}
}
__device__ __host__ __forceinline__ static bool MustApply(){ return true;}
};
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
__global__ static void
cuda_set_bn_cnts(int *d_bb_cnts, unsigned int *d_bn_cnts)
{
unsigned int b = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (b >= NBLOCKS_X * NBLOCKS_Y * NBLOCKS_Z)
return;
d_bb_cnts[b * STRIDE + S_NEW] = d_bn_cnts[b];
}
__global__ static void
cuda_move_recvd(int *d_bb_sums,
unsigned int *d_bidx, unsigned int *d_alt_bidx,
unsigned int *d_alt_ids,
int n_part, int n_part_prev)
{
int i = n_part_prev + threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i >= n_part)
return;
int idx = d_bidx[i] * STRIDE + S_NEW;
d_alt_ids[d_bb_sums[idx] + d_alt_bidx[i]] = i;
}
__global__ static void
set_new_offsets(int *offsets, int *bb_sums, int nr_blocks)
{
int b = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (b <= nr_blocks) {
offsets[b] = bb_sums[b * STRIDE + 0];
}
}
static void
cuda_set_new_offsets(int *d_offsets, int *d_bb_sums, int nr_blocks)
{
int dimGrid = (nr_blocks + 1 + THREADS_PER_BLOCK - 1)
/ THREADS_PER_BLOCK;
hipLaunchKernelGGL(( set_new_offsets), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0,
d_offsets, d_bb_sums, nr_blocks);
}
class SortPairs3 {
int _nr_blocks;
thrust::device_vector<int> _bb_cnts;
thrust::device_vector<int> _bb_sums;
static int nr_blocks(const int b_mx[3])
{
return b_mx[0] * b_mx[1] * b_mx[2];
}
public:
int _b_mx[3];
SortPairs3(const int b_mx[3]) :
_nr_blocks(nr_blocks(b_mx)),
_bb_cnts(_nr_blocks * (STRIDE+1)),
_bb_sums(_nr_blocks * (STRIDE+1))
{
_b_mx[0] = b_mx[0];
_b_mx[1] = b_mx[1];
_b_mx[2] = b_mx[2];
}
// ----------------------------------------------------------------------
// sort
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void sort(unsigned int *d_bidx, unsigned int *d_alt_bidx, unsigned int *d_alt_ids,
int n_part, int *d_offsets, int n_part_prev, unsigned int *bn_cnts)
{
// static int pr_A, pr_B, pr_C;
// if (!pr_A) {
// pr_A = prof_register("sort_bottom_sum", 1., 0, 0);
// pr_B = prof_register("sort_top_scan", 1., 0, 0);
// pr_C = prof_register("sort_bottom_scan", 1., 0, 0);
// }
// prof_start(pr_A);
reduction<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>
(d_offsets, d_bidx, n_part, n_part_prev, bn_cnts);
// prof_stop(pr_A);
// prof_start(pr_B);
thrust::exclusive_scan(_bb_cnts.begin(), _bb_cnts.end(), _bb_sums.begin());
cuda_sync_if_enabled();
// prof_stop(pr_B);
// prof_start(pr_C);
scan<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>
(d_offsets, d_bidx, d_alt_bidx, d_alt_ids, n_part, n_part_prev);
cuda_set_new_offsets(d_offsets, thrust::raw_pointer_cast(&_bb_sums[0]),
_nr_blocks);
// prof_stop(pr_C);
}
private:
// ----------------------------------------------------------------------
// reduction
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void reduction(int *d_offsets, unsigned int *d_bidx,
int n_part, int n_part_prev, unsigned int *bn_cnts)
{
// OPT, mostly unneeded, could just cache
thrust::fill(_bb_cnts.begin(), _bb_cnts.end(), 0);
// OPT, could leave out real interior counts (== 0)
// copy to device
thrust::device_vector<unsigned int> d_bn_cnts(bn_cnts, bn_cnts + _nr_blocks);
int dimGrid = (n_part + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( cuda_set_bn_cnts<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>)
, dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(&_bb_cnts[0]),
thrust::raw_pointer_cast(&d_bn_cnts[0]));
cuda_sync_if_enabled();
// set rest of array by counting
const int threads = B40C_RADIXSORT_THREADS;
hipLaunchKernelGGL(( RakingReduction3x<K, V, 0, RADIX_BITS, 0,
xPreShiftFunctor<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>,
NBLOCKS_Y, NBLOCKS_Z>)
, dim3(_nr_blocks), dim3(threads), 0, 0, thrust::raw_pointer_cast(&_bb_cnts[0]),
d_bidx, d_offsets);
cuda_sync_if_enabled();
}
// ----------------------------------------------------------------------
// reduction_host
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void reduction_host(struct psc_particles *prts, unsigned int *d_bidx,
int n_part, int n_part_prev)
{
int *d_bb_cnts = thrust::raw_pointer_cast(&_bb_cnts[0]);
unsigned int *offsets = (unsigned int *) malloc((_nr_blocks + 1) * sizeof(*offsets));
unsigned int *bidx = (unsigned int *) malloc(prts->n_part * sizeof(*bidx));
int *bb_cnts = (int *) malloc(_nr_blocks * (STRIDE+1) * sizeof(*bb_cnts));
memset(bb_cnts, 0, _nr_blocks * (STRIDE+1) * sizeof(*bb_cnts));
hipMemset(d_bb_cnts, 0, _nr_blocks * (STRIDE+1) * sizeof(*bb_cnts));
cuda_copy_offsets_from_dev(prts, offsets);
cuda_copy_bidx_from_dev(prts, bidx, d_bidx);
// go by block for the old particles
for (unsigned int bb = 0; bb < _nr_blocks; bb++) {
for (int i = offsets[bb]; i < offsets[bb+1]; i++) {
int idx;
if (bidx[i] == _nr_blocks) {
idx = _nr_blocks * STRIDE + bb;
} else {
assert(bidx[i] < _nr_blocks);
int b_diff = bb - bidx[i] + NBLOCKS_Y + 1;
int d1 = b_diff % NBLOCKS_Y;
int d2 = b_diff / NBLOCKS_Y;
idx = bidx[i] * STRIDE + (d2 * 3 + d1);
}
bb_cnts[idx]++;
}
}
assert(offsets[_nr_blocks] == n_part_prev);
// then do the new ones
for (int i = n_part_prev; i < prts->n_part; i++) {
assert(bidx[i] < _nr_blocks);
bb_cnts[bidx[i] * STRIDE + S_NEW]++;
}
check(hipMemcpy(d_bb_cnts, bb_cnts, _nr_blocks * (STRIDE+1) * sizeof(*bb_cnts),
hipMemcpyHostToDevice));
free(offsets);
free(bidx);
free(bb_cnts);
}
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void scan(int *d_offsets, unsigned int *d_bidx,
unsigned int *d_alt_bidx, unsigned int *d_alt_ids,
int n_part, int n_part_prev)
{
const int threads = B40C_RADIXSORT_THREADS;
hipLaunchKernelGGL(( ScanScatterDigits3x<K, V, 0, RADIX_BITS, 0,
xPreShiftFunctor<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>,
NopFunctor<K>,
NBLOCKS_Y, NBLOCKS_Z>)
, dim3(_nr_blocks), dim3(threads), 0, 0,
thrust::raw_pointer_cast(&_bb_sums[0]), d_bidx, d_alt_ids, d_offsets);
cuda_sync_if_enabled();
int dimsGrid = (n_part - n_part_prev + THREADS_PER_BLOCK - 1) /
THREADS_PER_BLOCK;
hipLaunchKernelGGL(( cuda_move_recvd), dim3(dimsGrid), dim3(THREADS_PER_BLOCK), 0, 0,
thrust::raw_pointer_cast(&_bb_sums[0]),
d_bidx, d_alt_bidx, d_alt_ids, n_part, n_part_prev);
cuda_sync_if_enabled();
}
};
// ======================================================================
// C interface
EXTERN_C void *
sort_pairs_3_create(const int b_mx[3])
{
SortPairs3 *sp = new SortPairs3(b_mx);
return (void *) sp;
}
EXTERN_C void
sort_pairs_3_destroy(void *_sp)
{
SortPairs3 *sp = (SortPairs3 *) _sp;
delete sp;
}
EXTERN_C void
sort_pairs_3_device(void *_sp, unsigned int *d_bidx,
unsigned int *d_alt_bidx, unsigned int *d_alt_ids,
int n_part, int *d_offsets,
int n_part_prev, unsigned int *bn_cnts)
{
SortPairs3 *sp = (SortPairs3 *) _sp;
int *b_mx = sp->_b_mx;
if (b_mx[0] == 1 && b_mx[1] == 8 && b_mx[2] == 8) {
sp->sort<1, 8, 8>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 16 && b_mx[2] == 16) {
sp->sort<1, 16, 16>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 32 && b_mx[2] == 32) {
sp->sort<1, 32, 32>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 64 && b_mx[2] == 64) {
sp->sort<1, 64, 64>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 128 && b_mx[2] == 128) {
sp->sort<1, 128, 128>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else {
printf("need to add support for _b_mx %d %d\n", b_mx[1], b_mx[2]);
assert(0);
}
}
#endif
| d3d7119fea5df883baac6cbdf2a8f519fe1fec05.cu |
#undef _GLIBCXX_USE_INT128
#include "cuda_sort2.h"
#if 0
#include <thrust/scan.h>
__constant__ __device__ int dirs[9][9][2];
__constant__ __device__ int dirs_inv[9][15];
// 0 - 8 is the 3x3 block of surrounding cells, 9 has additional particles being added
#define S_NEW (9)
#define STRIDE (10)
#define S_OOB (10)
#include <b40c/radixsort_reduction_kernel.h>
#include <b40c/radixsort_spine_kernel.h>
#include <b40c/radixsort_scanscatter_kernel3.h>
using namespace b40c_thrust;
typedef unsigned int K;
typedef unsigned int V;
static const int RADIX_BITS = 4;
//static const int RADIX_DIGITS = 1 << RADIX_BITS;
// blockIdx to rel offset
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
struct xPreShiftFunctor {
__device__ __host__ __forceinline__ void operator()(K &converted_key) {
if (converted_key == NBLOCKS_X * NBLOCKS_Y * NBLOCKS_Z) {
converted_key = S_OOB;
} else {
int b_diff = blockIdx.x - converted_key + NBLOCKS_Y + 1;
int d1 = b_diff % NBLOCKS_Y;
int d2 = b_diff / NBLOCKS_Y;
converted_key = d2 * 3 + d1;
}
}
__device__ __host__ __forceinline__ static bool MustApply(){ return true;}
};
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
__global__ static void
cuda_set_bn_cnts(int *d_bb_cnts, unsigned int *d_bn_cnts)
{
unsigned int b = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (b >= NBLOCKS_X * NBLOCKS_Y * NBLOCKS_Z)
return;
d_bb_cnts[b * STRIDE + S_NEW] = d_bn_cnts[b];
}
__global__ static void
cuda_move_recvd(int *d_bb_sums,
unsigned int *d_bidx, unsigned int *d_alt_bidx,
unsigned int *d_alt_ids,
int n_part, int n_part_prev)
{
int i = n_part_prev + threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i >= n_part)
return;
int idx = d_bidx[i] * STRIDE + S_NEW;
d_alt_ids[d_bb_sums[idx] + d_alt_bidx[i]] = i;
}
__global__ static void
set_new_offsets(int *offsets, int *bb_sums, int nr_blocks)
{
int b = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (b <= nr_blocks) {
offsets[b] = bb_sums[b * STRIDE + 0];
}
}
static void
cuda_set_new_offsets(int *d_offsets, int *d_bb_sums, int nr_blocks)
{
int dimGrid = (nr_blocks + 1 + THREADS_PER_BLOCK - 1)
/ THREADS_PER_BLOCK;
set_new_offsets<<<dimGrid, THREADS_PER_BLOCK>>>
(d_offsets, d_bb_sums, nr_blocks);
}
class SortPairs3 {
int _nr_blocks;
thrust::device_vector<int> _bb_cnts;
thrust::device_vector<int> _bb_sums;
static int nr_blocks(const int b_mx[3])
{
return b_mx[0] * b_mx[1] * b_mx[2];
}
public:
int _b_mx[3];
SortPairs3(const int b_mx[3]) :
_nr_blocks(nr_blocks(b_mx)),
_bb_cnts(_nr_blocks * (STRIDE+1)),
_bb_sums(_nr_blocks * (STRIDE+1))
{
_b_mx[0] = b_mx[0];
_b_mx[1] = b_mx[1];
_b_mx[2] = b_mx[2];
}
// ----------------------------------------------------------------------
// sort
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void sort(unsigned int *d_bidx, unsigned int *d_alt_bidx, unsigned int *d_alt_ids,
int n_part, int *d_offsets, int n_part_prev, unsigned int *bn_cnts)
{
// static int pr_A, pr_B, pr_C;
// if (!pr_A) {
// pr_A = prof_register("sort_bottom_sum", 1., 0, 0);
// pr_B = prof_register("sort_top_scan", 1., 0, 0);
// pr_C = prof_register("sort_bottom_scan", 1., 0, 0);
// }
// prof_start(pr_A);
reduction<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>
(d_offsets, d_bidx, n_part, n_part_prev, bn_cnts);
// prof_stop(pr_A);
// prof_start(pr_B);
thrust::exclusive_scan(_bb_cnts.begin(), _bb_cnts.end(), _bb_sums.begin());
cuda_sync_if_enabled();
// prof_stop(pr_B);
// prof_start(pr_C);
scan<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>
(d_offsets, d_bidx, d_alt_bidx, d_alt_ids, n_part, n_part_prev);
cuda_set_new_offsets(d_offsets, thrust::raw_pointer_cast(&_bb_sums[0]),
_nr_blocks);
// prof_stop(pr_C);
}
private:
// ----------------------------------------------------------------------
// reduction
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void reduction(int *d_offsets, unsigned int *d_bidx,
int n_part, int n_part_prev, unsigned int *bn_cnts)
{
// OPT, mostly unneeded, could just cache
thrust::fill(_bb_cnts.begin(), _bb_cnts.end(), 0);
// OPT, could leave out real interior counts (== 0)
// copy to device
thrust::device_vector<unsigned int> d_bn_cnts(bn_cnts, bn_cnts + _nr_blocks);
int dimGrid = (n_part + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cuda_set_bn_cnts<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>
<<<dimGrid, THREADS_PER_BLOCK>>> (thrust::raw_pointer_cast(&_bb_cnts[0]),
thrust::raw_pointer_cast(&d_bn_cnts[0]));
cuda_sync_if_enabled();
// set rest of array by counting
const int threads = B40C_RADIXSORT_THREADS;
RakingReduction3x<K, V, 0, RADIX_BITS, 0,
xPreShiftFunctor<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>,
NBLOCKS_Y, NBLOCKS_Z>
<<<_nr_blocks, threads>>> (thrust::raw_pointer_cast(&_bb_cnts[0]),
d_bidx, d_offsets);
cuda_sync_if_enabled();
}
// ----------------------------------------------------------------------
// reduction_host
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void reduction_host(struct psc_particles *prts, unsigned int *d_bidx,
int n_part, int n_part_prev)
{
int *d_bb_cnts = thrust::raw_pointer_cast(&_bb_cnts[0]);
unsigned int *offsets = (unsigned int *) malloc((_nr_blocks + 1) * sizeof(*offsets));
unsigned int *bidx = (unsigned int *) malloc(prts->n_part * sizeof(*bidx));
int *bb_cnts = (int *) malloc(_nr_blocks * (STRIDE+1) * sizeof(*bb_cnts));
memset(bb_cnts, 0, _nr_blocks * (STRIDE+1) * sizeof(*bb_cnts));
cudaMemset(d_bb_cnts, 0, _nr_blocks * (STRIDE+1) * sizeof(*bb_cnts));
cuda_copy_offsets_from_dev(prts, offsets);
cuda_copy_bidx_from_dev(prts, bidx, d_bidx);
// go by block for the old particles
for (unsigned int bb = 0; bb < _nr_blocks; bb++) {
for (int i = offsets[bb]; i < offsets[bb+1]; i++) {
int idx;
if (bidx[i] == _nr_blocks) {
idx = _nr_blocks * STRIDE + bb;
} else {
assert(bidx[i] < _nr_blocks);
int b_diff = bb - bidx[i] + NBLOCKS_Y + 1;
int d1 = b_diff % NBLOCKS_Y;
int d2 = b_diff / NBLOCKS_Y;
idx = bidx[i] * STRIDE + (d2 * 3 + d1);
}
bb_cnts[idx]++;
}
}
assert(offsets[_nr_blocks] == n_part_prev);
// then do the new ones
for (int i = n_part_prev; i < prts->n_part; i++) {
assert(bidx[i] < _nr_blocks);
bb_cnts[bidx[i] * STRIDE + S_NEW]++;
}
check(cudaMemcpy(d_bb_cnts, bb_cnts, _nr_blocks * (STRIDE+1) * sizeof(*bb_cnts),
cudaMemcpyHostToDevice));
free(offsets);
free(bidx);
free(bb_cnts);
}
template<int NBLOCKS_X, int NBLOCKS_Y, int NBLOCKS_Z>
void scan(int *d_offsets, unsigned int *d_bidx,
unsigned int *d_alt_bidx, unsigned int *d_alt_ids,
int n_part, int n_part_prev)
{
const int threads = B40C_RADIXSORT_THREADS;
ScanScatterDigits3x<K, V, 0, RADIX_BITS, 0,
xPreShiftFunctor<NBLOCKS_X, NBLOCKS_Y, NBLOCKS_Z>,
NopFunctor<K>,
NBLOCKS_Y, NBLOCKS_Z>
<<<_nr_blocks, threads>>>
(thrust::raw_pointer_cast(&_bb_sums[0]), d_bidx, d_alt_ids, d_offsets);
cuda_sync_if_enabled();
int dimsGrid = (n_part - n_part_prev + THREADS_PER_BLOCK - 1) /
THREADS_PER_BLOCK;
cuda_move_recvd<<<dimsGrid, THREADS_PER_BLOCK>>>
(thrust::raw_pointer_cast(&_bb_sums[0]),
d_bidx, d_alt_bidx, d_alt_ids, n_part, n_part_prev);
cuda_sync_if_enabled();
}
};
// ======================================================================
// C interface
EXTERN_C void *
sort_pairs_3_create(const int b_mx[3])
{
SortPairs3 *sp = new SortPairs3(b_mx);
return (void *) sp;
}
EXTERN_C void
sort_pairs_3_destroy(void *_sp)
{
SortPairs3 *sp = (SortPairs3 *) _sp;
delete sp;
}
EXTERN_C void
sort_pairs_3_device(void *_sp, unsigned int *d_bidx,
unsigned int *d_alt_bidx, unsigned int *d_alt_ids,
int n_part, int *d_offsets,
int n_part_prev, unsigned int *bn_cnts)
{
SortPairs3 *sp = (SortPairs3 *) _sp;
int *b_mx = sp->_b_mx;
if (b_mx[0] == 1 && b_mx[1] == 8 && b_mx[2] == 8) {
sp->sort<1, 8, 8>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 16 && b_mx[2] == 16) {
sp->sort<1, 16, 16>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 32 && b_mx[2] == 32) {
sp->sort<1, 32, 32>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 64 && b_mx[2] == 64) {
sp->sort<1, 64, 64>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else if (b_mx[0] == 1 && b_mx[1] == 128 && b_mx[2] == 128) {
sp->sort<1, 128, 128>(d_bidx, d_alt_bidx, d_alt_ids,
n_part, d_offsets, n_part_prev, bn_cnts);
} else {
printf("need to add support for _b_mx %d %d\n", b_mx[1], b_mx[2]);
assert(0);
}
}
#endif
|
64291fab21e4b60aa075765008249e624da6c8b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/smooth_threshold_layer.hpp"
namespace caffe
{
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype* in, Dtype* out,
const Dtype* threshold, const Dtype beta, const Dtype alpha)
{
CUDA_KERNEL_LOOP(index, n) {
out[index] = 1. / (1. + alpha*exp(beta*(-abs(in[index])+threshold[0])));
}
}
template <typename Dtype>
void SmoothThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = bottom[0]->count();
const Dtype* threshold = this->blobs_[0]->gpu_data();
hipLaunchKernelGGL(( ThresholdForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
count, bottom_data, top_data, threshold, beta, alpha);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ThresholdBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, const Dtype* in_data, Dtype* out_diff, Dtype* threshold_diff,
const Dtype beta) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype sigmoid_x = out_data[index];
Dtype sign_x = (Dtype(0) < in_data[index]) - (in_data[index] < Dtype(0));
out_diff[index] = in_diff[index] * sigmoid_x * (1.0 - sigmoid_x) * beta * sign_x;
threshold_diff[index] = sigmoid_x * (sigmoid_x - 1.0) * beta;
}
}
template <typename Dtype>
void SmoothThresholdLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
if (propagate_down[0])
{
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = bottom[0]->count();
Dtype* diff_th = diff.mutable_gpu_data();
hipLaunchKernelGGL(( ThresholdBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_data, bottom_diff, diff_th, beta);
CUDA_POST_KERNEL_CHECK;
Dtype* threshold_diff = this->blobs_[0]->mutable_cpu_diff();
caffe_gpu_dot(count, diff.gpu_data(), top[0]->gpu_diff(), threshold_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SmoothThresholdLayer);
}
| 64291fab21e4b60aa075765008249e624da6c8b8.cu | #include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/smooth_threshold_layer.hpp"
namespace caffe
{
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype* in, Dtype* out,
const Dtype* threshold, const Dtype beta, const Dtype alpha)
{
CUDA_KERNEL_LOOP(index, n) {
out[index] = 1. / (1. + alpha*exp(beta*(-abs(in[index])+threshold[0])));
}
}
template <typename Dtype>
void SmoothThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = bottom[0]->count();
const Dtype* threshold = this->blobs_[0]->gpu_data();
ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >>>(
count, bottom_data, top_data, threshold, beta, alpha);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ThresholdBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, const Dtype* in_data, Dtype* out_diff, Dtype* threshold_diff,
const Dtype beta) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype sigmoid_x = out_data[index];
Dtype sign_x = (Dtype(0) < in_data[index]) - (in_data[index] < Dtype(0));
out_diff[index] = in_diff[index] * sigmoid_x * (1.0 - sigmoid_x) * beta * sign_x;
threshold_diff[index] = sigmoid_x * (sigmoid_x - 1.0) * beta;
}
}
template <typename Dtype>
void SmoothThresholdLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
if (propagate_down[0])
{
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = bottom[0]->count();
Dtype* diff_th = diff.mutable_gpu_data();
ThresholdBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_data, bottom_diff, diff_th, beta);
CUDA_POST_KERNEL_CHECK;
Dtype* threshold_diff = this->blobs_[0]->mutable_cpu_diff();
caffe_gpu_dot(count, diff.gpu_data(), top[0]->gpu_diff(), threshold_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SmoothThresholdLayer);
}
|
29468a9b3d118dc0537ad7dffeb51f7badc476fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
template <typename T, typename IdT, bool PaddingFlag>
__global__ void LookupTableV2(T *output,
const T *table,
const IdT *ids,
const int64_t N,
const int64_t K,
const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += blockDim.x) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += blockDim.y * gridDim.x;
}
}
template <typename T, typename IdT>
__global__ void LookupTableV2Grad(T *table,
const T *output,
const IdT *ids,
const int64_t N,
const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
const T *out = output + idy * D;
T *tab = table + id * D;
#ifdef PADDLE_WITH_CUDA
phi::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab);
#else
for (int i = idx; i < D; i += blockDim.x) {
phi::CudaAtomicAdd(&tab[i], out[i]);
}
#endif
idy += blockDim.y * gridDim.x;
}
}
template <typename T>
struct LookupTableV2CUDAFunctor {
LookupTableV2CUDAFunctor(const framework::ExecutionContext &context,
const phi::DenseTensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto *table_t = context_.Input<phi::DenseTensor>("W");
auto *output_t = context_.Output<phi::DenseTensor>("Out");
int64_t padding_idx = context_.Attr<int64_t>("padding_idx");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t_->numel();
const int gridx = 2 * context_.cuda_device_context().GetSMCount();
dim3 threads(256, 4);
dim3 grids(gridx, 1);
const auto *table = table_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
auto *output = output_t->template mutable_data<T>(context_.GetPlace());
auto stream = context_.cuda_device_context().stream();
if (padding_idx == -1) {
hipLaunchKernelGGL(( LookupTableV2<T, IdT, false>), dim3(grids), dim3(threads), 0, stream,
output, table, ids, N, K, D, padding_idx);
} else {
hipLaunchKernelGGL(( LookupTableV2<T, IdT, true>), dim3(grids), dim3(threads), 0, stream,
output, table, ids, N, K, D, padding_idx);
}
}
private:
const framework::ExecutionContext &context_;
const phi::DenseTensor *ids_t_;
};
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<phi::DenseTensor>("Ids");
LookupTableV2CUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
template <typename InT, typename OutT>
__global__ void InputTypeConvert(const InT *in_ids,
const int64_t K,
OutT *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = static_cast<OutT>(in_ids[i]);
}
}
template <typename T>
struct LookupTableV2GradCUDAFunctor {
LookupTableV2GradCUDAFunctor(const framework::ExecutionContext &context,
const phi::DenseTensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto &dev_ctx = context_.template device_context<phi::GPUContext>();
bool is_sparse = context_.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *table = context_.Input<phi::DenseTensor>("W");
auto *d_output =
context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto *d_table =
context_.Output<phi::SelectedRows>(framework::GradVarName("W"));
const auto *ids_data = ids_t_->template data<IdT>();
int64_t ids_num = ids_t_->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
phi::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = context_.GetPlace();
phi::MixVector<int64_t> mixv_new_rows(&new_rows);
if (!std::is_same<IdT, int64_t>::value) {
hipLaunchKernelGGL(( InputTypeConvert), dim3(grids), dim3(threads), 0, stream,
ids_data, ids_num, mixv_new_rows.MutableData(gpu_place));
} else {
memory::Copy(gpu_place,
mixv_new_rows.CUDAMutableData(gpu_place),
gpu_place,
ids_data,
ids_num * sizeof(int64_t),
stream);
}
mixv_new_rows.CopyToCPU();
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->template mutable_data<T>(gpu_place);
auto *d_table_data = d_table_value->template data<T>();
auto *d_output_data = d_output->template data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(),
d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(),
d_output_dims_2d));
memory::Copy(gpu_place,
d_table_data,
gpu_place,
d_output_data,
d_output->numel() * sizeof(T),
stream);
} else {
auto d_output_t =
context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto d_table_t =
context_.Output<phi::DenseTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t_->numel();
const T *d_output = d_output_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
T *d_table = d_table_t->mutable_data<T>(context_.GetPlace());
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#endif
const int gridx = 2 * dev_ctx.GetSMCount();
dim3 threads(128, 8);
dim3 grids(gridx, 1);
hipLaunchKernelGGL(( LookupTableV2Grad<T, IdT>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids, N, K, D);
}
}
private:
const framework::ExecutionContext &context_;
const phi::DenseTensor *ids_t_;
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<phi::DenseTensor>("Ids");
LookupTableV2GradCUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
} // namespace operators
} // namespace paddle
| 29468a9b3d118dc0537ad7dffeb51f7badc476fa.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
template <typename T, typename IdT, bool PaddingFlag>
__global__ void LookupTableV2(T *output,
const T *table,
const IdT *ids,
const int64_t N,
const int64_t K,
const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += blockDim.x) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += blockDim.y * gridDim.x;
}
}
template <typename T, typename IdT>
__global__ void LookupTableV2Grad(T *table,
const T *output,
const IdT *ids,
const int64_t N,
const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
const T *out = output + idy * D;
T *tab = table + id * D;
#ifdef PADDLE_WITH_CUDA
phi::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab);
#else
for (int i = idx; i < D; i += blockDim.x) {
phi::CudaAtomicAdd(&tab[i], out[i]);
}
#endif
idy += blockDim.y * gridDim.x;
}
}
template <typename T>
struct LookupTableV2CUDAFunctor {
LookupTableV2CUDAFunctor(const framework::ExecutionContext &context,
const phi::DenseTensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto *table_t = context_.Input<phi::DenseTensor>("W");
auto *output_t = context_.Output<phi::DenseTensor>("Out");
int64_t padding_idx = context_.Attr<int64_t>("padding_idx");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t_->numel();
const int gridx = 2 * context_.cuda_device_context().GetSMCount();
dim3 threads(256, 4);
dim3 grids(gridx, 1);
const auto *table = table_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
auto *output = output_t->template mutable_data<T>(context_.GetPlace());
auto stream = context_.cuda_device_context().stream();
if (padding_idx == -1) {
LookupTableV2<T, IdT, false><<<grids, threads, 0, stream>>>(
output, table, ids, N, K, D, padding_idx);
} else {
LookupTableV2<T, IdT, true><<<grids, threads, 0, stream>>>(
output, table, ids, N, K, D, padding_idx);
}
}
private:
const framework::ExecutionContext &context_;
const phi::DenseTensor *ids_t_;
};
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<phi::DenseTensor>("Ids");
LookupTableV2CUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
template <typename InT, typename OutT>
__global__ void InputTypeConvert(const InT *in_ids,
const int64_t K,
OutT *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = static_cast<OutT>(in_ids[i]);
}
}
template <typename T>
struct LookupTableV2GradCUDAFunctor {
LookupTableV2GradCUDAFunctor(const framework::ExecutionContext &context,
const phi::DenseTensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto &dev_ctx = context_.template device_context<phi::GPUContext>();
bool is_sparse = context_.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *table = context_.Input<phi::DenseTensor>("W");
auto *d_output =
context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto *d_table =
context_.Output<phi::SelectedRows>(framework::GradVarName("W"));
const auto *ids_data = ids_t_->template data<IdT>();
int64_t ids_num = ids_t_->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
phi::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = context_.GetPlace();
phi::MixVector<int64_t> mixv_new_rows(&new_rows);
if (!std::is_same<IdT, int64_t>::value) {
InputTypeConvert<<<grids, threads, 0, stream>>>(
ids_data, ids_num, mixv_new_rows.MutableData(gpu_place));
} else {
memory::Copy(gpu_place,
mixv_new_rows.CUDAMutableData(gpu_place),
gpu_place,
ids_data,
ids_num * sizeof(int64_t),
stream);
}
mixv_new_rows.CopyToCPU();
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->template mutable_data<T>(gpu_place);
auto *d_table_data = d_table_value->template data<T>();
auto *d_output_data = d_output->template data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(),
d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(),
d_output_dims_2d));
memory::Copy(gpu_place,
d_table_data,
gpu_place,
d_output_data,
d_output->numel() * sizeof(T),
stream);
} else {
auto d_output_t =
context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto d_table_t =
context_.Output<phi::DenseTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t_->numel();
const T *d_output = d_output_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
T *d_table = d_table_t->mutable_data<T>(context_.GetPlace());
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#endif
const int gridx = 2 * dev_ctx.GetSMCount();
dim3 threads(128, 8);
dim3 grids(gridx, 1);
LookupTableV2Grad<T, IdT><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
}
}
private:
const framework::ExecutionContext &context_;
const phi::DenseTensor *ids_t_;
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<phi::DenseTensor>("Ids");
LookupTableV2GradCUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
} // namespace operators
} // namespace paddle
|
8eace921ecc611b8449ee8648fe576eff5ce4b0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <time.h>
#include <hip/hip_runtime.h>
#define RED 2
#define GREEN 1
#define BLUE 0
#define Channels 3
using namespace cv;
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
int pos = (row*width+col)*Channels;
imageOutput[row*width+col] = imageInput[pos+RED]*0.299 + imageInput[pos+GREEN]*0.587 + imageInput[pos+BLUE]*0.114;
}
}
int main(int argc, char **argv){
hipError_t error = hipSuccess;
//times
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
char* imageName = argv[1];
unsigned char *h_ImageInit;//*dataRawImage
unsigned char *d_ImageInit;//d_dataRawImage
unsigned char *d_imageOutput;
unsigned char *h_imageOutput;
//se carga la imagen
Mat image;
image = imread(imageName, 1);
if(argc !=2 || !image.data){
printf("No image Data \n");
return -1;
}
//se toman los parametros de la imagen
Size s = image.size();
int width = s.width;
int height = s.height;
//tamaño de la imagen
int size = sizeof(unsigned char)*width*height*image.channels(); //para la imagen normal (3 canales)
int sizeGray = sizeof(unsigned char)*width*height;//para la imagen en escala de grises (1 canal)
//reserve memory for Host and device ////////////////////////////////////////
//Imagen inicial en el Host
h_ImageInit = (unsigned char*)malloc(size);
//Imagen de salida del Device
h_imageOutput = (unsigned char *)malloc(sizeGray);
//Imagen inicial en el device
error = hipMalloc((void**)&d_ImageInit,size);
if(error != hipSuccess){
printf("Error reservando memoria para Imagen inicial en el device\n");
exit(-1);
}
//Imagen salida en el device
error = hipMalloc((void**)&d_imageOutput,sizeGray);
if(error != hipSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
///////////////////////////////////////////////////////////////////////////////
//se carga la imagen
h_ImageInit = image.data;
////////////////////////Algoritmo Paralelo /////////////////////////////////
//tiempo GPU
startGPU = clock();
//se copian los datos de la imagen del host al device
error = hipMemcpy(d_ImageInit, h_ImageInit,size, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);////bloque de 32 x 32 hilos = 1024 hilos
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
hipLaunchKernelGGL(( img2gray), dim3(dimGrid),dim3(dimBlock), 0, 0, d_ImageInit,width,height,d_imageOutput);
hipDeviceSynchronize();
//copian los datos de la imagen del device a la de salida del host
hipMemcpy(h_imageOutput,d_imageOutput,sizeGray,hipMemcpyDeviceToHost);
endGPU = clock();
Mat gray_image;
gray_image.create(height,width,CV_8UC1);
gray_image.data = h_imageOutput;
////////////////////////Algoritmo Paralelo /////////////////////////////////
////////////////////////Algoritmo OpenCV /////////////////////////////////
start = clock();
Mat gray_image_opencv;
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
end = clock();
////////////////////////Algoritmo OpenCV /////////////////////////////////
imwrite("./Gray_Image.jpg",gray_image);
//display times
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo Algoritmo Paralelo: %.10f\n",gpu_time_used);
cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
printf("Tiempo Algoritmo OpenCV: %.10f\n",cpu_time_used);
printf("La aceleracin obtenida es de %.10fX\n",cpu_time_used/gpu_time_used);
free(h_ImageInit);
free(h_imageOutput);
hipFree(d_ImageInit);
hipFree(d_imageOutput);
return 0;
}
| 8eace921ecc611b8449ee8648fe576eff5ce4b0b.cu | #include <stdlib.h>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <time.h>
#include <cuda.h>
#define RED 2
#define GREEN 1
#define BLUE 0
#define Channels 3
using namespace cv;
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
int pos = (row*width+col)*Channels;
imageOutput[row*width+col] = imageInput[pos+RED]*0.299 + imageInput[pos+GREEN]*0.587 + imageInput[pos+BLUE]*0.114;
}
}
int main(int argc, char **argv){
cudaError_t error = cudaSuccess;
//times
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
char* imageName = argv[1];
unsigned char *h_ImageInit;//*dataRawImage
unsigned char *d_ImageInit;//d_dataRawImage
unsigned char *d_imageOutput;
unsigned char *h_imageOutput;
//se carga la imagen
Mat image;
image = imread(imageName, 1);
if(argc !=2 || !image.data){
printf("No image Data \n");
return -1;
}
//se toman los parametros de la imagen
Size s = image.size();
int width = s.width;
int height = s.height;
//tamaño de la imagen
int size = sizeof(unsigned char)*width*height*image.channels(); //para la imagen normal (3 canales)
int sizeGray = sizeof(unsigned char)*width*height;//para la imagen en escala de grises (1 canal)
//reserve memory for Host and device ////////////////////////////////////////
//Imagen inicial en el Host
h_ImageInit = (unsigned char*)malloc(size);
//Imagen de salida del Device
h_imageOutput = (unsigned char *)malloc(sizeGray);
//Imagen inicial en el device
error = cudaMalloc((void**)&d_ImageInit,size);
if(error != cudaSuccess){
printf("Error reservando memoria para Imagen inicial en el device\n");
exit(-1);
}
//Imagen salida en el device
error = cudaMalloc((void**)&d_imageOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
///////////////////////////////////////////////////////////////////////////////
//se carga la imagen
h_ImageInit = image.data;
////////////////////////Algoritmo Paralelo /////////////////////////////////
//tiempo GPU
startGPU = clock();
//se copian los datos de la imagen del host al device
error = cudaMemcpy(d_ImageInit, h_ImageInit,size, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);////bloque de 32 x 32 hilos = 1024 hilos
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
img2gray<<<dimGrid,dimBlock>>>(d_ImageInit,width,height,d_imageOutput);
cudaDeviceSynchronize();
//copian los datos de la imagen del device a la de salida del host
cudaMemcpy(h_imageOutput,d_imageOutput,sizeGray,cudaMemcpyDeviceToHost);
endGPU = clock();
Mat gray_image;
gray_image.create(height,width,CV_8UC1);
gray_image.data = h_imageOutput;
////////////////////////Algoritmo Paralelo /////////////////////////////////
////////////////////////Algoritmo OpenCV /////////////////////////////////
start = clock();
Mat gray_image_opencv;
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
end = clock();
////////////////////////Algoritmo OpenCV /////////////////////////////////
imwrite("./Gray_Image.jpg",gray_image);
//display times
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo Algoritmo Paralelo: %.10f\n",gpu_time_used);
cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
printf("Tiempo Algoritmo OpenCV: %.10f\n",cpu_time_used);
printf("La aceleración obtenida es de %.10fX\n",cpu_time_used/gpu_time_used);
free(h_ImageInit);
free(h_imageOutput);
cudaFree(d_ImageInit);
cudaFree(d_imageOutput);
return 0;
}
|
271113dde023f5a12120db47450b9b2be529d11c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/LocalGeometry.h>
#include <cuda/random.h>
#include <cuda/whitted.h>
#include <sutil/vec_math.h>
#include <stdint.h>
#include <stdio.h>
extern "C"
{
__constant__ whitted::LaunchParams params;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
__device__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
return spec_color + ( make_float3( 1.0f ) - spec_color ) * powf( 1.0f - V_dot_H, 5.0f );
}
__device__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float ggx0 = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - alpha_sq ) + alpha_sq );
const float ggx1 = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - alpha_sq ) + alpha_sq );
return 2.0f * N_dot_L * N_dot_V / (ggx0+ggx1);
}
__device__ float ggxNormal( const float N_dot_H, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float N_dot_H_sq = N_dot_H*N_dot_H;
const float x = N_dot_H_sq*( alpha_sq - 1.0f ) + 1.0f;
return alpha_sq/( M_PIf*x*x );
}
__device__ float3 linearize( float3 c )
{
return make_float3(
powf( c.x, 2.2f ),
powf( c.y, 2.2f ),
powf( c.z, 2.2f )
);
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
whitted::PayloadRadiance* payload
)
{
uint32_t u0=0, u1=0, u2=0, u3=0;
optixTrace(
handle,
ray_origin, ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
whitted::RAY_TYPE_RADIANCE, // SBT offset
whitted::RAY_TYPE_COUNT, // SBT stride
whitted::RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1, u2, u3 );
payload->result.x = __int_as_float( u0 );
payload->result.y = __int_as_float( u1 );
payload->result.z = __int_as_float( u2 );
payload->depth = u3;
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
uint32_t occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
whitted::RAY_TYPE_OCCLUSION, // SBT offset
whitted::RAY_TYPE_COUNT, // SBT stride
whitted::RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ void setPayloadResult( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<uint32_t>( occluded ) );
}
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
const float gamma = 2.2f;
return make_uchar4(
static_cast<uint8_t>( powf( clamp( c.x, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.y, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.z, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
255u
);
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__pinhole()
{
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const int subframe_index = params.subframe_index;
//
// Generate camera ray
//
uint32_t seed = tea<4>( launch_idx.y*launch_dims.x + launch_idx.x, subframe_index );
const float2 subpixel_jitter = subframe_index == 0 ?
make_float2( 0.0f, 0.0f ) :
make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float3 ray_direction = normalize(d.x*U + d.y*V + W);
const float3 ray_origin = eye;
//
// Trace camera ray
//
whitted::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
    payload.depth = 0;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
    // TODO: time-view mode
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
float3 accum_color = payload.result;
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
params.frame_buffer[ image_index ] = make_color ( accum_color );
}
extern "C" __global__ void __miss__constant_radiance()
{
setPayloadResult( params.miss_color );
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
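// Closest-hit radiance program: fetch the hit group's material, optionally
// perturb the shading normal with a normal map, then accumulate direct
// lighting from each point light - a Lambertian diffuse term plus a GGX
// specular term - gated by a shadow (occlusion) ray toward the light.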
extern "C" __global__ void __closesthit__radiance()
{
const whitted::HitGroupData* hit_group_data = reinterpret_cast<whitted::HitGroupData*>( optixGetSbtDataPointer() );
const LocalGeometry geom = getLocalGeometry( hit_group_data->geometry_data );
//
// Retrieve material data
//
float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
if( hit_group_data->material_data.pbr.base_color_tex )
base_color *= linearize( make_float3(
tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y )
) );
float metallic = hit_group_data->material_data.pbr.metallic;
float roughness = hit_group_data->material_data.pbr.roughness;
float4 mr_tex = make_float4( 1.0f );
if( hit_group_data->material_data.pbr.metallic_roughness_tex )
// MR tex is (occlusion, roughness, metallic )
mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
roughness *= mr_tex.y;
metallic *= mr_tex.z;
//
// Convert to material params
//
const float F0 = 0.04f;
const float3 diff_color = base_color*( 1.0f - F0 )*( 1.0f - metallic );
const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
const float alpha = roughness*roughness;
//
// compute direct lighting
//
float3 N = geom.N;
if( hit_group_data->material_data.pbr.normal_tex )
{
const float4 NN = 2.0f*tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4(1.0f);
N = normalize( NN.x*normalize( geom.dpdu ) + NN.y*normalize( geom.dpdv ) + NN.z*geom.N );
}
float3 result = make_float3( 0.0f );
for( int i = 0; i < params.lights.count; ++i )
{
Light::Point light = params.lights[i];
// TODO: optimize
const float L_dist = length( light.position - geom.P );
const float3 L = ( light.position - geom.P ) / L_dist;
const float3 V = -normalize( optixGetWorldRayDirection() );
const float3 H = normalize( L + V );
const float N_dot_L = dot( N, L );
const float N_dot_V = dot( N, V );
const float N_dot_H = dot( N, H );
const float V_dot_H = dot( V, H );
if( N_dot_L > 0.0f && N_dot_V > 0.0f )
{
const float tmin = 0.001f; // TODO
const float tmax = L_dist - 0.001f; // TODO
const bool occluded = traceOcclusion( params.handle, geom.P, L, tmin, tmax );
if( !occluded )
{
const float3 F = schlick( spec_color, V_dot_H );
const float G_vis = vis( N_dot_L, N_dot_V, alpha );
const float D = ggxNormal( N_dot_H, alpha );
const float3 diff = ( 1.0f - F )*diff_color / M_PIf;
const float3 spec = F*G_vis*D;
result += light.color*light.intensity*N_dot_L*( diff + spec );
}
}
}
// TODO: add debug viewing mode that allows runtime switchable views of shading params, normals, etc
//result = make_float3( roughness );
//result = N*0.5f + make_float3( 0.5f );
//result = geom.N*0.5f + make_float3( 0.5f );
setPayloadResult( result );
}
| 271113dde023f5a12120db47450b9b2be529d11c.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/LocalGeometry.h>
#include <cuda/random.h>
#include <cuda/whitted.h>
#include <sutil/vec_math.h>
#include <stdint.h>
#include <stdio.h>
extern "C"
{
__constant__ whitted::LaunchParams params;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
__device__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
return spec_color + ( make_float3( 1.0f ) - spec_color ) * powf( 1.0f - V_dot_H, 5.0f );
}
__device__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float ggx0 = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - alpha_sq ) + alpha_sq );
const float ggx1 = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - alpha_sq ) + alpha_sq );
return 2.0f * N_dot_L * N_dot_V / (ggx0+ggx1);
}
__device__ float ggxNormal( const float N_dot_H, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float N_dot_H_sq = N_dot_H*N_dot_H;
const float x = N_dot_H_sq*( alpha_sq - 1.0f ) + 1.0f;
return alpha_sq/( M_PIf*x*x );
}
__device__ float3 linearize( float3 c )
{
return make_float3(
powf( c.x, 2.2f ),
powf( c.y, 2.2f ),
powf( c.z, 2.2f )
);
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
whitted::PayloadRadiance* payload
)
{
uint32_t u0=0, u1=0, u2=0, u3=0;
optixTrace(
handle,
ray_origin, ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
whitted::RAY_TYPE_RADIANCE, // SBT offset
whitted::RAY_TYPE_COUNT, // SBT stride
whitted::RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1, u2, u3 );
payload->result.x = __int_as_float( u0 );
payload->result.y = __int_as_float( u1 );
payload->result.z = __int_as_float( u2 );
payload->depth = u3;
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
uint32_t occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
whitted::RAY_TYPE_OCCLUSION, // SBT offset
whitted::RAY_TYPE_COUNT, // SBT stride
whitted::RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ void setPayloadResult( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<uint32_t>( occluded ) );
}
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
const float gamma = 2.2f;
return make_uchar4(
static_cast<uint8_t>( powf( clamp( c.x, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.y, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.z, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
255u
);
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
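// Ray-generation program: a jittered pinhole camera. The launch index plus a
// per-subframe subpixel jitter is mapped to [-1,1]^2 and combined with the
// U/V/W camera basis to build the primary ray. The traced radiance is blended
// into accum_buffer with weight 1/(subframe_index+1) for progressive
// refinement, then gamma-corrected into the 8-bit frame_buffer by make_color().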
extern "C" __global__ void __raygen__pinhole()
{
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const int subframe_index = params.subframe_index;
//
// Generate camera ray
//
uint32_t seed = tea<4>( launch_idx.y*launch_dims.x + launch_idx.x, subframe_index );
const float2 subpixel_jitter = subframe_index == 0 ?
make_float2( 0.0f, 0.0f ) :
make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float3 ray_direction = normalize(d.x*U + d.y*V + W);
const float3 ray_origin = eye;
//
// Trace camera ray
//
whitted::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
    payload.depth = 0;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
    // TODO: time-view mode
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
float3 accum_color = payload.result;
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
params.frame_buffer[ image_index ] = make_color ( accum_color );
}
extern "C" __global__ void __miss__constant_radiance()
{
setPayloadResult( params.miss_color );
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
const whitted::HitGroupData* hit_group_data = reinterpret_cast<whitted::HitGroupData*>( optixGetSbtDataPointer() );
const LocalGeometry geom = getLocalGeometry( hit_group_data->geometry_data );
//
// Retrieve material data
//
float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
if( hit_group_data->material_data.pbr.base_color_tex )
base_color *= linearize( make_float3(
tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y )
) );
float metallic = hit_group_data->material_data.pbr.metallic;
float roughness = hit_group_data->material_data.pbr.roughness;
float4 mr_tex = make_float4( 1.0f );
if( hit_group_data->material_data.pbr.metallic_roughness_tex )
// MR tex is (occlusion, roughness, metallic )
mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
roughness *= mr_tex.y;
metallic *= mr_tex.z;
//
// Convert to material params
//
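    // Standard metallic-roughness conversion: F0 = 0.04 is the assumed
    // dielectric base reflectance, the diffuse color is suppressed for metals,
    // the specular color is lerped from F0 toward base_color by "metallic",
    // and alpha = roughness^2 is the GGX roughness parameter.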
const float F0 = 0.04f;
const float3 diff_color = base_color*( 1.0f - F0 )*( 1.0f - metallic );
const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
const float alpha = roughness*roughness;
//
// compute direct lighting
//
float3 N = geom.N;
if( hit_group_data->material_data.pbr.normal_tex )
{
const float4 NN = 2.0f*tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4(1.0f);
N = normalize( NN.x*normalize( geom.dpdu ) + NN.y*normalize( geom.dpdv ) + NN.z*geom.N );
}
float3 result = make_float3( 0.0f );
for( int i = 0; i < params.lights.count; ++i )
{
Light::Point light = params.lights[i];
// TODO: optimize
const float L_dist = length( light.position - geom.P );
const float3 L = ( light.position - geom.P ) / L_dist;
const float3 V = -normalize( optixGetWorldRayDirection() );
const float3 H = normalize( L + V );
const float N_dot_L = dot( N, L );
const float N_dot_V = dot( N, V );
const float N_dot_H = dot( N, H );
const float V_dot_H = dot( V, H );
if( N_dot_L > 0.0f && N_dot_V > 0.0f )
{
const float tmin = 0.001f; // TODO
const float tmax = L_dist - 0.001f; // TODO
const bool occluded = traceOcclusion( params.handle, geom.P, L, tmin, tmax );
if( !occluded )
{
const float3 F = schlick( spec_color, V_dot_H );
const float G_vis = vis( N_dot_L, N_dot_V, alpha );
const float D = ggxNormal( N_dot_H, alpha );
const float3 diff = ( 1.0f - F )*diff_color / M_PIf;
const float3 spec = F*G_vis*D;
result += light.color*light.intensity*N_dot_L*( diff + spec );
}
}
}
// TODO: add debug viewing mode that allows runtime switchable views of shading params, normals, etc
//result = make_float3( roughness );
//result = N*0.5f + make_float3( 0.5f );
//result = geom.N*0.5f + make_float3( 0.5f );
setPayloadResult( result );
}
|
c1610003428b311ddf257ba905e8a478a0064dca.hip | // !!! This is a file automatically generated by hipify!!!
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchmatch.cuh"
#include "DeepAnalogy.cuh"
#include "WLS.h"
#include "Deconv.h"
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
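// norm(): per-pixel L2 normalization of a CHW feature map on the GPU.
// The channel-wise squared sum is computed with a ones-vector gemv, its square
// root is broadcast back over the channels and divided out, so each pixel's
// feature vector in dst has unit length. When `smooth` is non-NULL it receives
// the squared-norm response map rescaled to [0,1], which the blend kernel
// later uses together with the per-layer weight.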
__host__ void norm(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
hipMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
	// calculate dis, the per-pixel L2 norm
float*sum;
float* ones;
hipMalloc(&sum, dim.height*dim.width*sizeof(float));
hipMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
hipMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
hipMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), hipMemcpyDeviceToDevice);
int index;
float minv, maxv;
hipblasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
hipMemcpy(&minv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
hipblasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
hipMemcpy(&maxv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
hipFree(x2);
hipFree(ones);
hipFree(dis);
hipFree(sum);
}
DeepAnalogy::DeepAnalogy(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
file_A = "";
file_BP = "";
path_output = "";
path_model = "";
}
DeepAnalogy::~DeepAnalogy(){
}
void DeepAnalogy::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogy::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogy::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogy::SetModel(string path){
path_model =path;
}
void DeepAnalogy::SetA(string f_a){
file_A = f_a;
}
void DeepAnalogy::SetBPrime(string f_bp){
file_BP = f_bp;
}
void DeepAnalogy::SetOutputDir(string f_o){
path_output = f_o;
}
void DeepAnalogy::SetGPU(int no){
hipSetDevice(no);
}
void DeepAnalogy::SetId(int no1, int no2){
na = no1;
nbp = no2;
}
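// LoadInputs(): reads A and B' and repeatedly resizes each (bicubic) until both
// sides fall in the [200, 700] pixel range and the area is at most 350000
// pixels; if the constraints cannot be met the run is aborted. The original
// sizes are remembered so the outputs can be resized back at the end.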
void DeepAnalogy::LoadInputs(){
float ratio;
Mat ori_AL = imread(file_A);
Mat ori_BPL = imread(file_BP);
if (ori_AL.empty() || ori_BPL.empty())
{
		cout << "image cannot be read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
}
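// ComputeAnn(): the coarse-to-fine deep image analogy pass. VGG-19 features
// (conv5_1 ... conv1_1, then raw pixels) are extracted for both inputs; at each
// level the NNFs from the coarser level are upsampled, the content and
// reconstructed features are blended with the per-layer weight, normalized,
// and refined with bidirectional PatchMatch before being propagated to finer
// levels. At image resolution the NNFs reconstruct the two outputs (optionally
// refined by WLS photo transfer) and the flow fields are written to text files.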
void DeepAnalogy::ComputeAnn() {
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
	// scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
//load caffe
::google::InitGoogleLogging("deepanalogy");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
std::vector<float *> data_A, data_A1;
data_A.resize(params.layers.size());
data_A1.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A1, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
hipMalloc(¶ms_device_AB, param_size * sizeof(int));
hipMalloc(¶ms_device_BA, param_size * sizeof(int));
hipMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
hipMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
hipMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
hipMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsampling, notice this block's dimension is twice the ann at this point
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
}
	// normalize the two feature maps
float *Ndata_A, *Ndata_A1, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
hipMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_A1, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
hipMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_A1[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm(Ndata_A1, data_A1[curr_layer], NULL, data_A_size[curr_layer]);
norm(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_A1, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_A1, ann_device_BA, annd_device_BA, params_device_BA);
hipFree(Ndata_A);
hipFree(Ndata_A1);
hipFree(Ndata_B);
hipFree(Ndata_BP);
hipFree(response_A);
hipFree(response_BP);
//deconv
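	// avg_vote warps the other image's features through the current NNF
	// (averaging patch votes) and deconv() back-projects that target through
	// VGG-19 to give an initial guess of the features at a finer layer, which
	// later PatchMatch passes refine.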
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_A1[next_layer], data_A_size[next_layer]);
hipFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
hipFree(target);
}
}
//upsample
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
	// upsample
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
	// image downscale
Mat flow, result_AB, result_BA, err, out, normal;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
sprintf(fname, "resultAB_%d_%d.png", na, nbp);
imwrite(path_output + fname, out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
sprintf(fname, "resultBA_%d_%d.png", na, nbp);
imwrite(path_output + fname, out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
sprintf(fname, "refineAB_%d_%d.png", na, nbp);
refine_AB.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
sprintf(fname, "refineBA_%d_%d.png", na, nbp);
refine_BA.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
}
}
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB_%d_%d.txt", na, nbp);
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA_%d_%d.txt", na, nbp);
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
hipFree(params_device_AB);
hipFree(ann_device_AB);
hipFree(annd_device_AB);
hipFree(params_device_BA);
hipFree(ann_device_BA);
hipFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
hipFree(data_A[i]);
hipFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
| c1610003428b311ddf257ba905e8a478a0064dca.cu | #include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchmatch.cuh"
#include "DeepAnalogy.cuh"
#include "WLS.h"
#include "Deconv.h"
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
__host__ void norm(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
cudaMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
	// calculate dis, the per-pixel L2 norm
float*sum;
float* ones;
cudaMalloc(&sum, dim.height*dim.width*sizeof(float));
cudaMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
cudaMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
cudaMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), cudaMemcpyDeviceToDevice);
int index;
float minv, maxv;
cublasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&minv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
cublasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&maxv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
cudaFree(x2);
cudaFree(ones);
cudaFree(dis);
cudaFree(sum);
}
DeepAnalogy::DeepAnalogy(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
file_A = "";
file_BP = "";
path_output = "";
path_model = "";
}
DeepAnalogy::~DeepAnalogy(){
}
void DeepAnalogy::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogy::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogy::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogy::SetModel(string path){
path_model =path;
}
void DeepAnalogy::SetA(string f_a){
file_A = f_a;
}
void DeepAnalogy::SetBPrime(string f_bp){
file_BP = f_bp;
}
void DeepAnalogy::SetOutputDir(string f_o){
path_output = f_o;
}
void DeepAnalogy::SetGPU(int no){
cudaSetDevice(no);
}
void DeepAnalogy::SetId(int no1, int no2){
na = no1;
nbp = no2;
}
void DeepAnalogy::LoadInputs(){
float ratio;
Mat ori_AL = imread(file_A);
Mat ori_BPL = imread(file_BP);
if (ori_AL.empty() || ori_BPL.empty())
{
		cout << "image cannot be read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
}
void DeepAnalogy::ComputeAnn() {
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
	// scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
//load caffe
::google::InitGoogleLogging("deepanalogy");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
std::vector<float *> data_A, data_A1;
data_A.resize(params.layers.size());
data_A1.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A1, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
cudaMalloc(¶ms_device_AB, param_size * sizeof(int));
cudaMalloc(¶ms_device_BA, param_size * sizeof(int));
cudaMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
cudaMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
cudaMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
cudaMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsampling, notice this block's dimension is twice the ann at this point
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
}
	// normalize the two feature maps
float *Ndata_A, *Ndata_A1, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
cudaMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_A1, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
cudaMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_A1[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm(Ndata_A1, data_A1[curr_layer], NULL, data_A_size[curr_layer]);
norm(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_A1, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_A1, ann_device_BA, annd_device_BA, params_device_BA);
cudaFree(Ndata_A);
cudaFree(Ndata_A1);
cudaFree(Ndata_B);
cudaFree(Ndata_BP);
cudaFree(response_A);
cudaFree(response_BP);
//deconv
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_A1[next_layer], data_A_size[next_layer]);
cudaFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
cudaFree(target);
}
}
//upsample
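	// Final stage at full image resolution: upsample the last NNFs to the
	// image sizes, copy them to the host, and reconstruct the averaged result
	// images in both directions (resultAB / resultBA). When photo transfer is
	// enabled, a weighted-least-squares filter refines the color transfer
	// before the images are written out.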
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
	// upsample
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
	// image downscale
Mat flow, result_AB, result_BA, err, out, normal;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
sprintf(fname, "resultAB_%d_%d.png", na, nbp);
imwrite(path_output + fname, out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
sprintf(fname, "resultBA_%d_%d.png", na, nbp);
imwrite(path_output + fname, out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
sprintf(fname, "refineAB_%d_%d.png", na, nbp);
refine_AB.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
sprintf(fname, "refineBA_%d_%d.png", na, nbp);
refine_BA.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
}
}
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB_%d_%d.txt", na, nbp);
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA_%d_%d.txt", na, nbp);
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
cudaFree(params_device_AB);
cudaFree(ann_device_AB);
cudaFree(annd_device_AB);
cudaFree(params_device_BA);
cudaFree(ann_device_BA);
cudaFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
cudaFree(data_A[i]);
cudaFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
|