hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
2ab7c3611b2256a4fa16f9d606a608a263eaaf6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/layers/misc/dist_embedding.hpp"
#ifdef LBANN_HAS_NVSHMEM
#include "lbann/utils/cuda.hpp"
#include "lbann/utils/nvshmem.hpp"
namespace lbann
{
namespace
{
// Typedefs
using Size2 = cuda::array<size_t, 2>;
template <typename T>
using VectorMetadata = typename dist_embedding_layer<T,data_layout::DATA_PARALLEL,El::Device::GPU>::vector_metadata;
/** Copy between two device buffers, using all threads in a warp. */
template <typename T> __device__ __forceinline__
T* memcpy_warp(T* __restrict__ dest, const T* __restrict__ src, size_t n) {
constexpr size_t warp_size = 32;
for (size_t i = threadIdx.x; i < n; i += warp_size) {
dest[i] = src[i];
}
__syncwarp();
return dest;
}
/** See El::AbstractDistMatrix::ColOwner. */
__device__ __forceinline__
size_t distmat_index_owner(size_t global_index, size_t align, size_t stride) {
return (global_index + align) % stride;
}
/** See El::AbstractDistMatrix::GlobalCol. */
__device__ __forceinline__
size_t distmat_global_index(size_t local_index, size_t shift, size_t stride) {
return shift + local_index * stride;
}
/** See El::AbstractDistMatrix::LocalCol. */
__device__ __forceinline__
size_t distmat_local_index(size_t global_index, size_t rank, size_t align, size_t stride) {
auto shift = (stride + rank - align) % stride;
if (global_index > shift) {
return (global_index - shift - 1) / stride + 1;
}
else {
return 0;
}
}
/** Launch a CUDA kernel.
*
* @todo Check that argument types match kernel signature.
*/
template <typename Kernel, typename... Args>
inline void launch_cuda_kernel(
const Kernel& kernel,
dim3 grid_dims,
dim3 block_dims,
size_t shared_mem,
hipStream_t stream,
Args... args) {
void* arg_list[] = {
const_cast<void*>(reinterpret_cast<const void*>(&args))...
};
CHECK_CUDA(
cudaLaunchKernel(
reinterpret_cast<const void*>(&kernel),
grid_dims,
block_dims,
arg_list,
shared_mem,
stream));
}
/** Launch a collective NVSHMEM kernel.
*
* Needed for device-side NVSHMEM synchronization calls like
* nvshmem_wait. If grid_dims is zero, then NVSHMEM will launch the kernel
* with the largest available grid.
*
* @todo Check that argument types match kernel signature.
*/
template <typename Kernel, typename... Args>
inline void launch_nvshmem_collective_kernel(
const Kernel& kernel,
dim3 grid_dims,
dim3 block_dims,
size_t shared_mem,
hipStream_t stream,
Args... args) {
if (grid_dims.x == 0) {
grid_dims.y = 0;
grid_dims.z = 0;
}
void* arg_list[] = {
const_cast<void*>(reinterpret_cast<const void*>(&args))...
};
auto status = nvshmemx_collective_launch(
reinterpret_cast<const void*>(&kernel),
grid_dims,
block_dims,
arg_list,
shared_mem,
stream);
if (status != 0) {
LBANN_ERROR(
"Failed to launch NVSHMEM collective kernel ",
"(error ",status,")");
}
}
} // namespace <anon>
// ---------------------------------------------
// Life cycle and setup
// ---------------------------------------------
template <typename TensorDataType, data_layout Layout, El::Device Device>
dist_embedding_layer<TensorDataType,Layout,Device>::~dist_embedding_layer()
{
if (m_embeddings_buffer != nullptr) {
nvshmem_free(m_embeddings_buffer);
}
if (m_workspace_buffer != nullptr) {
nvshmem_free(m_workspace_buffer);
}
if (m_metadata_buffer != nullptr) {
nvshmem_free(m_metadata_buffer);
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::attach_embeddings_to_shmem_buffer() {
if (m_embeddings_buffer != nullptr || m_embeddings_buffer_size != 0) {
LBANN_ERROR("attempted to attach embedding matrix ",
"to NVSHMEM buffer multiple times");
}
// Embedding weights matrix
using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>;
auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0));
const auto dist = embeddings.DistData();
if (dist.device != El::Device::GPU) {
LBANN_ERROR("attempted to attach non-GPU matrix to NVSHMEM buffer");
}
#if 0 // nvshmem_addr_accessible is not supported as of NVSHMEM 1.4
if (nvshmem_addr_accessible(embeddings.LockedBuffer(), nvshmem_my_pe())) {
return;
}
#endif
// Calculate size of NVSHMEM buffer
const auto col_comm_size = El::mpi::Size(embeddings.ColComm());
const auto row_comm_size = El::mpi::Size(embeddings.RowComm());
const auto height = embeddings.Height();
const auto width = embeddings.Width();
const auto local_height = (height + col_comm_size - 1) / col_comm_size;
const auto local_width = (width + row_comm_size - 1) / row_comm_size;
m_embeddings_buffer_size = local_height * local_width * sizeof(TensorDataType);
if (m_embeddings_buffer_size == 0) {
return;
}
// Allocate NVSHMEM buffer
m_embeddings_buffer = nvshmem::malloc<TensorDataType>(m_embeddings_buffer_size);
// Attach matrix to NVSHMEM buffer
std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> orig_mat(
embeddings.Construct(embeddings.Grid(), embeddings.Root()));
*orig_mat = std::move(embeddings);
embeddings.Empty();
embeddings.AlignWith(dist);
dynamic_cast<El::ElementalMatrix<TensorDataType>&>(embeddings).Attach(
height, width,
*dist.grid, dist.colAlign, dist.rowAlign,
m_embeddings_buffer, local_height, dist.root);
El::Copy(*orig_mat, embeddings);
}
// ---------------------------------------------
// Forward prop
// ---------------------------------------------
namespace
{
/** Request embedding vectors from owner processes.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: input_dims[1] x input_dims[0] x 1
*/
template <typename T>
__global__ void request_embeddings_kernel(
size_t embedding_dim,
Size2 input_dims,
const T* __restrict__ input,
Size2 input_strides,
const T* __restrict__ embeddings,
Size2 embeddings_strides,
VectorMetadata<T>* __restrict__ metadata,
Size2 metadata_strides,
T* __restrict__ workspace,
Size2 workspace_strides,
size_t rank,
size_t input_rowshift,
size_t input_rowstride,
size_t embeddings_rowalign,
size_t embeddings_rowstride) {
// Indices
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx;
const size_t i_start = bidx * i_per_block;
const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]);
for (size_t j = bidy; j < input_dims[0]; j += nblocksy) {
for (size_t i = i_start; i < i_end; ++i) {
const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride);
// Get embedding vector index
const auto& global_index_float = input[i*input_strides[1] + j*input_strides[0]];
const auto& global_index = static_cast<size_t>(cuda::floor(global_index_float));
// Figure out which process owns embedding vector
__shared__ unsigned char metadata_shared[sizeof(VectorMetadata<T>)];
auto& m = *reinterpret_cast<VectorMetadata<T>*>(metadata_shared);
if (threadIdx.x == 0) {
m.source_rank = distmat_index_owner(global_index, embeddings_rowalign, embeddings_rowstride);
m.source_index = distmat_local_index(global_index, m.source_rank, embeddings_rowalign, embeddings_rowstride);
m.target_rank = rank;
m.target_index = i + global_j*input_dims[1];
m.is_active = true;
metadata[i*metadata_strides[1] + global_j*metadata_strides[0]] = m;
}
__syncwarp();
// Get embedding vector from owner process
nvshmemx_getmem_nbi_warp(
&workspace[m.target_index * workspace_strides[0]],
&embeddings[m.source_index * embeddings_strides[0]],
embedding_dim*sizeof(T),
m.source_rank);
}
}
}
/** Copy embedding vectors to output tensor.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: input_dims[1] x input_dims[0] x 1
*/
template <typename T>
__global__ void copy_embeddings_kernel(
size_t embedding_dim,
Size2 input_dims,
const VectorMetadata<T>* __restrict__ metadata,
Size2 metadata_strides,
const T* __restrict__ workspace,
Size2 workspace_strides,
T* __restrict__ output,
Size2 output_strides,
size_t input_rowshift,
size_t input_rowstride) {
// Indices
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx;
const size_t i_start = bidx * i_per_block;
const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]);
for (size_t j = bidy; j < input_dims[0]; j += nblocksy) {
for (size_t i = i_start; i < i_end; ++i) {
const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride);
const auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]];
memcpy_warp(
&output[i*embedding_dim + j*output_strides[0]],
&workspace[m.target_index * workspace_strides[0]],
embedding_dim);
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::fp_compute() {
// Data matrices
// Note: Make sure to get original weight values since they are in
// SHMEM buffer.
using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>;
const auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0));
const auto& input = this->get_prev_activations();
const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix());
auto& local_output = dynamic_cast<LocalMat&>(this->get_local_activations());
// Dimensions
const size_t input_size = this->get_input_size();
const size_t output_size = this->get_output_size();
const size_t mini_batch_size = input.Width();
const size_t local_mini_batch_size = local_input.Width();
// GPU objects
auto&& stream = hydrogen::cuda::GetDefaultStream();
nvshmem::initialize();
// Barrier to handle gradient checking
/// @todo Think of a way to avoid this synchronization
if (m_barrier_in_forward_prop) {
nvshmemx_barrier_all_on_stream(stream);
}
// Synchronize non-blocking barrier
// Note: Make sure embeddings are up-to-date and NVSHMEM workspaces
// are safe to reset.
auto& comm = *this->get_comm();
comm.wait(m_nb_barrier_request);
// Initialize NVSHMEM buffer for communicating embedding vectors
if (m_workspace_buffer_size < output_size * mini_batch_size) {
m_workspace_buffer_size = output_size * mini_batch_size;
m_workspace_buffer = nvshmem::realloc(m_workspace_buffer,
m_workspace_buffer_size);
}
LocalMat workspace(
m_embedding_dim,
input_size * mini_batch_size,
m_workspace_buffer,
m_embedding_dim);
// Initialize NVSHMEM buffer for embedding vector metadata
if (m_metadata_buffer_size < input_size * mini_batch_size) {
m_metadata_buffer_size = input_size * mini_batch_size;
m_metadata_buffer = nvshmem::realloc(m_metadata_buffer,
m_metadata_buffer_size);
}
CHECK_CUDA(
hipMemsetAsync(
m_metadata_buffer,
0,
m_metadata_buffer_size*sizeof(vector_metadata),
stream));
// Request embedding vectors from owning processes
const size_t rank = comm.get_rank_in_trainer();
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 32;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = input_size;
grid_dims.y = local_mini_batch_size;
launch_cuda_kernel(
request_embeddings_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
stream,
m_embedding_dim,
Size2{local_mini_batch_size, input_size},
local_input.LockedBuffer(),
Size2{size_t(local_input.LDim()), 1},
embeddings.LockedBuffer(),
Size2{size_t(embeddings.LDim()), 1},
m_metadata_buffer,
Size2{input_size, 1},
workspace.Buffer(),
Size2{size_t(workspace.LDim()), 1},
size_t(rank),
size_t(input.RowShift()),
size_t(input.RowStride()),
size_t(embeddings.RowAlign()),
size_t(embeddings.RowStride()));
}
nvshmemx_quiet_on_stream(stream);
// Copy embedding vectors to output tensor
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 32;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = input_size;
grid_dims.y = local_mini_batch_size;
launch_cuda_kernel(
copy_embeddings_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
stream,
m_embedding_dim,
Size2{local_mini_batch_size, input_size},
m_metadata_buffer,
Size2{input_size, 1},
workspace.LockedBuffer(),
Size2{size_t(workspace.LDim()), 1},
local_output.Buffer(),
Size2{size_t(local_output.LDim()), 1},
size_t(input.RowShift()),
size_t(input.RowStride()));
}
// Non-blocking barrier
// Note: NVSHMEM workspaces are ready to receive gradients.
nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request);
}
// ---------------------------------------------
// Backprop
// ---------------------------------------------
namespace
{
/** Send gradients to owner processes.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: input_dims[1] x input_dims[0] x 1
*/
template <typename T>
__global__ void send_gradients_kernel(
size_t embedding_dim,
Size2 input_dims,
const T* __restrict__ output_grad,
Size2 output_grad_strides,
VectorMetadata<T>* __restrict__ metadata,
Size2 metadata_strides,
T* __restrict__ workspace,
Size2 workspace_strides,
size_t input_rowshift,
size_t input_rowstride) {
// Indices
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
// Assign metadata to CUDA blocks
const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx;
const size_t i_start = bidx * i_per_block;
const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]);
// Send gradients to owner processes
for (size_t j = bidy; j < input_dims[0]; j += nblocksy) {
for (size_t i = i_start; i < i_end; ++i) {
const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride);
auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]];
auto* workspace_ptr = &workspace[m.target_index * workspace_strides[0]];
memcpy_warp(
workspace_ptr,
&output_grad[i*embedding_dim + j*output_grad_strides[0]],
embedding_dim);
if (m.source_rank != m.target_rank) {
nvshmemx_putmem_nbi_warp(
workspace_ptr,
workspace_ptr,
embedding_dim*sizeof(T),
m.source_rank);
nvshmemx_putmem_nbi_warp(
&m,
&m,
sizeof(VectorMetadata<T>),
m.source_rank);
}
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::bp_compute() {
// Data matrices
const auto& input = this->get_prev_activations();
const auto& local_output_grad = dynamic_cast<const LocalMat&>(this->get_local_prev_error_signals());
// Dimensions
const size_t input_size = this->get_input_size();
const size_t mini_batch_size = input.Width();
const size_t local_mini_batch_size = local_output_grad.Width();
// GPU objects
auto&& stream = hydrogen::cuda::GetDefaultStream();
// Synchronize non-blocking barrier
// Note: Make sure NVSHMEM workspaces are ready to receive gradients.
auto& comm = *this->get_comm();
comm.wait(m_nb_barrier_request);
// Initialize NVSHMEM buffer for gradient w.r.t. embeddings
LocalMat workspace(
m_embedding_dim,
input_size * mini_batch_size,
m_workspace_buffer,
m_embedding_dim);
// Send gradients to owner processes
if (!local_output_grad.IsEmpty()) {
constexpr size_t block_size = 32;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = input_size;
grid_dims.y = local_mini_batch_size;
launch_cuda_kernel(
send_gradients_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
stream,
m_embedding_dim,
Size2{local_mini_batch_size, input_size},
local_output_grad.LockedBuffer(),
Size2{size_t(local_output_grad.LDim()), 1},
m_metadata_buffer,
Size2{input_size, 1},
workspace.Buffer(),
Size2{size_t(workspace.LDim()), 1},
size_t(input.RowShift()),
size_t(input.RowStride()));
}
nvshmemx_quiet_on_stream(stream);
// Non-blocking barrier
// Note: Gradients have been sent.
nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request);
// Use dense optimizer if needed
if (!m_sparse_sgd) {
// Create buffer for dense gradients
const auto& embeddings = this->weights_values(0);
std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> embeddings_grad(
embeddings.Construct(embeddings.Grid(), embeddings.Root()));
embeddings_grad->AlignWith(embeddings);
El::Zeros(*embeddings_grad, embeddings.Height(), embeddings.Width());
auto& local_embeddings_grad = dynamic_cast<LocalMat&>(embeddings_grad->Matrix());
// Apply SGD step to convert sparse gradients to dense gradients
apply_sparse_sgd_step(
input_size * mini_batch_size,
local_embeddings_grad);
// Send dense gradients to dense optimizer
auto* opt = this->get_weights(0).get_optimizer();
if (opt != nullptr) {
opt->add_to_gradient(*embeddings_grad);
}
}
}
// ---------------------------------------------
// Sparse SGD
// ---------------------------------------------
namespace
{
/** Sparse SGD on local embeddings.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: num_gradients x 1 x 1
*/
template <typename T>
__global__ void sgd_kernel(
T learning_rate,
size_t embedding_dim,
size_t num_gradients,
const VectorMetadata<T>* __restrict__ metadata,
const T* __restrict__ embeddings_grad,
Size2 embeddings_grad_strides,
T* __restrict__ embeddings,
Size2 embeddings_strides,
size_t rank) {
// Indices
const size_t tid = threadIdx.x;
const size_t bid = blockIdx.x;
const size_t nblocks = gridDim.x;
constexpr size_t warp_size = 32;
// Assign requests to CUDA blocks
const size_t gradients_per_block = (num_gradients + nblocks - 1) / nblocks;
const size_t i_start = bid * gradients_per_block;
const size_t i_end = cuda::min((bid+1) * gradients_per_block, num_gradients);
for (size_t i = i_start; i < i_end; ++i) {
const auto& m = metadata[i];
if (m.is_active && m.source_rank == rank) {
// Update embedding vector with gradient
const auto* __restrict__ dw = &embeddings_grad[m.target_index * embeddings_grad_strides[0]];
auto* __restrict__ w = &embeddings[m.source_index * embeddings_strides[0]];
for (size_t k = tid; k < embedding_dim; k += warp_size) {
cuda::atomic_add(&w[k], -learning_rate * dw[k]);
}
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::apply_sparse_sgd_step(
size_t num_gradients,
LocalMat& local_embeddings) {
// GPU objects
auto&& stream = hydrogen::cuda::GetDefaultStream();
// Synchronize non-blocking barrier
// Note: Make sure gradients have been received.
auto& comm = *this->get_comm();
comm.wait(m_nb_barrier_request);
// Initialize SHMEM buffer for gradient w.r.t. embeddings
LocalMat local_embeddings_grad(
m_embedding_dim,
num_gradients,
m_workspace_buffer,
m_embedding_dim);
// Sparse SGD on local embeddings
const size_t rank = comm.get_rank_in_trainer();
constexpr size_t block_size = 32;
const size_t grid_size = num_gradients;
launch_cuda_kernel(
sgd_kernel<TensorDataType>,
grid_size,
block_size,
0,
stream,
m_learning_rate,
m_embedding_dim,
num_gradients,
m_metadata_buffer,
local_embeddings_grad.LockedBuffer(),
Size2{size_t(local_embeddings_grad.LDim()), 1},
local_embeddings.Buffer(),
Size2{size_t(local_embeddings.LDim()), 1},
rank);
}
// ---------------------------------------------
// Explicit template instantiation
// ---------------------------------------------
/// @todo fp16
template class dist_embedding_layer<
float, data_layout::DATA_PARALLEL, El::Device::GPU>;
template class dist_embedding_layer<
double, data_layout::DATA_PARALLEL, El::Device::GPU>;
} // namespace lbann
#endif // LBANN_HAS_NVSHMEM
|
2ab7c3611b2256a4fa16f9d606a608a263eaaf6d.cu
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/layers/misc/dist_embedding.hpp"
#ifdef LBANN_HAS_NVSHMEM
#include "lbann/utils/cuda.hpp"
#include "lbann/utils/nvshmem.hpp"
namespace lbann
{
namespace
{
// Typedefs
using Size2 = cuda::array<size_t, 2>;
template <typename T>
using VectorMetadata = typename dist_embedding_layer<T,data_layout::DATA_PARALLEL,El::Device::GPU>::vector_metadata;
/** Copy between two device buffers, using all threads in a warp. */
template <typename T> __device__ __forceinline__
T* memcpy_warp(T* __restrict__ dest, const T* __restrict__ src, size_t n) {
constexpr size_t warp_size = 32;
for (size_t i = threadIdx.x; i < n; i += warp_size) {
dest[i] = src[i];
}
__syncwarp();
return dest;
}
/** See El::AbstractDistMatrix::ColOwner. */
__device__ __forceinline__
size_t distmat_index_owner(size_t global_index, size_t align, size_t stride) {
return (global_index + align) % stride;
}
/** See El::AbstractDistMatrix::GlobalCol. */
__device__ __forceinline__
size_t distmat_global_index(size_t local_index, size_t shift, size_t stride) {
return shift + local_index * stride;
}
/** See El::AbstractDistMatrix::LocalCol. */
__device__ __forceinline__
size_t distmat_local_index(size_t global_index, size_t rank, size_t align, size_t stride) {
auto shift = (stride + rank - align) % stride;
if (global_index > shift) {
return (global_index - shift - 1) / stride + 1;
}
else {
return 0;
}
}
/** Launch a CUDA kernel.
*
* @todo Check that argument types match kernel signature.
*/
template <typename Kernel, typename... Args>
inline void launch_cuda_kernel(
const Kernel& kernel,
dim3 grid_dims,
dim3 block_dims,
size_t shared_mem,
cudaStream_t stream,
Args... args) {
void* arg_list[] = {
const_cast<void*>(reinterpret_cast<const void*>(&args))...
};
CHECK_CUDA(
cudaLaunchKernel(
reinterpret_cast<const void*>(&kernel),
grid_dims,
block_dims,
arg_list,
shared_mem,
stream));
}
/** Launch a collective NVSHMEM kernel.
*
* Needed for device-side NVSHMEM synchronization calls like
* nvshmem_wait. If grid_dims is zero, then NVSHMEM will launch the kernel
* with the largest available grid.
*
* @todo Check that argument types match kernel signature.
*/
template <typename Kernel, typename... Args>
inline void launch_nvshmem_collective_kernel(
const Kernel& kernel,
dim3 grid_dims,
dim3 block_dims,
size_t shared_mem,
cudaStream_t stream,
Args... args) {
if (grid_dims.x == 0) {
grid_dims.y = 0;
grid_dims.z = 0;
}
void* arg_list[] = {
const_cast<void*>(reinterpret_cast<const void*>(&args))...
};
auto status = nvshmemx_collective_launch(
reinterpret_cast<const void*>(&kernel),
grid_dims,
block_dims,
arg_list,
shared_mem,
stream);
if (status != 0) {
LBANN_ERROR(
"Failed to launch NVSHMEM collective kernel ",
"(error ",status,")");
}
}
} // namespace <anon>
// ---------------------------------------------
// Life cycle and setup
// ---------------------------------------------
template <typename TensorDataType, data_layout Layout, El::Device Device>
dist_embedding_layer<TensorDataType,Layout,Device>::~dist_embedding_layer()
{
if (m_embeddings_buffer != nullptr) {
nvshmem_free(m_embeddings_buffer);
}
if (m_workspace_buffer != nullptr) {
nvshmem_free(m_workspace_buffer);
}
if (m_metadata_buffer != nullptr) {
nvshmem_free(m_metadata_buffer);
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::attach_embeddings_to_shmem_buffer() {
if (m_embeddings_buffer != nullptr || m_embeddings_buffer_size != 0) {
LBANN_ERROR("attempted to attach embedding matrix ",
"to NVSHMEM buffer multiple times");
}
// Embedding weights matrix
using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>;
auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0));
const auto dist = embeddings.DistData();
if (dist.device != El::Device::GPU) {
LBANN_ERROR("attempted to attach non-GPU matrix to NVSHMEM buffer");
}
#if 0 // nvshmem_addr_accessible is not supported as of NVSHMEM 1.4
if (nvshmem_addr_accessible(embeddings.LockedBuffer(), nvshmem_my_pe())) {
return;
}
#endif
// Calculate size of NVSHMEM buffer
const auto col_comm_size = El::mpi::Size(embeddings.ColComm());
const auto row_comm_size = El::mpi::Size(embeddings.RowComm());
const auto height = embeddings.Height();
const auto width = embeddings.Width();
const auto local_height = (height + col_comm_size - 1) / col_comm_size;
const auto local_width = (width + row_comm_size - 1) / row_comm_size;
m_embeddings_buffer_size = local_height * local_width * sizeof(TensorDataType);
if (m_embeddings_buffer_size == 0) {
return;
}
// Allocate NVSHMEM buffer
m_embeddings_buffer = nvshmem::malloc<TensorDataType>(m_embeddings_buffer_size);
// Attach matrix to NVSHMEM buffer
std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> orig_mat(
embeddings.Construct(embeddings.Grid(), embeddings.Root()));
*orig_mat = std::move(embeddings);
embeddings.Empty();
embeddings.AlignWith(dist);
dynamic_cast<El::ElementalMatrix<TensorDataType>&>(embeddings).Attach(
height, width,
*dist.grid, dist.colAlign, dist.rowAlign,
m_embeddings_buffer, local_height, dist.root);
El::Copy(*orig_mat, embeddings);
}
// ---------------------------------------------
// Forward prop
// ---------------------------------------------
namespace
{
/** Request embedding vectors from owner processes.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: input_dims[1] x input_dims[0] x 1
*/
template <typename T>
__global__ void request_embeddings_kernel(
size_t embedding_dim,
Size2 input_dims,
const T* __restrict__ input,
Size2 input_strides,
const T* __restrict__ embeddings,
Size2 embeddings_strides,
VectorMetadata<T>* __restrict__ metadata,
Size2 metadata_strides,
T* __restrict__ workspace,
Size2 workspace_strides,
size_t rank,
size_t input_rowshift,
size_t input_rowstride,
size_t embeddings_rowalign,
size_t embeddings_rowstride) {
// Indices
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx;
const size_t i_start = bidx * i_per_block;
const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]);
for (size_t j = bidy; j < input_dims[0]; j += nblocksy) {
for (size_t i = i_start; i < i_end; ++i) {
const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride);
// Get embedding vector index
const auto& global_index_float = input[i*input_strides[1] + j*input_strides[0]];
const auto& global_index = static_cast<size_t>(cuda::floor(global_index_float));
// Figure out which process owns embedding vector
__shared__ unsigned char metadata_shared[sizeof(VectorMetadata<T>)];
auto& m = *reinterpret_cast<VectorMetadata<T>*>(metadata_shared);
if (threadIdx.x == 0) {
m.source_rank = distmat_index_owner(global_index, embeddings_rowalign, embeddings_rowstride);
m.source_index = distmat_local_index(global_index, m.source_rank, embeddings_rowalign, embeddings_rowstride);
m.target_rank = rank;
m.target_index = i + global_j*input_dims[1];
m.is_active = true;
metadata[i*metadata_strides[1] + global_j*metadata_strides[0]] = m;
}
__syncwarp();
// Get embedding vector from owner process
nvshmemx_getmem_nbi_warp(
&workspace[m.target_index * workspace_strides[0]],
&embeddings[m.source_index * embeddings_strides[0]],
embedding_dim*sizeof(T),
m.source_rank);
}
}
}
/** Copy embedding vectors to output tensor.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: input_dims[1] x input_dims[0] x 1
*/
template <typename T>
__global__ void copy_embeddings_kernel(
size_t embedding_dim,
Size2 input_dims,
const VectorMetadata<T>* __restrict__ metadata,
Size2 metadata_strides,
const T* __restrict__ workspace,
Size2 workspace_strides,
T* __restrict__ output,
Size2 output_strides,
size_t input_rowshift,
size_t input_rowstride) {
// Indices
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx;
const size_t i_start = bidx * i_per_block;
const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]);
for (size_t j = bidy; j < input_dims[0]; j += nblocksy) {
for (size_t i = i_start; i < i_end; ++i) {
const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride);
const auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]];
memcpy_warp(
&output[i*embedding_dim + j*output_strides[0]],
&workspace[m.target_index * workspace_strides[0]],
embedding_dim);
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::fp_compute() {
// Data matrices
// Note: Make sure to get original weight values since they are in
// SHMEM buffer.
using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>;
const auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0));
const auto& input = this->get_prev_activations();
const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix());
auto& local_output = dynamic_cast<LocalMat&>(this->get_local_activations());
// Dimensions
const size_t input_size = this->get_input_size();
const size_t output_size = this->get_output_size();
const size_t mini_batch_size = input.Width();
const size_t local_mini_batch_size = local_input.Width();
// GPU objects
auto&& stream = hydrogen::cuda::GetDefaultStream();
nvshmem::initialize();
// Barrier to handle gradient checking
/// @todo Think of a way to avoid this synchronization
if (m_barrier_in_forward_prop) {
nvshmemx_barrier_all_on_stream(stream);
}
// Synchronize non-blocking barrier
// Note: Make sure embeddings are up-to-date and NVSHMEM workspaces
// are safe to reset.
auto& comm = *this->get_comm();
comm.wait(m_nb_barrier_request);
// Initialize NVSHMEM buffer for communicating embedding vectors
if (m_workspace_buffer_size < output_size * mini_batch_size) {
m_workspace_buffer_size = output_size * mini_batch_size;
m_workspace_buffer = nvshmem::realloc(m_workspace_buffer,
m_workspace_buffer_size);
}
LocalMat workspace(
m_embedding_dim,
input_size * mini_batch_size,
m_workspace_buffer,
m_embedding_dim);
// Initialize NVSHMEM buffer for embedding vector metadata
if (m_metadata_buffer_size < input_size * mini_batch_size) {
m_metadata_buffer_size = input_size * mini_batch_size;
m_metadata_buffer = nvshmem::realloc(m_metadata_buffer,
m_metadata_buffer_size);
}
CHECK_CUDA(
cudaMemsetAsync(
m_metadata_buffer,
0,
m_metadata_buffer_size*sizeof(vector_metadata),
stream));
// Request embedding vectors from owning processes
const size_t rank = comm.get_rank_in_trainer();
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 32;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = input_size;
grid_dims.y = local_mini_batch_size;
launch_cuda_kernel(
request_embeddings_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
stream,
m_embedding_dim,
Size2{local_mini_batch_size, input_size},
local_input.LockedBuffer(),
Size2{size_t(local_input.LDim()), 1},
embeddings.LockedBuffer(),
Size2{size_t(embeddings.LDim()), 1},
m_metadata_buffer,
Size2{input_size, 1},
workspace.Buffer(),
Size2{size_t(workspace.LDim()), 1},
size_t(rank),
size_t(input.RowShift()),
size_t(input.RowStride()),
size_t(embeddings.RowAlign()),
size_t(embeddings.RowStride()));
}
nvshmemx_quiet_on_stream(stream);
// Copy embedding vectors to output tensor
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 32;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = input_size;
grid_dims.y = local_mini_batch_size;
launch_cuda_kernel(
copy_embeddings_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
stream,
m_embedding_dim,
Size2{local_mini_batch_size, input_size},
m_metadata_buffer,
Size2{input_size, 1},
workspace.LockedBuffer(),
Size2{size_t(workspace.LDim()), 1},
local_output.Buffer(),
Size2{size_t(local_output.LDim()), 1},
size_t(input.RowShift()),
size_t(input.RowStride()));
}
// Non-blocking barrier
// Note: NVSHMEM workspaces are ready to receive gradients.
nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request);
}
// ---------------------------------------------
// Backprop
// ---------------------------------------------
namespace
{
/** Send gradients to owner processes.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: input_dims[1] x input_dims[0] x 1
*/
template <typename T>
__global__ void send_gradients_kernel(
size_t embedding_dim,
Size2 input_dims,
const T* __restrict__ output_grad,
Size2 output_grad_strides,
VectorMetadata<T>* __restrict__ metadata,
Size2 metadata_strides,
T* __restrict__ workspace,
Size2 workspace_strides,
size_t input_rowshift,
size_t input_rowstride) {
// Indices
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
// Assign metadata to CUDA blocks
const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx;
const size_t i_start = bidx * i_per_block;
const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]);
// Send gradients to owner processes
for (size_t j = bidy; j < input_dims[0]; j += nblocksy) {
for (size_t i = i_start; i < i_end; ++i) {
const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride);
auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]];
auto* workspace_ptr = &workspace[m.target_index * workspace_strides[0]];
memcpy_warp(
workspace_ptr,
&output_grad[i*embedding_dim + j*output_grad_strides[0]],
embedding_dim);
if (m.source_rank != m.target_rank) {
nvshmemx_putmem_nbi_warp(
workspace_ptr,
workspace_ptr,
embedding_dim*sizeof(T),
m.source_rank);
nvshmemx_putmem_nbi_warp(
&m,
&m,
sizeof(VectorMetadata<T>),
m.source_rank);
}
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::bp_compute() {
// Data matrices
const auto& input = this->get_prev_activations();
const auto& local_output_grad = dynamic_cast<const LocalMat&>(this->get_local_prev_error_signals());
// Dimensions
const size_t input_size = this->get_input_size();
const size_t mini_batch_size = input.Width();
const size_t local_mini_batch_size = local_output_grad.Width();
// GPU objects
auto&& stream = hydrogen::cuda::GetDefaultStream();
// Synchronize non-blocking barrier
// Note: Make sure NVSHMEM workspaces are ready to receive gradients.
auto& comm = *this->get_comm();
comm.wait(m_nb_barrier_request);
// Initialize NVSHMEM buffer for gradient w.r.t. embeddings
LocalMat workspace(
m_embedding_dim,
input_size * mini_batch_size,
m_workspace_buffer,
m_embedding_dim);
// Send gradients to owner processes
if (!local_output_grad.IsEmpty()) {
constexpr size_t block_size = 32;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = input_size;
grid_dims.y = local_mini_batch_size;
launch_cuda_kernel(
send_gradients_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
stream,
m_embedding_dim,
Size2{local_mini_batch_size, input_size},
local_output_grad.LockedBuffer(),
Size2{size_t(local_output_grad.LDim()), 1},
m_metadata_buffer,
Size2{input_size, 1},
workspace.Buffer(),
Size2{size_t(workspace.LDim()), 1},
size_t(input.RowShift()),
size_t(input.RowStride()));
}
nvshmemx_quiet_on_stream(stream);
// Non-blocking barrier
// Note: Gradients have been sent.
nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request);
// Use dense optimizer if needed
if (!m_sparse_sgd) {
// Create buffer for dense gradients
const auto& embeddings = this->weights_values(0);
std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> embeddings_grad(
embeddings.Construct(embeddings.Grid(), embeddings.Root()));
embeddings_grad->AlignWith(embeddings);
El::Zeros(*embeddings_grad, embeddings.Height(), embeddings.Width());
auto& local_embeddings_grad = dynamic_cast<LocalMat&>(embeddings_grad->Matrix());
// Apply SGD step to convert sparse gradients to dense gradients
apply_sparse_sgd_step(
input_size * mini_batch_size,
local_embeddings_grad);
// Send dense gradients to dense optimizer
auto* opt = this->get_weights(0).get_optimizer();
if (opt != nullptr) {
opt->add_to_gradient(*embeddings_grad);
}
}
}
// ---------------------------------------------
// Sparse SGD
// ---------------------------------------------
namespace
{
/** Sparse SGD on local embeddings.
*
* Block dimensions: 32 x 1 x 1
*
* Grid dimensions: num_gradients x 1 x 1
*/
template <typename T>
__global__ void sgd_kernel(
T learning_rate,
size_t embedding_dim,
size_t num_gradients,
const VectorMetadata<T>* __restrict__ metadata,
const T* __restrict__ embeddings_grad,
Size2 embeddings_grad_strides,
T* __restrict__ embeddings,
Size2 embeddings_strides,
size_t rank) {
// Indices
const size_t tid = threadIdx.x;
const size_t bid = blockIdx.x;
const size_t nblocks = gridDim.x;
constexpr size_t warp_size = 32;
// Assign requests to CUDA blocks
const size_t gradients_per_block = (num_gradients + nblocks - 1) / nblocks;
const size_t i_start = bid * gradients_per_block;
const size_t i_end = cuda::min((bid+1) * gradients_per_block, num_gradients);
for (size_t i = i_start; i < i_end; ++i) {
const auto& m = metadata[i];
if (m.is_active && m.source_rank == rank) {
// Update embedding vector with gradient
const auto* __restrict__ dw = &embeddings_grad[m.target_index * embeddings_grad_strides[0]];
auto* __restrict__ w = &embeddings[m.source_index * embeddings_strides[0]];
for (size_t k = tid; k < embedding_dim; k += warp_size) {
cuda::atomic_add(&w[k], -learning_rate * dw[k]);
}
}
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void dist_embedding_layer<TensorDataType,Layout,Device>::apply_sparse_sgd_step(
size_t num_gradients,
LocalMat& local_embeddings) {
// GPU objects
auto&& stream = hydrogen::cuda::GetDefaultStream();
// Synchronize non-blocking barrier
// Note: Make sure gradients have been received.
auto& comm = *this->get_comm();
comm.wait(m_nb_barrier_request);
// Initialize SHMEM buffer for gradient w.r.t. embeddings
LocalMat local_embeddings_grad(
m_embedding_dim,
num_gradients,
m_workspace_buffer,
m_embedding_dim);
// Sparse SGD on local embeddings
const size_t rank = comm.get_rank_in_trainer();
constexpr size_t block_size = 32;
const size_t grid_size = num_gradients;
launch_cuda_kernel(
sgd_kernel<TensorDataType>,
grid_size,
block_size,
0,
stream,
m_learning_rate,
m_embedding_dim,
num_gradients,
m_metadata_buffer,
local_embeddings_grad.LockedBuffer(),
Size2{size_t(local_embeddings_grad.LDim()), 1},
local_embeddings.Buffer(),
Size2{size_t(local_embeddings.LDim()), 1},
rank);
}
// ---------------------------------------------
// Explicit template instantiation
// ---------------------------------------------
/// @todo fp16
template class dist_embedding_layer<
float, data_layout::DATA_PARALLEL, El::Device::GPU>;
template class dist_embedding_layer<
double, data_layout::DATA_PARALLEL, El::Device::GPU>;
} // namespace lbann
#endif // LBANN_HAS_NVSHMEM
|
f4d7d3d4521cb78a0487a7ecc53599cc9441846d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// device = GPU
// host = CPU
// This will run on the device (GPU).
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
// host copies of a, b, c
int a, b, c;
// device copies of a, b, c
int *d_a, *d_b, *d_c;
int size = sizeof(int);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Setup input values
a = 4;
b = 6;
// Copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
// Cleanup
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
printf("%d + %d = %d", a, b, c);
return 0;
}
|
f4d7d3d4521cb78a0487a7ecc53599cc9441846d.cu
|
#include "cuda_runtime.h"
#include <stdio.h>
// device = GPU
// host = CPU
// This will run on the device (GPU).
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
// host copies of a, b, c
int a, b, c;
// device copies of a, b, c
int *d_a, *d_b, *d_c;
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Setup input values
a = 4;
b = 6;
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("%d + %d = %d", a, b, c);
return 0;
}
|
b44906c16731f2c9a803224ccd643617c2802bcc.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <common/allocatorAdapter.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <raft/comms/comms.hpp>
#include <raft/cuda_utils.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/mr/device/allocator.hpp>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
// TODO: replace these thrust code with cuda kernels or prims
template <typename T>
void findMaxAbsOfColumns(T *input, int n_rows, int n_cols, T *max_vals,
std::shared_ptr<raft::mr::device::allocator> allocator,
hipStream_t stream, bool row_major = false) {
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
auto n = n_cols;
ML::thrustAllocatorAdapter alloc(allocator, stream);
auto execution_policy = thrust::hip::par(alloc).on(stream);
if (row_major) {
thrust::for_each(execution_policy, counting, counting + n_rows,
[=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx;
int end = d_i + (m * n);
for (int i = d_i; i < end; i = i + m) {
T val = input[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
max_vals[idx] = input[max_index];
});
} else {
thrust::for_each(execution_policy, counting, counting + n_cols,
[=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx * m;
int end = d_i + m;
for (int i = d_i; i < end; i++) {
T val = input[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
max_vals[idx] = input[max_index];
});
}
}
// TODO: replace these thrust code with cuda kernels or prims
template <typename T>
void flip(T *input, int n_rows, int n_cols, T *max_vals,
std::shared_ptr<raft::mr::device::allocator> allocator,
hipStream_t stream) {
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
ML::thrustAllocatorAdapter alloc(allocator, stream);
auto execution_policy = thrust::hip::par(alloc).on(stream);
thrust::for_each(execution_policy, counting, counting + n_cols,
[=] __device__(int idx) {
int d_i = idx * m;
int end = d_i + m;
if (max_vals[idx] < 0.0) {
for (int i = d_i; i < end; i++) {
input[i] = -input[i];
}
}
});
}
/**
* @brief sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen vectors
* @input param handle: the internal cuml handle object
* @input/output param input: input matrix that will be used to determine the sign.
* @input param input_desc: MNMG description of the input
* @input/output param components: components matrix.
* @input param n_components: number of columns of components matrix
* @input param streams: cuda streams
* @input param n_streams: number of streams
* @{
*/
template <typename T>
void sign_flip_imp(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input,
Matrix::PartDescriptor &input_desc, T *components,
int n_components, hipStream_t *streams, int n_stream) {
int rank = handle.get_comms().get_rank();
const auto &comm = handle.get_comms();
const auto allocator = handle.get_device_allocator();
std::vector<Matrix::RankSizePair *> local_blocks =
input_desc.blocksOwnedBy(rank);
device_buffer<T> max_vals(
allocator, streams[0],
::max(size_t(comm.get_size()), local_blocks.size()) * n_components);
for (int i = 0; i < input.size(); i++) {
T *mv_loc = max_vals.data() + (i * n_components);
findMaxAbsOfColumns(input[i]->ptr, local_blocks[i]->size, n_components,
mv_loc, allocator, streams[i % n_stream]);
}
for (int i = 0; i < n_stream; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
findMaxAbsOfColumns(max_vals.data(), n_components, local_blocks.size(),
max_vals.data(), allocator, streams[0], true);
comm.allgather(max_vals.data(), max_vals.data(), n_components, streams[0]);
comm.sync_stream(streams[0]);
findMaxAbsOfColumns(max_vals.data(), n_components, comm.get_size(),
max_vals.data(), allocator, streams[0], true);
for (int i = 0; i < local_blocks.size(); i++) {
flip(input[i]->ptr, local_blocks[i]->size, n_components, max_vals.data(),
allocator, streams[i % n_stream]);
}
for (int i = 0; i < n_stream; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
flip(components, input_desc.N, n_components, max_vals.data(), allocator,
streams[0]);
}
void sign_flip(raft::handle_t &handle,
std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc, float *components,
int n_components, hipStream_t *streams, int n_stream) {
sign_flip_imp(handle, input_data, input_desc, components, n_components,
streams, n_stream);
}
void sign_flip(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc, double *components,
int n_components, hipStream_t *streams, int n_stream) {
sign_flip_imp(handle, input_data, input_desc, components, n_components,
streams, n_stream);
}
} // namespace opg
} // namespace PCA
} // namespace ML
|
b44906c16731f2c9a803224ccd643617c2802bcc.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <common/allocatorAdapter.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <raft/comms/comms.hpp>
#include <raft/cuda_utils.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/mr/device/allocator.hpp>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
// TODO: replace these thrust code with cuda kernels or prims
template <typename T>
void findMaxAbsOfColumns(T *input, int n_rows, int n_cols, T *max_vals,
std::shared_ptr<raft::mr::device::allocator> allocator,
cudaStream_t stream, bool row_major = false) {
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
auto n = n_cols;
ML::thrustAllocatorAdapter alloc(allocator, stream);
auto execution_policy = thrust::cuda::par(alloc).on(stream);
if (row_major) {
thrust::for_each(execution_policy, counting, counting + n_rows,
[=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx;
int end = d_i + (m * n);
for (int i = d_i; i < end; i = i + m) {
T val = input[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
max_vals[idx] = input[max_index];
});
} else {
thrust::for_each(execution_policy, counting, counting + n_cols,
[=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx * m;
int end = d_i + m;
for (int i = d_i; i < end; i++) {
T val = input[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
max_vals[idx] = input[max_index];
});
}
}
// TODO: replace these thrust code with cuda kernels or prims
template <typename T>
void flip(T *input, int n_rows, int n_cols, T *max_vals,
std::shared_ptr<raft::mr::device::allocator> allocator,
cudaStream_t stream) {
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
ML::thrustAllocatorAdapter alloc(allocator, stream);
auto execution_policy = thrust::cuda::par(alloc).on(stream);
thrust::for_each(execution_policy, counting, counting + n_cols,
[=] __device__(int idx) {
int d_i = idx * m;
int end = d_i + m;
if (max_vals[idx] < 0.0) {
for (int i = d_i; i < end; i++) {
input[i] = -input[i];
}
}
});
}
/**
* @brief sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen vectors
* @input param handle: the internal cuml handle object
* @input/output param input: input matrix that will be used to determine the sign.
* @input param input_desc: MNMG description of the input
* @input/output param components: components matrix.
* @input param n_components: number of columns of components matrix
* @input param streams: cuda streams
* @input param n_streams: number of streams
* @{
*/
template <typename T>
void sign_flip_imp(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input,
Matrix::PartDescriptor &input_desc, T *components,
int n_components, cudaStream_t *streams, int n_stream) {
int rank = handle.get_comms().get_rank();
const auto &comm = handle.get_comms();
const auto allocator = handle.get_device_allocator();
std::vector<Matrix::RankSizePair *> local_blocks =
input_desc.blocksOwnedBy(rank);
device_buffer<T> max_vals(
allocator, streams[0],
std::max(size_t(comm.get_size()), local_blocks.size()) * n_components);
for (int i = 0; i < input.size(); i++) {
T *mv_loc = max_vals.data() + (i * n_components);
findMaxAbsOfColumns(input[i]->ptr, local_blocks[i]->size, n_components,
mv_loc, allocator, streams[i % n_stream]);
}
for (int i = 0; i < n_stream; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
findMaxAbsOfColumns(max_vals.data(), n_components, local_blocks.size(),
max_vals.data(), allocator, streams[0], true);
comm.allgather(max_vals.data(), max_vals.data(), n_components, streams[0]);
comm.sync_stream(streams[0]);
findMaxAbsOfColumns(max_vals.data(), n_components, comm.get_size(),
max_vals.data(), allocator, streams[0], true);
for (int i = 0; i < local_blocks.size(); i++) {
flip(input[i]->ptr, local_blocks[i]->size, n_components, max_vals.data(),
allocator, streams[i % n_stream]);
}
for (int i = 0; i < n_stream; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
flip(components, input_desc.N, n_components, max_vals.data(), allocator,
streams[0]);
}
void sign_flip(raft::handle_t &handle,
std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc, float *components,
int n_components, cudaStream_t *streams, int n_stream) {
sign_flip_imp(handle, input_data, input_desc, components, n_components,
streams, n_stream);
}
void sign_flip(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc, double *components,
int n_components, cudaStream_t *streams, int n_stream) {
sign_flip_imp(handle, input_data, input_desc, components, n_components,
streams, n_stream);
}
} // namespace opg
} // namespace PCA
} // namespace ML
|
0b3f1fa02a7c0060d397a2a8e2e030494db25c4b.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by 21459 on 11/25/2020.
//
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "book.h"
#include "cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__constant__ Sphere s[SPHERES];
__global__ void kernel( unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate temp memory, initialize it, copy to constant
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
HANDLE_ERROR( hipMemcpyToSymbol( s, temp_s,
sizeof(Sphere) * SPHERES) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
// display
bitmap.display_and_exit();
}
|
0b3f1fa02a7c0060d397a2a8e2e030494db25c4b.cu
|
//
// Created by 21459 on 11/25/2020.
//
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include "book.h"
#include "cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__constant__ Sphere s[SPHERES];
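// The sphere array lives in __constant__ memory: it is read-only for the kernel,
// and because every thread of a warp reads the same s[i] in the loop below, the
// value is broadcast from the constant cache instead of being fetched per thread.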
__global__ void kernel( unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate temp memory, initialize it, copy to constant
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
HANDLE_ERROR( cudaMemcpyToSymbol( s, temp_s,
sizeof(Sphere) * SPHERES) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>( dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
// display
bitmap.display_and_exit();
}
|
220120a3634b3e5754c40575fa4f6f5392807d5c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**************************************************************
The code in time.h is a part of a course on cuda taught by its authors:
Lokman A. Abbas-Turki
**************************************************************/
#include "timer.h"
/**************************************************************
Common functions
**************************************************************/
// Compare function for qsort
int compare_function(const void *a,const void *b) {
float *x = (float *) a;
float *y = (float *) b;
if (*x < *y) return - 1;
else if (*x > *y) return 1;
return 0;
}
// Generate gaussian vector using Box Muller
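// Given independent uniforms u1, u2 in (0, 1], sqrtf(-2 * logf(u1)) * cosf(2 * M_PI * u2)
// is a standard normal sample, which is then scaled by sigma and shifted by mu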
void gaussian_vector(float *v, float mu, float sigma, int n) {
for (int i = 0; i<n; i++){
float u1 = (float)rand()/(float)(RAND_MAX);
float u2 = (float)rand()/(float)(RAND_MAX);
v[i] = sigma * (sqrtf( -2 * logf(u1)) * cosf(2 * M_PI * u2)) + mu;
}
}
//Function to print a small vector of floats on host
void print_vector(float *c, int m, int n) {
for (int i=0; i<m; i++){
printf("%f ", c[i]);
printf("\n");
}
}
/**************************************************************
CPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
void square_vector(float *z, float *znorm, int n){
for (int i = 0; i < n; i++) {
float zi = z[i];
float zsqri = zi * zi;
z[i] = zsqri;
znorm[0] += zsqri;
}
}
// Function for computing f (the secular function of interest) at a given point x
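// f(x) = rho + sum_i zsqr[i] / (d[i] - x). With rho > 0 and zsqr[i] > 0, f is
// increasing between consecutive poles d[i], so it has exactly one root in each
// interval (d[k], d[k+1]) and one more root to the right of d[n-1]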
float secfunc(float *d, float *zsqr, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqr[i] / (d[i] - x);
}
return rho + sum;
}
// Function for computing f' (the prime derivative of the secular function of interest) at a given point x
float secfunc_prime(float *d, float *zsqr, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
float secfunc_second(float *d, float *zsqr, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
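// Both branches return the same root t = (a - sqrtf(a*a - 4*b*c)) / (2*c) of the
// quadratic c*t^2 - a*t + b = 0; when a > 0 the rationalized form
// 2*b / (a + sqrtf(...)) avoids the cancellation between a and the square root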
float discrimant_int(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
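// Same idea for the other root t = (a + sqrtf(a*a - 4*b*c)) / (2*c); the
// rationalized form 2*b / (a - sqrtf(...)) is used when a < 0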
float discrimant_ext(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
// h partition of the secular function, used for Initialization
float h_secfunc(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
float initialization_int(float *d, float *zsqr, float rho, int k, int n){
float d_k = d[k];
float d_kplus1 = d[k + 1];
float zsqr_k = zsqr[k];
float zsqr_kplus1 = zsqr[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc(d, zsqr, rho, middle, n);
float c = f - h_secfunc(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
float initialization_ext(float *d, float *zsqr, float *znorm, float rho, int n){
float d_nminus1 = d[n - 1];
float d_nminus2 = d[n - 2];
float d_n = d_nminus1 + znorm[0] / rho;
float zsqr_nminus1 = zsqr[n - 1];
float zsqr_nminus2 = zsqr[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc(d, zsqr, rho, middle, n);
if (f <= 0){
float hd = h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
float a_gragg(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
float b_gragg(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
float c_gragg(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
float eta_int(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg(f, fprime, delta_k, delta_kplus1);
float b = b_gragg(f, delta_k, delta_kplus1);
float c = c_gragg(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
float eta_ext(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg(f, delta_nminus2, delta_nminus1);
float c = c_gragg(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
float find_root_int(float *d, float *zsqr, float rho, float x, int k, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float f = secfunc(d, zsqr, rho, x, n);
float d_k = d[k];
float d_kplus1 = d[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_int(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
*loss_CPU += (float)(fabsf(f)/n);
return x;
}
// Iterate to find the last root (the exterior one)
float find_root_ext(float *d, float *zsqr, float rho, float x, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float d_nminus2 = d[n - 2];
float d_nminus1 = d[n - 1];
float f = secfunc(d, zsqr, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_ext(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
*loss_CPU += (float)(fabsf(f)/n);
return x;
}
void find_roots(float *xstar, float *x0, float *d, float *zsqr, float *znorm, float rho, int n, int maxit, float epsilon, float *loss_CPU){
// We make sure that the loss is set to 0
*loss_CPU =0;
for (int i=0; i<n-1; i++){
xstar[i] = find_root_int(d, zsqr, rho, x0[i], i, n, maxit, epsilon, loss_CPU);
}
xstar[n - 1] = find_root_ext(d, zsqr, rho, x0[n - 1], n, maxit, epsilon, loss_CPU);
}
void initialize_x0(float *x0, float *d, float *zsqr, float *znorm, float rho, int n){
for (int i=0; i<n-1; i++){
x0[i] = initialization_int(d, zsqr, rho, i, n);
}
x0[n - 1] = initialization_ext(d, zsqr, znorm, rho, n);
}
/**************************************************************
GPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
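// Each thread walks the vector with a grid-stride loop and accumulates the squared
// norm into *znormGPU with atomicAdd, so znormGPU must be zeroed (hipMemset in main)
// before this kernel runs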
__global__ void square_kernel_g(float *zsqrGPU, float *znormGPU, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while(idx < n){
float zi = zsqrGPU[idx];
float zsqr_i = zi * zi;
zsqrGPU[idx] = zi * zi;
atomicAdd(znormGPU, zsqr_i);
idx += gridDim.x * blockDim.x;
}
}
// Device function for computing f (the secular function of interest) at a given point x
__device__ float secfunc_g(float *dGPU, float *zsqrGPU, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqrGPU[i] / (dGPU[i] - x);
}
return rho + sum;
}
// Device function for computing f' (the prime derivative of the secular function of interest) at a given point x
__device__ float secfunc_prime_g(float *dGPU, float *zsqrGPU, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
__device__ float secfunc_second_g(float *dGPU, float *zsqrGPU, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
__device__ float discrimant_int_g(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
__device__ float discrimant_ext_g(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
// h partition of the secular function, used for Initialization
__device__ float h_secfunc_g(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_int_g(float *dGPU, float *zsqrGPU, float rho, int k, int n){
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
float zsqr_k = zsqrGPU[k];
float zsqr_kplus1 = zsqrGPU[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
float c = f - h_secfunc_g(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int_g(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int_g(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_ext_g(float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
float d_nminus1 = dGPU[n - 1];
float d_nminus2 = dGPU[n - 2];
float d_n = d_nminus1 + znormGPU[0] / rho;
float zsqr_nminus1 = zsqrGPU[n - 1];
float zsqr_nminus2 = zsqrGPU[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
if (f <= 0){
float hd = h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
__device__ float a_gragg_g(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
__device__ float b_gragg_g(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
__device__ float c_gragg_g(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
__device__ float eta_int_g(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg_g(f, fprime, delta_k, delta_kplus1);
float b = b_gragg_g(f, delta_k, delta_kplus1);
float c = c_gragg_g(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int_g(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
__device__ float eta_ext_g(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg_g(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg_g(f, delta_nminus2, delta_nminus1);
float c = c_gragg_g(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext_g(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
__device__ float find_root_int_g(float *dGPU, float *zsqrGPU, float rho, float x, int k, int n, int maxit, float epsilon, float * avloss_GPU){
int i = 0;
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_int_g(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
// Save the loss
atomicAdd(avloss_GPU, (float)(fabsf(f)/n));
return x;
}
// Iterate to find the last root (the exterior one)
__device__ float find_root_ext_g(float *dGPU, float *zsqrGPU, float rho, float x, int n, int maxit, float epsilon, float* avloss_GPU){
int i = 0;
float d_nminus2 = dGPU[n - 2];
float d_nminus1 = dGPU[n - 1];
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_ext_g(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Save the loss
atomicAdd(avloss_GPU, (float)(fabsf(f)/n));
return x;
}
// Kernel to launch and distribute the searching of roots among GPU cores
__global__ void find_roots_kernel_g(float *xstarGPU, float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n, int maxit, float epsilon, float *avloss_GPU){
__shared__ float rho_shared, epsilon_shared;
__shared__ int n_shared, maxit_shared;
rho_shared = rho;
epsilon_shared = epsilon;
n_shared = n;
maxit_shared = maxit;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// The loss accumulator avloss_GPU is zeroed on the host (hipMemset) before this
// kernel is launched; resetting it here from every thread would race with the
// atomicAdd updates made by other blocks
// First core gets search of the last root (the exterior one)
if (idx == 0){
float x = x0GPU[n - 1];
xstarGPU[n - 1] = find_root_ext_g(dGPU, zsqrGPU, rho_shared, x, n_shared, maxit_shared, epsilon_shared, avloss_GPU);
}
// Each next core searches one interval (interior interval)
else {
while (idx < n) {
float x = x0GPU[idx - 1];
xstarGPU[idx - 1] = find_root_int_g(dGPU, zsqrGPU, rho_shared, x, idx - 1, n_shared, maxit_shared, epsilon_shared, avloss_GPU);
// in case we have not launched enough cores to cover all intervals
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to compute the initial guesses from the paper on GPU
__global__ void initialize_x0_kernel_g(float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
__shared__ float znormGPU_shared, rho_shared;
__shared__ int n_shared;
znormGPU_shared = *znormGPU;
rho_shared = rho;
n_shared = n;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// First core compute the initial guess for last root (the exterior one)
if (idx == 0){
x0GPU[n - 1] = initialization_ext_g(dGPU, zsqrGPU, &znormGPU_shared, rho_shared, n_shared);
}
// Each next core compute initial guess for one interval (interior interval)
else {
while (idx < n) {
x0GPU[idx - 1] = initialization_int_g(dGPU, zsqrGPU, rho_shared, idx - 1, n_shared);
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to "wake up" the GPU
__global__ void wake_up(int *test){
__shared__ int c;
c = 3;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 1024)
{
test[idx] += c;
}
}
int main (void) {
/****************** Access for writing ******************/
FILE *f = fopen("result_mem.csv", "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
fprintf(f, "n;iter;niter;time_GPU_mem;time_CPU;loss_GPU_mem;loss_CPU\n");
/****************** Declaration ******************/
// Declare vectors or floats
float *d, *z, *zsqr, *znorm, *x0, *xstar, *loss_GPU, *loss_CPU;
// rho parameter
float rho = 10;
// Size of arrow matrix chosen by the user
int n, nlow, nhigh, step, niter, choice;
printf("\nLowest n to test? \n");
scanf("%d", &nlow);
printf("\nHighest n to test? \n");
scanf("%d", &nhigh);
printf("\nSize of the step? \n");
scanf("%d", &step);
printf("\nNumber of iterations of the same n to avoid stochastic error? \n");
scanf("%d", &niter);
printf("\nDo you wish to test both algorithms (type 1) or GPU only (type 0)\n");
scanf("%d", &choice);
printf("\n \n******************* CHOICE OF N ******************** \n");
printf("We compare the chosen algorithms every %d n, for n between %d and %d \n", step, nlow, nhigh);
printf("Each test is repeated %d times \n\n", niter);
printf("\n \n********************** TESTS *********************** \n");
for(n=nlow; n<=nhigh; n+=step){
//Maximum number of iterations
int maxit = 1e4;
//Stopping criterion
float epsilon = 1e-6;
// Memory allocation for data
d = (float*)malloc(n*sizeof(float));
z = (float*)malloc(n*sizeof(float));
zsqr = (float*)malloc(n*sizeof(float));
for (int iter =0; iter<niter; iter++){
// Memory allocation for computation
znorm = (float*)malloc(sizeof(float));
x0 = (float*)malloc(n*sizeof(float));
xstar = (float*)malloc(n*sizeof(float));
loss_GPU = (float*)malloc(sizeof(float));
loss_CPU = (float*)malloc(sizeof(float));
// Make both losses well defined even when the CPU path is skipped (choice == 0)
*loss_GPU = 0;
*loss_CPU = 0;
// Create instance of class Timer
Timer TimG, TimC;
//Fill the vector d with linear function of n
for (int i=0; i < n; i++){
d[i] = 2 * n - i;
}
// sort the vector in ascending order
qsort(d, n, sizeof(float), compare_function);
// Gaussian rank 1 perturbation
float mu_z = 5;
float sigma_z = 1;
gaussian_vector(z, mu_z, sigma_z, n);
gaussian_vector(zsqr, mu_z, sigma_z, n);
/*************************************************************************
********************************* GPU ************************************
*************************************************************************/
// We first wake up the GPU if first iteration
if (iter==0){
int *testGPU;
hipMalloc(&testGPU, 1024*sizeof(int));
hipLaunchKernelGGL(( wake_up) , dim3(1024), dim3(512), 0, 0, testGPU);
hipFree(testGPU);
}
// Start timer GPU
TimG.start();
/***************** GPU memory alloc *****************/
// Declare vectors on GPU
float *dGPU, *zsqrGPU, *znormGPU, *x0GPU, *xstarGPU, *avloss_GPU;
// Create memory space for vectors on GPU
hipMalloc(&dGPU, n*sizeof(float));
hipMalloc(&zsqrGPU, n*sizeof(float));
hipMalloc(&znormGPU, sizeof(float));
hipMalloc(&x0GPU, n*sizeof(float));
hipMalloc(&avloss_GPU, sizeof(float));
// hipMalloc does not zero device memory, so the atomicAdd accumulators
// (squared norm and average loss) must be reset explicitly
hipMemset(znormGPU, 0, sizeof(float));
hipMemset(avloss_GPU, 0, sizeof(float));
// Container for the results
hipMalloc(&xstarGPU, n*sizeof(float));
/***************** Transfer on GPU *****************/
// Transfers on GPU
hipMemcpy(dGPU, d, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(zsqrGPU, z, n*sizeof(float), hipMemcpyHostToDevice);
// We first compute the square and squared norm
hipLaunchKernelGGL(( square_kernel_g) , dim3(1024), dim3(512), 0, 0, zsqrGPU, znormGPU, n);
// Initialization of x0 on GPU
hipLaunchKernelGGL(( initialize_x0_kernel_g) , dim3(1024), dim3(512), 0, 0, x0GPU, dGPU, zsqrGPU, znormGPU, rho, n);
/***************** Root computation ****************/
// Find roots on GPU
hipLaunchKernelGGL(( find_roots_kernel_g) , dim3(1024), dim3(512), 0, 0, xstarGPU, x0GPU, dGPU, zsqrGPU, znormGPU, rho, n, maxit, epsilon, avloss_GPU);
// Transfer results on CPU to print it
hipMemcpy(xstar, xstarGPU, n*sizeof(float), hipMemcpyDeviceToHost);
// End timer
TimG.add();
// Collect the average spectral loss
hipMemcpy(loss_GPU, avloss_GPU, sizeof(float), hipMemcpyDeviceToHost);
// Free memory on GPU
hipFree(dGPU);
hipFree(zsqrGPU);
hipFree(znormGPU);
hipFree(x0GPU);
hipFree(xstarGPU);
hipFree(avloss_GPU);
/*************************************************************************
********************************* CPU ************************************
*************************************************************************/
if (choice ==1){
// Start timer CPU
TimC.start();
// We first compute the square and squared norm
square_vector(zsqr, znorm, n);
// Initialization of x0
initialize_x0(x0, d, zsqr, znorm, rho, n);
/***************** Root computation ****************/
// Find roots
find_roots(xstar, x0, d, zsqr, znorm, rho, n, maxit, epsilon, loss_CPU);
// End timer
TimC.add();
}
// Record the performance
fprintf(f, "%d;%d;%d;%f;%f;%f;%f\n", n, iter, niter, (float)TimG.getsum(), (float)TimC.getsum(), *loss_GPU, *loss_CPU);
// Free memory used for computation on CPU
free(znorm);
free(x0);
free(xstar);
free(loss_CPU);
free(loss_GPU);
}
printf("%d has been tested\n", n);
// Free memory used to store data on CPU
free(d);
free(z);
free(zsqr);
}
printf("\n \n");
// We close the access to the file
fclose(f);
}
|
220120a3634b3e5754c40575fa4f6f5392807d5c.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**************************************************************
The code in time.h is a part of a course on cuda taught by its authors:
Lokman A. Abbas-Turki
**************************************************************/
#include "timer.h"
/**************************************************************
Common functions
**************************************************************/
// Compare function for qsort
int compare_function(const void *a,const void *b) {
float *x = (float *) a;
float *y = (float *) b;
if (*x < *y) return - 1;
else if (*x > *y) return 1;
return 0;
}
// Generate gaussian vector using Box Muller
void gaussian_vector(float *v, float mu, float sigma, int n) {
for (int i = 0; i<n; i++){
float u1 = (float)rand()/(float)(RAND_MAX);
float u2 = (float)rand()/(float)(RAND_MAX);
v[i] = sigma * (sqrtf( -2 * logf(u1)) * cosf(2 * M_PI * u2)) + mu;
}
}
//Function to print a small vector of floats on host
void print_vector(float *c, int m, int n) {
for (int i=0; i<m; i++){
printf("%f ", c[i]);
printf("\n");
}
}
/**************************************************************
CPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
void square_vector(float *z, float *znorm, int n){
for (int i = 0; i < n; i++) {
float zi = z[i];
float zsqri = zi * zi;
z[i] = zsqri;
znorm[0] += zsqri;
}
}
// Function for computing f (the secular function of interest) at a given point x
float secfunc(float *d, float *zsqr, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqr[i] / (d[i] - x);
}
return rho + sum;
}
// Function for computing f' (the prime derivative of the secular function of interest) at a given point x
float secfunc_prime(float *d, float *zsqr, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
float secfunc_second(float *d, float *zsqr, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
float discrimant_int(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
float discrimant_ext(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
// h partition of the secular function, used for Initialization
float h_secfunc(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
float initialization_int(float *d, float *zsqr, float rho, int k, int n){
float d_k = d[k];
float d_kplus1 = d[k + 1];
float zsqr_k = zsqr[k];
float zsqr_kplus1 = zsqr[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc(d, zsqr, rho, middle, n);
float c = f - h_secfunc(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
float initialization_ext(float *d, float *zsqr, float *znorm, float rho, int n){
float d_nminus1 = d[n - 1];
float d_nminus2 = d[n - 2];
float d_n = d_nminus1 + znorm[0] / rho;
float zsqr_nminus1 = zsqr[n - 1];
float zsqr_nminus2 = zsqr[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc(d, zsqr, rho, middle, n);
if (f <= 0){
float hd = h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
float a_gragg(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
float b_gragg(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
float c_gragg(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
float eta_int(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg(f, fprime, delta_k, delta_kplus1);
float b = b_gragg(f, delta_k, delta_kplus1);
float c = c_gragg(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
float eta_ext(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg(f, delta_nminus2, delta_nminus1);
float c = c_gragg(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
float find_root_int(float *d, float *zsqr, float rho, float x, int k, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float f = secfunc(d, zsqr, rho, x, n);
float d_k = d[k];
float d_kplus1 = d[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_int(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
*loss_CPU += (float)(fabsf(f)/n);
return x;
}
// Iterate to find the last root (the exterior one)
float find_root_ext(float *d, float *zsqr, float rho, float x, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float d_nminus2 = d[n - 2];
float d_nminus1 = d[n - 1];
float f = secfunc(d, zsqr, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_ext(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
*loss_CPU += (float)(fabsf(f)/n);
return x;
}
void find_roots(float *xstar, float *x0, float *d, float *zsqr, float *znorm, float rho, int n, int maxit, float epsilon, float *loss_CPU){
// We make sure that the loss is set to 0
*loss_CPU =0;
for (int i=0; i<n-1; i++){
xstar[i] = find_root_int(d, zsqr, rho, x0[i], i, n, maxit, epsilon, loss_CPU);
}
xstar[n - 1] = find_root_ext(d, zsqr, rho, x0[n - 1], n, maxit, epsilon, loss_CPU);
}
void initialize_x0(float *x0, float *d, float *zsqr, float *znorm, float rho, int n){
for (int i=0; i<n-1; i++){
x0[i] = initialization_int(d, zsqr, rho, i, n);
}
x0[n - 1] = initialization_ext(d, zsqr, znorm, rho, n);
}
/**************************************************************
GPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
__global__ void square_kernel_g(float *zsqrGPU, float *znormGPU, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while(idx < n){
float zi = zsqrGPU[idx];
float zsqr_i = zi * zi;
zsqrGPU[idx] = zi * zi;
atomicAdd(znormGPU, zsqr_i);
idx += gridDim.x * blockDim.x;
}
}
// Device function for computing f (the secular function of interest) at a given point x
__device__ float secfunc_g(float *dGPU, float *zsqrGPU, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqrGPU[i] / (dGPU[i] - x);
}
return rho + sum;
}
// Device function for computing f' (the prime derivative of the secular function of interest) at a given point x
__device__ float secfunc_prime_g(float *dGPU, float *zsqrGPU, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
__device__ float secfunc_second_g(float *dGPU, float *zsqrGPU, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
__device__ float discrimant_int_g(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
__device__ float discrimant_ext_g(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
// h partition of the secular function, used for Initialization
__device__ float h_secfunc_g(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_int_g(float *dGPU, float *zsqrGPU, float rho, int k, int n){
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
float zsqr_k = zsqrGPU[k];
float zsqr_kplus1 = zsqrGPU[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
float c = f - h_secfunc_g(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int_g(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int_g(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_ext_g(float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
float d_nminus1 = dGPU[n - 1];
float d_nminus2 = dGPU[n - 2];
float d_n = d_nminus1 + znormGPU[0] / rho;
float zsqr_nminus1 = zsqrGPU[n - 1];
float zsqr_nminus2 = zsqrGPU[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
if (f <= 0){
float hd = h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
__device__ float a_gragg_g(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
__device__ float b_gragg_g(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
__device__ float c_gragg_g(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
__device__ float eta_int_g(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg_g(f, fprime, delta_k, delta_kplus1);
float b = b_gragg_g(f, delta_k, delta_kplus1);
float c = c_gragg_g(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int_g(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
__device__ float eta_ext_g(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg_g(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg_g(f, delta_nminus2, delta_nminus1);
float c = c_gragg_g(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext_g(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
__device__ float find_root_int_g(float *dGPU, float *zsqrGPU, float rho, float x, int k, int n, int maxit, float epsilon, float * avloss_GPU){
int i = 0;
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_int_g(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
// Save the loss
atomicAdd(avloss_GPU, (float)(fabsf(f)/n));
return x;
}
// Iterate to find the last root (the exterior one)
__device__ float find_root_ext_g(float *dGPU, float *zsqrGPU, float rho, float x, int n, int maxit, float epsilon, float* avloss_GPU){
int i = 0;
float d_nminus2 = dGPU[n - 2];
float d_nminus1 = dGPU[n - 1];
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_ext_g(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Save the loss
atomicAdd(avloss_GPU, (float)(fabsf(f)/n));
return x;
}
// Kernel to launch and distribute the searching of roots among GPU cores
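// Thread 0 takes the exterior root; every other global thread idx handles the
// interior interval idx - 1 and then strides by the total number of launched
// threads (1024 * 512 in main, which covers all intervals for the sizes tested here)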
__global__ void find_roots_kernel_g(float *xstarGPU, float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n, int maxit, float epsilon, float *avloss_GPU){
__shared__ float rho_shared, epsilon_shared;
__shared__ int n_shared, maxit_shared;
rho_shared = rho;
epsilon_shared = epsilon;
n_shared = n;
maxit_shared = maxit;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// The loss accumulator avloss_GPU is zeroed on the host (cudaMemset) before this
// kernel is launched; resetting it here from every thread would race with the
// atomicAdd updates made by other blocks
// First core gets search of the last root (the exterior one)
if (idx == 0){
float x = x0GPU[n - 1];
xstarGPU[n - 1] = find_root_ext_g(dGPU, zsqrGPU, rho_shared, x, n_shared, maxit_shared, epsilon_shared, avloss_GPU);
}
// Each next core searches one interval (interior interval)
else {
while (idx < n) {
float x = x0GPU[idx - 1];
xstarGPU[idx - 1] = find_root_int_g(dGPU, zsqrGPU, rho_shared, x, idx - 1, n_shared, maxit_shared, epsilon_shared, avloss_GPU);
// in case we have not launched enough cores to cover all intervals
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to compute the initial guesses from the paper on GPU
__global__ void initialize_x0_kernel_g(float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
__shared__ float znormGPU_shared, rho_shared;
__shared__ int n_shared;
znormGPU_shared = *znormGPU;
rho_shared = rho;
n_shared = n;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// First core compute the initial guess for last root (the exterior one)
if (idx == 0){
x0GPU[n - 1] = initialization_ext_g(dGPU, zsqrGPU, &znormGPU_shared, rho_shared, n_shared);
}
// Each next core compute initial guess for one interval (interior interval)
else {
while (idx < n) {
x0GPU[idx - 1] = initialization_int_g(dGPU, zsqrGPU, rho_shared, idx - 1, n_shared);
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to "wake up" the GPU
__global__ void wake_up(int *test){
__shared__ int c;
c = 3;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 1024)
{
test[idx] += c;
}
}
int main (void) {
/****************** Access for writing ******************/
FILE *f = fopen("result_mem.csv", "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
fprintf(f, "n;iter;niter;time_GPU_mem;time_CPU;loss_GPU_mem;loss_CPU\n");
/****************** Declaration ******************/
// Declare vectors or floats
float *d, *z, *zsqr, *znorm, *x0, *xstar, *loss_GPU, *loss_CPU;
// rho parameter
float rho = 10;
// Size of arrow matrix chosen by the user
int n, nlow, nhigh, step, niter, choice;
printf("\nLowest n to test? \n");
scanf("%d", &nlow);
printf("\nHighest n to test? \n");
scanf("%d", &nhigh);
printf("\nSize of the step? \n");
scanf("%d", &step);
printf("\nNumber of iterations of the same n to avoid stochastic error? \n");
scanf("%d", &niter);
printf("\nDo you wish to test both algorithms (type 1) or GPU only (type 0)\n");
scanf("%d", &choice);
printf("\n \n******************* CHOICE OF N ******************** \n");
printf("We compare the chosen algorithms every %d n, for n between %d and %d \n", step, nlow, nhigh);
printf("Each test is repeated %d times \n\n", niter);
printf("\n \n********************** TESTS *********************** \n");
for(n=nlow; n<=nhigh; n+=step){
//Maximum number of iterations
int maxit = 1e4;
//Stopping criterion
float epsilon = 1e-6;
// Memory allocation for data
d = (float*)malloc(n*sizeof(float));
z = (float*)malloc(n*sizeof(float));
zsqr = (float*)malloc(n*sizeof(float));
for (int iter =0; iter<niter; iter++){
// Memory allocation for computation
znorm = (float*)malloc(sizeof(float));
x0 = (float*)malloc(n*sizeof(float));
xstar = (float*)malloc(n*sizeof(float));
loss_GPU = (float*)malloc(sizeof(float));
loss_CPU = (float*)malloc(sizeof(float));
// Make both losses well defined even when the CPU path is skipped (choice == 0)
*loss_GPU = 0;
*loss_CPU = 0;
// Create instance of class Timer
Timer TimG, TimC;
//Fill the vector d with linear function of n
for (int i=0; i < n; i++){
d[i] = 2 * n - i;
}
// sort the vector in ascending order
qsort(d, n, sizeof(float), compare_function);
// Gaussian rank 1 perturbation
float mu_z = 5;
float sigma_z = 1;
gaussian_vector(z, mu_z, sigma_z, n);
gaussian_vector(zsqr, mu_z, sigma_z, n);
/*************************************************************************
********************************* GPU ************************************
*************************************************************************/
// We first wake up the GPU if first iteration
if (iter==0){
int *testGPU;
cudaMalloc(&testGPU, 1024*sizeof(int));
wake_up <<<1024, 512>>> (testGPU);
cudaFree(testGPU);
}
// Start timer GPU
TimG.start();
/***************** GPU memory alloc *****************/
// Declare vectors on GPU
float *dGPU, *zsqrGPU, *znormGPU, *x0GPU, *xstarGPU, *avloss_GPU;
// Create memory space for vectors on GPU
cudaMalloc(&dGPU, n*sizeof(float));
cudaMalloc(&zsqrGPU, n*sizeof(float));
cudaMalloc(&znormGPU, sizeof(float));
cudaMalloc(&x0GPU, n*sizeof(float));
cudaMalloc(&avloss_GPU, sizeof(float));
// cudaMalloc does not zero device memory, so the atomicAdd accumulators
// (squared norm and average loss) must be reset explicitly
cudaMemset(znormGPU, 0, sizeof(float));
cudaMemset(avloss_GPU, 0, sizeof(float));
// Container for the results
cudaMalloc(&xstarGPU, n*sizeof(float));
/***************** Transfer on GPU *****************/
// Transfers on GPU
cudaMemcpy(dGPU, d, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(zsqrGPU, z, n*sizeof(float), cudaMemcpyHostToDevice);
// We first compute the square and squared norm
square_kernel_g <<<1024, 512>>> (zsqrGPU, znormGPU, n);
// Initialization of x0 on GPU
initialize_x0_kernel_g <<<1024, 512>>> (x0GPU, dGPU, zsqrGPU, znormGPU, rho, n);
/***************** Root computation ****************/
// Find roots on GPU
find_roots_kernel_g <<<1024, 512>>> (xstarGPU, x0GPU, dGPU, zsqrGPU, znormGPU, rho, n, maxit, epsilon, avloss_GPU);
// Transfer results on CPU to print it
cudaMemcpy(xstar, xstarGPU, n*sizeof(float), cudaMemcpyDeviceToHost);
// End timer
TimG.add();
// Collect the average spectral loss
cudaMemcpy(loss_GPU, avloss_GPU, sizeof(float), cudaMemcpyDeviceToHost);
// Free memory on GPU
cudaFree(dGPU);
cudaFree(zsqrGPU);
cudaFree(znormGPU);
cudaFree(x0GPU);
cudaFree(xstarGPU);
cudaFree(avloss_GPU);
/*************************************************************************
********************************* CPU ************************************
*************************************************************************/
if (choice ==1){
// Start timer CPU
TimC.start();
// We first compute the square and squared norm
square_vector(zsqr, znorm, n);
// Initialization of x0
initialize_x0(x0, d, zsqr, znorm, rho, n);
/***************** Root computation ****************/
// Find roots
find_roots(xstar, x0, d, zsqr, znorm, rho, n, maxit, epsilon, loss_CPU);
// End timer
TimC.add();
}
// Record the performance
fprintf(f, "%d;%d;%d;%f;%f;%f;%f\n", n, iter, niter, (float)TimG.getsum(), (float)TimC.getsum(), *loss_GPU, *loss_CPU);
// Free memory used for computation on CPU
free(znorm);
free(x0);
free(xstar);
free(loss_CPU);
free(loss_GPU);
}
printf("%d has been tested\n", n);
// Free memory used to store data on CPU
free(d);
free(z);
free(zsqr);
}
printf("\n \n");
// We close the access to the file
fclose(f);
}
|
9ed4168aeb935ac7623f95194bb03b51ae56caaf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
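// For a 2D grid of 1D blocks the flattened global index is
// gid = blockIdx.y * (gridDim.x * blockDim.x) + blockIdx.x * blockDim.x + threadIdx.x:
// row_offset skips every thread in the rows of blocks above blockIdx.y and
// block_offset skips the blocks to the left within the same row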
__global__ void unique_gid_calculation_2d(int * data)
{
int tid = threadIdx.x;
int block_offset = blockIdx.x * blockDim.x;
int row_offset = blockDim.x * gridDim.x * blockIdx.y;
int gid = row_offset + block_offset + tid;
printf("blockIdx.x : %d, blockIdx.y : %d, threadIdx.x : %d, gid : %d - data : %d \n",
blockIdx.x, blockIdx.y, tid, gid, data[gid]);
}
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = {23,9,4,53,65,12,1,33,22,43,56,4,76,81,94,32};
//
// int * d_data;
// hipMalloc((void**)&d_data, array_byte_size);
// hipMemcpy(d_data, h_data, array_byte_size, hipMemcpyHostToDevice);
//
// dim3 block(4);
// dim3 grid(2,2);
//
// unique_gid_calculation_2d << < grid, block >> > (d_data);
// hipDeviceSynchronize();
//
// hipDeviceReset();
// return 0;
//}
|
9ed4168aeb935ac7623f95194bb03b51ae56caaf.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void unique_gid_calculation_2d(int * data)
{
int tid = threadIdx.x;
int block_offset = blockIdx.x * blockDim.x;
int row_offset = blockDim.x * gridDim.x * blockIdx.y;
int gid = row_offset + block_offset + tid;
printf("blockIdx.x : %d, blockIdx.y : %d, threadIdx.x : %d, gid : %d - data : %d \n",
blockIdx.x, blockIdx.y, tid, gid, data[gid]);
}
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = {23,9,4,53,65,12,1,33,22,43,56,4,76,81,94,32};
//
// int * d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// dim3 block(4);
// dim3 grid(2,2);
//
// unique_gid_calculation_2d << < grid, block >> > (d_data);
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//}
|
11346946b0156407c40f756f145599106a11dd3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
*** Nirdeshika KC ***
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password[] = "KB";
char *a = attempt;
char *p = plain_password;
while(*a == *p) {
if(*a == '\0') {
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
The kernel function assumes that there will be only one thread and uses
nested loops to generate all possible passwords and test whether they match
the hidden password.
*****************************************************************************/
__global__ void kernel() {
char i, j;
char password[3];
password[2] = '\0';
for(i='A'; i<='Z'; i++) {
password[0] = i;
for(j='A'; j<='Z'; j++) {
password[1] = j;
if(is_a_match(password)) {
printf("password found: %s\n", password);
} else {
// printf("tried: %s\n", password);
}
}
}
}
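/****************************************************************************
 Illustrative sketch (not used by main below): the same 26 * 26 search spread
 over one thread per two-letter candidate. The kernel name kernel_parallel is
 introduced here only for illustration.
*****************************************************************************/
__global__ void kernel_parallel() {
  // One thread per candidate: 26 * 26 = 676 threads are enough
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if(idx < 26 * 26) {
    char password[3];
    password[0] = 'A' + idx / 26;
    password[1] = 'A' + idx % 26;
    password[2] = '\0';
    if(is_a_match(password)) {
      printf("password found: %s\n", password);
    }
  }
}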
int time_diff(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel) , dim3(1), dim3(1), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_diff(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
11346946b0156407c40f756f145599106a11dd3f.cu
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
*** Nirdeshika KC ***
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password[] = "KB";
char *a = attempt;
char *p = plain_password;
while(*a == *p) {
if(*a == '\0') {
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
The kernel function assumes that there will be only one thread and uses
nested loops to generate all possible passwords and test whether they match
the hidden password.
*****************************************************************************/
__global__ void kernel() {
char i, j;
char password[3];
password[2] = '\0';
for(i='A'; i<='Z'; i++) {
password[0] = i;
for(j='A'; j<='Z'; j++) {
password[1] = j;
if(is_a_match(password)) {
printf("password found: %s\n", password);
} else {
// printf("tried: %s\n", password);
}
}
}
}
int time_diff(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<1, 1>>>();
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_diff(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
b1e366df184b8e455601e99b544d07715e15aed7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <malloc.h>
#define CUDA_CHECK_RETURN(value) {\
hipError_t _m_cudaStat = value;\
if (_m_cudaStat != hipSuccess) {\
fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
} // macro for error handling
__global__ void gTranspose0(float* storage_d, float* storage_d_t){
int i=threadIdx.x+blockIdx.x*blockDim.x;
int j=threadIdx.y+blockIdx.y*blockDim.y;
int N=blockDim.x*gridDim.x;
storage_d_t[j+i*N]=storage_d[i+j*N];
}
__global__ void gInitializeMatrixByRows(long long n, double* matrix_d){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
matrix_d[i+j*N] = (double)(i+j*N);
}
__global__ void gInitializeMatrixByColumns(long long n, double* matrix_d){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
matrix_d[j+i*N] = (double)(j+i*N);
}
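// Note: both initialization kernels compute 2D thread indices, but main below
// launches them with a 1D configuration of (n / threads) blocks of `threads`
// threads each, so j is always 0 and only n of the n * n allocated entries are
// written (the first row for gInitializeMatrixByRows, the first column for
// gInitializeMatrixByColumns)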
int main(int argc, char *argv[]) {
// Set the preferred cache configuration for the current device:
//hipFuncSetCacheConfig(gInitVectors, hipFuncCachePreferL1);
if (argc < 3) {
printf("Error: run program with 2 args: n, threads per block\n");
return 1;
}
long long n, threads;
n = atoi(argv[1]);
threads = atoi(argv[2]);
double *matrix1_d, *matrix2_d;
for (int i = 0; i < 10; i++) {
CUDA_CHECK_RETURN(hipMalloc((void**)&matrix1_d, n * n * sizeof(double)));
hipLaunchKernelGGL(( gInitializeMatrixByRows) , dim3(n / threads), dim3(threads) , 0, 0, n, matrix1_d);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipGetLastError());
hipFree(matrix1_d);
CUDA_CHECK_RETURN(hipMalloc((void**)&matrix2_d, n * n * sizeof(double)));
hipLaunchKernelGGL(( gInitializeMatrixByColumns) , dim3(n / threads), dim3(threads) , 0, 0, n, matrix2_d);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipGetLastError());
hipFree(matrix2_d);
}
return 0;
}
|
b1e366df184b8e455601e99b544d07715e15aed7.cu
|
#include <stdio.h>
#include <time.h>
#include <malloc.h>
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
} //macro for error handling
__global__ void gTranspose0(float* storage_d, float* storage_d_t){
int i=threadIdx.x+blockIdx.x*blockDim.x;
int j=threadIdx.y+blockIdx.y*blockDim.y;
int N=blockDim.x*gridDim.x;
storage_d_t[j+i*N]=storage_d[i+j*N];
}
__global__ void gInitializeMatrixByRows(long long n, double* matrix_d){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
matrix_d[i+j*N] = (double)(i+j*N);
}
__global__ void gInitializeMatrixByColumns(long long n, double* matrix_d){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
matrix_d[j+i*N] = (double)(j+i*N);
}
int main(int argc, char *argv[]) {
//set the preferred cache configuration for the current device:
//cudaFuncSetCacheConfig(gInitVectors, cudaFuncCachePreferL1);
if (argc < 3) {
printf("Error: run program with 2 args: n, threads per block\n");
return 1;
}
long long n, threads;
n = atoi(argv[1]);
threads = atoi(argv[2]);
double *matrix1_d, *matrix2_d;
for (int i = 0; i < 10; i++) {
CUDA_CHECK_RETURN(cudaMalloc((void**)&matrix1_d, n * n * sizeof(double)));
gInitializeMatrixByRows <<< n / threads, threads >>> (n, matrix1_d);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaGetLastError());
cudaFree(matrix1_d);
CUDA_CHECK_RETURN(cudaMalloc((void**)&matrix2_d, n * n * sizeof(double)));
gInitializeMatrixByColumns <<< n / threads, threads >>> (n, matrix2_d);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaGetLastError());
cudaFree(matrix2_d);
}
return 0;
}
|
7a989596e6d1d2dd2fe029573541e704e2c311cf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "normalDistribution.h"
#include <cassert>
#include <string>
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int NORMAL_DIST_BLOCK_SIZE = 512;
} // namespace
/******************************************************************************
* CUDA KERNELS ***************************************************************
*****************************************************************************/
__global__ void normalDistributionKernel(
hiprandState_t* const states, const int numStates, float* const outValues, const int numValues)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numStates)
{
// load random state information from global memory
hiprandState_t localState = states[tid];
for (int index = tid; index < numValues; index += numStates)
{
outValues[index] = hiprand_normal(&localState);
}
// save random state information back to global memory
states[tid] = localState;
}
}
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
int roundUpBlocks(const int num, const int blockSize)
{
return ((num - 1) / blockSize) + 1;
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
NormalDistribution::NormalDistribution(const int numStates, const uint32_t seed)
: mRand(numStates)
{
setSeed(seed, 0);
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void NormalDistribution::setSeed(const uint32_t seed, hipStream_t stream)
{
mRand.setSeed(seed, stream);
}
void NormalDistribution::generate(float* const outValues, const int numValues, hipStream_t stream)
{
const dim3 grid(roundUpBlocks(mRand.size(), NORMAL_DIST_BLOCK_SIZE));
const dim3 block(NORMAL_DIST_BLOCK_SIZE);
assert(mRand.size() <= grid.x * block.x);
hipLaunchKernelGGL(( normalDistributionKernel), dim3(grid), dim3(block), 0, stream, mRand.getRandomStates(), mRand.size(), outValues, numValues);
}
} // namespace tts
|
7a989596e6d1d2dd2fe029573541e704e2c311cf.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "normalDistribution.h"
#include <cassert>
#include <string>
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int NORMAL_DIST_BLOCK_SIZE = 512;
} // namespace
/******************************************************************************
* CUDA KERNELS ***************************************************************
*****************************************************************************/
__global__ void normalDistributionKernel(
curandState_t* const states, const int numStates, float* const outValues, const int numValues)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numStates)
{
// load random state information from global memory
curandState_t localState = states[tid];
for (int index = tid; index < numValues; index += numStates)
{
outValues[index] = curand_normal(&localState);
}
// save random state information back to global memory
states[tid] = localState;
}
}
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
int roundUpBlocks(const int num, const int blockSize)
{
return ((num - 1) / blockSize) + 1;
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
NormalDistribution::NormalDistribution(const int numStates, const uint32_t seed)
: mRand(numStates)
{
setSeed(seed, 0);
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void NormalDistribution::setSeed(const uint32_t seed, cudaStream_t stream)
{
mRand.setSeed(seed, stream);
}
void NormalDistribution::generate(float* const outValues, const int numValues, cudaStream_t stream)
{
const dim3 grid(roundUpBlocks(mRand.size(), NORMAL_DIST_BLOCK_SIZE));
const dim3 block(NORMAL_DIST_BLOCK_SIZE);
assert(mRand.size() <= grid.x * block.x);
normalDistributionKernel<<<grid, block, 0, stream>>>(mRand.getRandomStates(), mRand.size(), outValues, numValues);
}
} // namespace tts
|
2cbe95e4f589be00a09e9dbe20d3c5de8b716d32.hip
|
// !!! This is a file automatically generated by hipify!!!
// nvcc fft_cuda_2d.cu -lcublas -lcufft -arch=compute_52 -o fft_cuda_2d
//https://www.researchgate.net/figure/Computing-2D-FFT-of-size-NX-NY-using-CUDAs-cuFFT-library-49-FFT-fast-Fourier_fig3_324060154
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <hipfft.h>
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <vector>
#define DIM 4*65536//65536 = 256 * 256
#define NX 220
#define NY 220
using namespace std;
int main()
{
int n = 0; //n is the number of integers read from the file
int data[220*220];
int x;
ifstream File;
File.open("lenna_grayscale.txt");
if(!File.is_open()){
cout<<"Failed to open lenna_grayscale.txt"<<endl;
return 1;
}
while(File>>x){
data[n] = x;
n++;
}
File.close();
cout<<"n : "<<n<<endl;
for(int i=0;i<n;i++){
cout << data[i] << " ";
}
float elapsedTime = 0;
hipfftHandle plan;
hipfftComplex *host_data = (hipfftComplex*)malloc(NX*NY*sizeof(hipfftComplex));
hipfftComplex *fft_data = (hipfftComplex*)malloc(NX*NY*sizeof(hipfftComplex));
hipfftComplex *dev_data;
hipEvent_t start,stop;
//FEED INPUT
srand(time(NULL));
for(int i = 0;i<NX;i++){
for(int j = 0;j<NY;j++){
host_data[i*NY+j].x = (float)data[i*NY+j]; //rand()/(float)RAND_MAX;
host_data[i*NY+j].y = 0.0;
}
}
//SHOW HOST DATA
for(int i = 0;i<16;i++){
printf("DATA: %3.1f %3.1f \n",host_data[i*NY+1].x,host_data[i*NY+1].y);
}
//ALLOCATE GPU MEMORY
hipMalloc((void**)&dev_data,sizeof(hipfftComplex)*NX*NY);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//COPY INPUT
hipMemcpy(dev_data,host_data,NX*NY*sizeof(hipfftComplex),hipMemcpyHostToDevice);
//CREATE CUFFT PLAN
hipfftPlan2d(&plan,NX,NY,HIPFFT_C2C);
//PERFORM COMPUTATION(fft and ifft)
hipfftExecC2C(plan,dev_data,dev_data,HIPFFT_FORWARD);
//COPY BACK RESULTS
hipMemcpy(fft_data,dev_data,sizeof(hipfftComplex)*NX*NY,hipMemcpyDeviceToHost);
ofstream outfile2;
outfile2.open("fft_data.txt");
// int data2[220*220] = {0};
for(int i = 0;i<NX;i++){
for(int j = 0;j<NY;j++){
if(j == NY - 1){
outfile2<<fft_data[i*NY+j].x<<endl;
}else{
outfile2<<fft_data[i*NY+j].x<<",";
}
}
}
outfile2.close();
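// Note: hipFFT/cuFFT transforms are unnormalized, so the inverse transform below
// yields values scaled by NX*NY; the division by (NX*NY) when the results are
// printed and written out restores the original magnitudes.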
hipfftExecC2C(plan,dev_data,dev_data,HIPFFT_BACKWARD);//https://stackoverflow.com/questions/46562575/how-to-cuda-ifft
//COPY BACK RESULTS
hipMemcpy(host_data,dev_data,sizeof(hipfftComplex)*NX*NY,hipMemcpyDeviceToHost);
//GET CALCULATION TIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
//SHOW RESULTS
for(int i = 0;i<16;i++){
printf("DATA: %3.1f %3.1f \n",host_data[i*NY+1].x/(NX*NY),host_data[i*NY+1].y/(NX*NY));
}
ofstream outfile;
outfile.open("output_data.txt");
// int data2[220*220] = {0};
for(int i = 0;i<NX;i++){
for(int j = 0;j<NY;j++){
// data2[i*NY+j] = host_data[i*NY+3].x/(NX*NY)
if(j == NY - 1){
outfile<<host_data[i*NY+j].x/(NX*NY)<<endl;
}else{
outfile<<host_data[i*NY+j].x/(NX*NY)<<",";
}
}
}
outfile.close();
//FREE MEMORY
hipfftDestroy(plan);
hipFree(dev_data);
free(host_data);
printf("elapsed time %f\n",elapsedTime);
printf("CUFFT Calculation COMPLETED IN : % 5.3f ms \n",elapsedTime);
}
|
2cbe95e4f589be00a09e9dbe20d3c5de8b716d32.cu
|
// nvcc fft_cuda_2d.cu -lcublas -lcufft -arch=compute_52 -o fft_cuda_2d
//https://www.researchgate.net/figure/Computing-2D-FFT-of-size-NX-NY-using-CUDAs-cuFFT-library-49-FFT-fast-Fourier_fig3_324060154
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <cufft.h>
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <vector>
#define DIM 4*65536//65536 = 256 * 256
#define NX 220
#define NY 220
using namespace std;
int main()
{
int n = 0; //n is the number of integers read from the file
int data[220*220];
int x;
ifstream File;
File.open("lenna_grayscale.txt");
if(!File.is_open()){
cout<<"Failed to open lenna_grayscale.txt"<<endl;
return 1;
}
while(File>>x){
data[n] = x;
n++;
}
File.close();
cout<<"n : "<<n<<endl;
for(int i=0;i<n;i++){
cout << data[i] << " ";
}
float elapsedTime = 0;
cufftHandle plan;
cufftComplex *host_data = (cufftComplex*)malloc(NX*NY*sizeof(cufftComplex));
cufftComplex *fft_data = (cufftComplex*)malloc(NX*NY*sizeof(cufftComplex));
cufftComplex *dev_data;
cudaEvent_t start,stop;
//FEED INPUT
srand(time(NULL));
for(int i = 0;i<NX;i++){
for(int j = 0;j<NY;j++){
host_data[i*NY+j].x = (float)data[i*NY+j]; //rand()/(float)RAND_MAX;
host_data[i*NY+j].y = 0.0;
}
}
//SHOW HOST DATA
for(int i = 0;i<16;i++){
printf("DATA: %3.1f %3.1f \n",host_data[i*NY+1].x,host_data[i*NY+1].y);
}
//ALLOCATE GPU MEMORY
cudaMalloc((void**)&dev_data,sizeof(cufftComplex)*NX*NY);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//COPY INPUT
cudaMemcpy(dev_data,host_data,NX*NY*sizeof(cufftComplex),cudaMemcpyHostToDevice);
//CREATE CUFFT PLAN
cufftPlan2d(&plan,NX,NY,CUFFT_C2C);
//PERFORM COMPUTATION(fft and ifft)
cufftExecC2C(plan,dev_data,dev_data,CUFFT_FORWARD);
//COPY BACK RESULTS
cudaMemcpy(fft_data,dev_data,sizeof(cufftComplex)*NX*NY,cudaMemcpyDeviceToHost);
ofstream outfile2;
outfile2.open("fft_data.txt");
// int data2[220*220] = {0};
for(int i = 0;i<NX;i++){
for(int j = 0;j<NY;j++){
if(j == NY - 1){
outfile2<<fft_data[i*NY+j].x<<endl;
}else{
outfile2<<fft_data[i*NY+j].x<<",";
}
}
}
outfile2.close();
cufftExecC2C(plan,dev_data,dev_data,CUFFT_INVERSE);//https://stackoverflow.com/questions/46562575/how-to-cuda-ifft
//COPY BACK RESULTS
cudaMemcpy(host_data,dev_data,sizeof(cufftComplex)*NX*NY,cudaMemcpyDeviceToHost);
//GET CALCULATION TIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
//SHOW RESULTS
for(int i = 0;i<16;i++){
printf("DATA: %3.1f %3.1f \n",host_data[i*NY+1].x/(NX*NY),host_data[i*NY+1].y/(NX*NY));
}
ofstream outfile;
outfile.open("output_data.txt");
// int data2[220*220] = {0};
for(int i = 0;i<NX;i++){
for(int j = 0;j<NY;j++){
// data2[i*NY+j] = host_data[i*NY+3].x/(NX*NY)
if(j == NY - 1){
outfile<<host_data[i*NY+j].x/(NX*NY)<<endl;
}else{
outfile<<host_data[i*NY+j].x/(NX*NY)<<",";
}
}
}
outfile.close();
//FREE MEMORY
cufftDestroy(plan);
cudaFree(dev_data);
free(host_data);
printf("elapsed time %f\n",elapsedTime);
printf("CUFFT Calculation COMPLETED IN : % 5.3f ms \n",elapsedTime);
}
|
19b1238ff6e9975dae4145a94ed9b1c2cb106192.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "UniformGrid.h"
struct uGrid {
int nx;
int ny;
int nz;
float dx;
float dy;
float dz;
float xmin;
float ymin;
float zmin;
__device__ int size() { return nx * ny * nz; }
__device__ int i_index(const int gl_index) { return (gl_index % nx); }
__device__ int j_index(const int gl_index) { return ((gl_index / nx) % ny); }
__device__ int k_index(const int gl_index) { return (gl_index / (nx * ny)); }
__device__ float3 cellVertex(const int i, const int j, const int k) { return make_float3(xmin + i * dx, ymin + j * dy, zmin + k * dz); }
};
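// Index decomposition, worked through on a small example (illustration only):
// with nx = ny = 4, gl_index = 21 gives i_index = 21 % 4 = 1, j_index = (21 / 4) % 4 = 1
// and k_index = 21 / 16 = 1; recombining, i + nx * (j + ny * k) = 1 + 4 * 5 = 21.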
__global__ void volume(uGrid ugrid, float* d_scalar, p_mc::UniformGrid::SurfaceCase sc)
{
using SurfaceCase = p_mc::UniformGrid::SurfaceCase;
float pi{ 3.14159265358979323846f };
// use a 1d grid
const int gl_index = blockIdx.x * blockDim.x + threadIdx.x;
if (ugrid.size() <= gl_index)
return;
const int i_index = ugrid.i_index(gl_index);
const int j_index = ugrid.j_index(gl_index);
const int k_index = ugrid.k_index(gl_index);
if (i_index >= ugrid.nx || j_index >= ugrid.ny || k_index >= ugrid.nz)
{
return;
}
float val = 0.0f;
float3 v = ugrid.cellVertex(i_index, j_index, k_index);
auto sq = [](const float x) { return x * x; };
auto qu = [](const float x) { return x * x * x * x; };
auto torus_h = [sq](float3 pos, float3 center, float2 param)
{
const float c = sq(param.x);
const float a = sq(param.y);
const float x = sq(pos.x - center.x);
const float y = sq(pos.y - center.y);
const float z = sq(pos.z - center.z);
return sq(x + y + z + c - a) - 4 * c * (x + y);
};
auto torus_v = [sq](float3 pos, float3 center, float2 param)
{
const float c = sq(param.x);
const float a = sq(param.y);
const float x = sq(pos.x - center.x);
const float y = sq(pos.y - center.y);
const float z = sq(pos.z - center.z);
return sq(x + y + z + c - a) - 4 * c * (x + z);
};
auto genusTwo = [sq](const float3 pos)
{
float alpha = 1.0;
float x = (pos.x + 1.0f) / 2.0f;
float y = (pos.y + 1.0f) / 2.0f;
float z = (pos.z + 1.0f) / 2.0f;
x = alpha * (4.0f * x - 2.0f);
y = alpha * (4.0f * y - 2.0f);
z = alpha * (4.0f * z - 2.0f);
float t = 2 * y * (y * y - 3 * x * x) * (1 - z * z);
t += (x * x + y * y) * (x * x + y * y);
t -= (9 * z * z - 1) * (1 - z * z);
return t;
};
auto iWP = [pi](const float3 p)
{
const float alpha = 5.01;
//const float alpha = 1.01;
const float x = alpha * (p.x + 1) * pi;
const float y = alpha * (p.y + 1) * pi;
const float z = alpha * (p.z + 1) * pi;
return cos(x) * cos(y) + cos(y) * cos(z) + cos(z) * cos(x) - cos(x) * cos(y) * cos(z); // iso-value = 0
};
auto pwHybrid = [pi](const float3 p)
{
//const float alpha = 3.01;
const float alpha = 1.01;
const float x = alpha * (p.x + 1) * pi;
const float y = alpha * (p.y + 1) * pi;
const float z = alpha * (p.z + 1) * pi;
return 4.0f * (cosf(x) * cosf(y) + cosf(y) * cosf(z) + cosf(z) * cosf(x)) - 3* cosf(x) * cosf(y) * cosf(z) + 0.8f; // iso-value = 0
};
auto neovius = [pi](const float3 p)
{
const float alpha = 1;
const float x = alpha * (p.x + 1) * pi;
const float y = alpha * (p.y + 1) * pi;
const float z = alpha * (p.z + 1) * pi;
return 3 * (cos(x) + cos(y) + cos(z)) + 4 * cos(x) * cos(y) * cos(z); // iso_value = 0.0
};
auto goursat = [sq,qu](const float3 p)
{
const float a = -1.0f;
const float b = 0.0f;
const float c = 0.5f;
return qu(p.x) + qu(p.y) + qu(p.z) + a * (sq(p.x) + sq(p.y) + sq(p.z)) + b * (sq(p.x) + sq(p.y) + sq(p.z)) + c;
};
auto steinerRoman = [sq, qu](const float3 p)
{
const float alpha = 1.5f;
const float x = alpha * p.x;
const float y = alpha * p.y;
const float z = alpha * p.z;
return sq(x*x + y*y + z*z - 1.0f) - (sq(z - 1) - 2.0f * x*x) * (sq(z + 1) - 2 * y*y);
};
switch (sc) {
case SurfaceCase::Sphere:
val = v.x * v.x + v.y * v.y + v.z * v.z - 0.16;
break;
case SurfaceCase::Torus:
{
const float2 param{ 0.3,0.15 };
const float3 center{ 0,0,0 };
val = torus_h(v, center, param);
break;
}
case SurfaceCase::TwoHoledTorus:
{
const float2 p1{ 0.3,0.15 };
const float t1 = 0.38;
const float t2 = 0.2;
const float delta = 0.38;
const float vt1 = torus_h(v, make_float3(-t1, 0, 0), p1);
const float vt2 = torus_h(v, make_float3(t2, delta, 0), p1);
val = fminf(vt1, vt2);
break;
}
case SurfaceCase::FourHoledTorus:
{
const float2 p2{ 0.3,0.15 };
const float t = 0.38;
const float v1 = torus_h(v, make_float3(-t, 0, 0), p2);
const float v2 = torus_h(v, make_float3(t, 0, 0), p2);
const float v3 = torus_v(v, make_float3(0, 0, -t), p2);
const float v4 = torus_v(v, make_float3(0, 0, t), p2);
val = fminf(v1, v2);
val = fminf(val, v3);
val = fminf(val, v4);
break;
}
case SurfaceCase::GenusTwo:
val = genusTwo(v);
break;
case SurfaceCase::Goursat:
val = goursat(v);
break;
case SurfaceCase::iWP:
val = iWP(v);
break;
case SurfaceCase::pwHybrid:
val = pwHybrid(v);
break;
case SurfaceCase::neovius:
val = neovius(v);
break;
case SurfaceCase::SteinerRoman:
val = steinerRoman(v);
break;
default:
val = genusTwo(v);
}
d_scalar[gl_index] = val;
}
__host__ void p_mc::UniformGrid::generateVolume(const std::array<int, 3>& dim, SurfaceCase sc)
{
// volume size
idim = dim[0];
jdim = dim[1];
kdim = dim[2];
// domain
float xmin = -1.0f;
float ymin = -1.0f;
float zmin = -1.0f;
float xmax = 1.0f;
float ymax = 1.0f;
float zmax = 1.0f;
// define grid size
dx = (xmax - xmin) / (idim - 1.0f);
dy = (ymax - ymin) / (jdim - 1.0f);
dz = (zmax - zmin) / (kdim - 1.0f);
x0 = xmin;
y0 = ymin;
z0 = zmin;
uGrid vol;
vol.nx = idim;
vol.ny = jdim;
vol.nz = kdim;
vol.dx = dx;
vol.dy = dy;
vol.dz = dz;
vol.xmin = x0;
vol.ymin = y0;
vol.zmin = z0;
// allocate data
hipMalloc(&d_scalar, t_size() * sizeof(float));
cudaCheckError();
d_scalar_.reset(d_scalar, hipFree);
// compute volume
const size_t size_ = t_size();
uint b_size = MC_BLOCKSIZE;
uint g_size = (static_cast<uint>(size_) + b_size - 1) / b_size;
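// Adding (b_size - 1) before dividing makes this a ceiling division, so
// g_size * b_size >= t_size() and every scalar value gets a thread; the early
// return at the top of the volume kernel discards the surplus threads.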
//volume << < g_size, b_size >> > (ugrid, sCase);
volume << < g_size, b_size >> > (vol, d_scalar, sc);
hipDeviceSynchronize();
cudaCheckError();
}
|
19b1238ff6e9975dae4145a94ed9b1c2cb106192.cu
|
#include "UniformGrid.h"
struct uGrid {
int nx;
int ny;
int nz;
float dx;
float dy;
float dz;
float xmin;
float ymin;
float zmin;
__device__ int size() { return nx * ny * nz; }
__device__ int i_index(const int gl_index) { return (gl_index % nx); }
__device__ int j_index(const int gl_index) { return ((gl_index / nx) % ny); }
__device__ int k_index(const int gl_index) { return (gl_index / (nx * ny)); }
__device__ float3 cellVertex(const int i, const int j, const int k) { return make_float3(xmin + i * dx, ymin + j * dy, zmin + k * dz); }
};
__global__ void volume(uGrid ugrid, float* d_scalar, p_mc::UniformGrid::SurfaceCase sc)
{
using SurfaceCase = p_mc::UniformGrid::SurfaceCase;
float pi{ 3.14159265358979323846f };
// use a 1d grid
const int gl_index = blockIdx.x * blockDim.x + threadIdx.x;
if (ugrid.size() <= gl_index)
return;
const int i_index = ugrid.i_index(gl_index);
const int j_index = ugrid.j_index(gl_index);
const int k_index = ugrid.k_index(gl_index);
if (i_index >= ugrid.nx || j_index >= ugrid.ny || k_index >= ugrid.nz)
{
return;
}
float val = 0.0f;
float3 v = ugrid.cellVertex(i_index, j_index, k_index);
auto sq = [](const float x) { return x * x; };
auto qu = [](const float x) { return x * x * x * x; };
auto torus_h = [sq](float3 pos, float3 center, float2 param)
{
const float c = sq(param.x);
const float a = sq(param.y);
const float x = sq(pos.x - center.x);
const float y = sq(pos.y - center.y);
const float z = sq(pos.z - center.z);
return sq(x + y + z + c - a) - 4 * c * (x + y);
};
auto torus_v = [sq](float3 pos, float3 center, float2 param)
{
const float c = sq(param.x);
const float a = sq(param.y);
const float x = sq(pos.x - center.x);
const float y = sq(pos.y - center.y);
const float z = sq(pos.z - center.z);
return sq(x + y + z + c - a) - 4 * c * (x + z);
};
auto genusTwo = [sq](const float3 pos)
{
float alpha = 1.0;
float x = (pos.x + 1.0f) / 2.0f;
float y = (pos.y + 1.0f) / 2.0f;
float z = (pos.z + 1.0f) / 2.0f;
x = alpha * (4.0f * x - 2.0f);
y = alpha * (4.0f * y - 2.0f);
z = alpha * (4.0f * z - 2.0f);
float t = 2 * y * (y * y - 3 * x * x) * (1 - z * z);
t += (x * x + y * y) * (x * x + y * y);
t -= (9 * z * z - 1) * (1 - z * z);
return t;
};
auto iWP = [pi](const float3 p)
{
const float alpha = 5.01;
//const float alpha = 1.01;
const float x = alpha * (p.x + 1) * pi;
const float y = alpha * (p.y + 1) * pi;
const float z = alpha * (p.z + 1) * pi;
return cos(x) * cos(y) + cos(y) * cos(z) + cos(z) * cos(x) - cos(x) * cos(y) * cos(z); // iso-value = 0
};
auto pwHybrid = [pi](const float3 p)
{
//const float alpha = 3.01;
const float alpha = 1.01;
const float x = alpha * (p.x + 1) * pi;
const float y = alpha * (p.y + 1) * pi;
const float z = alpha * (p.z + 1) * pi;
return 4.0f * (cosf(x) * cosf(y) + cosf(y) * cosf(z) + cosf(z) * cosf(x)) - 3* cosf(x) * cosf(y) * cosf(z) + 0.8f; // iso-value = 0
};
auto neovius = [pi](const float3 p)
{
const float alpha = 1;
const float x = alpha * (p.x + 1) * pi;
const float y = alpha * (p.y + 1) * pi;
const float z = alpha * (p.z + 1) * pi;
return 3 * (cos(x) + cos(y) + cos(z)) + 4 * cos(x) * cos(y) * cos(z); // iso_value = 0.0
};
auto goursat = [sq,qu](const float3 p)
{
const float a = -1.0f;
const float b = 0.0f;
const float c = 0.5f;
return qu(p.x) + qu(p.y) + qu(p.z) + a * (sq(p.x) + sq(p.y) + sq(p.z)) + b * (sq(p.x) + sq(p.y) + sq(p.z)) + c;
};
auto steinerRoman = [sq, qu](const float3 p)
{
const float alpha = 1.5f;
const float x = alpha * p.x;
const float y = alpha * p.y;
const float z = alpha * p.z;
return sq(x*x + y*y + z*z - 1.0f) - (sq(z - 1) - 2.0f * x*x) * (sq(z + 1) - 2 * y*y);
};
switch (sc) {
case SurfaceCase::Sphere:
val = v.x * v.x + v.y * v.y + v.z * v.z - 0.16;
break;
case SurfaceCase::Torus:
{
const float2 param{ 0.3,0.15 };
const float3 center{ 0,0,0 };
val = torus_h(v, center, param);
break;
}
case SurfaceCase::TwoHoledTorus:
{
const float2 p1{ 0.3,0.15 };
const float t1 = 0.38;
const float t2 = 0.2;
const float delta = 0.38;
const float vt1 = torus_h(v, make_float3(-t1, 0, 0), p1);
const float vt2 = torus_h(v, make_float3(t2, delta, 0), p1);
val = fminf(vt1, vt2);
break;
}
case SurfaceCase::FourHoledTorus:
{
const float2 p2{ 0.3,0.15 };
const float t = 0.38;
const float v1 = torus_h(v, make_float3(-t, 0, 0), p2);
const float v2 = torus_h(v, make_float3(t, 0, 0), p2);
const float v3 = torus_v(v, make_float3(0, 0, -t), p2);
const float v4 = torus_v(v, make_float3(0, 0, t), p2);
val = fminf(v1, v2);
val = fminf(val, v3);
val = fminf(val, v4);
break;
}
case SurfaceCase::GenusTwo:
val = genusTwo(v);
break;
case SurfaceCase::Goursat:
val = goursat(v);
break;
case SurfaceCase::iWP:
val = iWP(v);
break;
case SurfaceCase::pwHybrid:
val = pwHybrid(v);
break;
case SurfaceCase::neovius:
val = neovius(v);
break;
case SurfaceCase::SteinerRoman:
val = steinerRoman(v);
break;
default:
val = genusTwo(v);
}
d_scalar[gl_index] = val;
}
__host__ void p_mc::UniformGrid::generateVolume(const std::array<int, 3>& dim, SurfaceCase sc)
{
// volume size
idim = dim[0];
jdim = dim[1];
kdim = dim[2];
// domain
float xmin = -1.0f;
float ymin = -1.0f;
float zmin = -1.0f;
float xmax = 1.0f;
float ymax = 1.0f;
float zmax = 1.0f;
// define grid size
dx = (xmax - xmin) / (idim - 1.0f);
dy = (ymax - ymin) / (jdim - 1.0f);
dz = (zmax - zmin) / (kdim - 1.0f);
x0 = xmin;
y0 = ymin;
z0 = zmin;
uGrid vol;
vol.nx = idim;
vol.ny = jdim;
vol.nz = kdim;
vol.dx = dx;
vol.dy = dy;
vol.dz = dz;
vol.xmin = x0;
vol.ymin = y0;
vol.zmin = z0;
// allocate data
cudaMalloc(&d_scalar, t_size() * sizeof(float));
cudaCheckError();
d_scalar_.reset(d_scalar, cudaFree);
// compute volume
const size_t size_ = t_size();
uint b_size = MC_BLOCKSIZE;
uint g_size = (static_cast<uint>(size_) + b_size - 1) / b_size;
//volume << < g_size, b_size >> > (ugrid, sCase);
volume << < g_size, b_size >> > (vol, d_scalar, sc);
cudaDeviceSynchronize();
cudaCheckError();
}
|
d64e3f4657c49310a9499f94fff505c3776625ca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_shared_transactions_worker.cuh"
const int BLOCKDIM_Y = 8;
const int BLOCKDIM_X = 32;
const int ELEMENT_IN_THREAD_WIDTH = 4;
const int BLOCK_ELEMENT_X = BLOCKDIM_X * ELEMENT_IN_THREAD_WIDTH;
__device__ unsigned char get_element(unsigned char * array, unsigned int height, unsigned int width, unsigned int width_size, size_t pitch)
{
return (array + height * pitch)[width];
}
__device__ unsigned int get_index(unsigned int height, unsigned int width, unsigned int width_size, size_t pitch)
{
return height * pitch + width;
}
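// Note: the pitch returned by hipMallocPitch is measured in bytes. Because the image
// elements are 1-byte unsigned chars, "height * pitch + width" (and the pointer
// arithmetic in get_element above) addresses the padded rows correctly without an
// extra sizeof() factor.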
__global__ void gpu_shared_transactions_filter(
unsigned char * original_extended_image,
unsigned int original_width,
size_t original_pitch,
unsigned char * image_result,
unsigned int result_width,
unsigned int result_height,
size_t result_pitch,
unsigned int devision_coefficent
)
{
int result_current_width = (blockIdx.x * BLOCK_ELEMENT_X) + (threadIdx.x * ELEMENT_IN_THREAD_WIDTH);
int result_current_height = (blockDim.y * blockIdx.y) + threadIdx.y;
int original_current_width = result_current_width + 1;
int original_current_height = result_current_height + 1;
__shared__ unsigned char temp_image[BLOCKDIM_Y + 2][BLOCK_ELEMENT_X + 2];
int filter[3][3] =
{
{ 1,-2,1 },{ -2,5,-2 },{ 1,-2,1 }
};
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] =
get_element(
original_extended_image,
original_current_height,
original_current_width,
original_width,
original_pitch
);
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 1,
original_width,
original_pitch
);
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 2,
original_width,
original_pitch
);
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 4] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 3,
original_width,
original_pitch
);
{
if (threadIdx.x == 0 && threadIdx.y == 0)
{
temp_image[0][0] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width - 1,
original_width,
original_pitch
);
}
if (threadIdx.x == BLOCKDIM_X - 1 && threadIdx.y == 0)
{
temp_image[0][BLOCK_ELEMENT_X + 1] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 4,
original_width,
original_pitch
);
}
if (threadIdx.x == BLOCKDIM_X - 1 && threadIdx.y == BLOCKDIM_Y - 1)
{
temp_image[BLOCKDIM_Y + 1][BLOCK_ELEMENT_X + 1] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 4,
original_width,
original_pitch
);
}
if (threadIdx.x == 0 && threadIdx.y == BLOCKDIM_Y - 1)
{
temp_image[BLOCKDIM_Y + 1][0] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width - 1,
original_width,
original_pitch
);
}
}
{
if (threadIdx.x == 0)
{
temp_image[threadIdx.y + 1][0] =
get_element(
original_extended_image,
original_current_height,
original_current_width - 1,
original_width,
original_pitch
);
}
if (threadIdx.x == BLOCKDIM_X - 1)
{
temp_image[threadIdx.y + 1][BLOCK_ELEMENT_X + 1] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 4,
original_width,
original_pitch
);
}
if (threadIdx.y == 0)
{
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width,
original_width,
original_pitch
);
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 1,
original_width,
original_pitch
);
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 2,
original_width,
original_pitch
);
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 4] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 3,
original_width,
original_pitch
);
}
if (threadIdx.y == BLOCKDIM_Y - 1)
{
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width,
original_width,
original_pitch
);
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 1,
original_width,
original_pitch
);
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 2,
original_width,
original_pitch
);
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 4] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 3,
original_width,
original_pitch
);
}
}
__syncthreads();
image_result[get_index(result_current_height, result_current_width, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) ] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) ] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) ] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[2][2])
)
/ devision_coefficent
);
image_result[get_index(result_current_height, result_current_width + 1, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 1] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 1] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 1] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 1] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 1] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 1] * (filter[2][2])
)
/ devision_coefficent
);
image_result[get_index(result_current_height, result_current_width + 2, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 2] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 2] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 2] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 2] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 2] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 2] * (filter[2][2])
)
/ devision_coefficent
);
image_result[get_index(result_current_height, result_current_width + 3, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 3] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 3] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 3] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 3] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 3] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 3] * (filter[2][2])
)
/ devision_coefficent
);
}
void check_cuda_status(hipError_t cuda_status)
{
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
}
Result perform_GPU_shared_transactions_worker(Task task)
{
hipEvent_t start_time, stop_time;
hipEventCreate(&start_time);
hipEventCreate(&stop_time);
size_t image_original_pitch;
size_t image_result_pitch;
unsigned char* image_original;
unsigned char* image_result;
auto cuda_status = hipMallocPitch(
(void**)(&image_original),
&image_original_pitch,
task.image.matrix.width * sizeof(unsigned char),
task.image.matrix.height
);
check_cuda_status(cuda_status);
cuda_status = hipMemcpy2D(
image_original,
image_original_pitch,
task.image.matrix.matrix,
task.image.matrix.width * sizeof(unsigned char),
task.image.matrix.width * sizeof(unsigned char),
task.image.matrix.height,
hipMemcpyHostToDevice
);
check_cuda_status(cuda_status);
cuda_status = hipMallocPitch(
(void**)(&image_result),
&image_result_pitch,
task.work_matrix.width * sizeof(unsigned char),
task.image.matrix.height
);
check_cuda_status(cuda_status);
dim3 block(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid;
grid.x = task.work_matrix.width / BLOCK_ELEMENT_X;
if (task.work_matrix.width % BLOCK_ELEMENT_X != 0)
grid.x += 1;
grid.y = task.work_matrix.height / BLOCKDIM_Y;
if (task.work_matrix.height % BLOCKDIM_Y != 0)
grid.y += 1;
hipEventRecord(start_time);
hipLaunchKernelGGL(( gpu_shared_transactions_filter), dim3(grid), dim3(block), 0, 0,
image_original,
task.image.matrix.width,
image_original_pitch,
image_result,
task.work_matrix.width,
task.work_matrix.height,
image_result_pitch,
task.division_coef
);
hipDeviceSynchronize();
hipEventRecord(stop_time);
hipEventSynchronize(stop_time);
Result result;
hipEventElapsedTime(&result.time, start_time, stop_time);
cuda_status = hipMemcpy2D(
task.work_matrix.matrix,
task.work_matrix.width * sizeof(unsigned char),
image_result,
image_result_pitch,
task.work_matrix.width * sizeof(unsigned char),
task.work_matrix.height,
hipMemcpyDeviceToHost
);
check_cuda_status(cuda_status);
result.result = task.work_matrix;
hipEventElapsedTime(&result.time, start_time, stop_time);
return result;
}
|
d64e3f4657c49310a9499f94fff505c3776625ca.cu
|
#include "gpu_shared_transactions_worker.cuh"
const int BLOCKDIM_Y = 8;
const int BLOCKDIM_X = 32;
const int ELEMENT_IN_THREAD_WIDTH = 4;
const int BLOCK_ELEMENT_X = BLOCKDIM_X * ELEMENT_IN_THREAD_WIDTH;
__device__ unsigned char get_element(unsigned char * array, unsigned int height, unsigned int width, unsigned int width_size, size_t pitch)
{
return (array + height * pitch)[width];
}
__device__ unsigned int get_index(unsigned int height, unsigned int width, unsigned int width_size, size_t pitch)
{
return height * pitch + width;
}
__global__ void gpu_shared_transactions_filter(
unsigned char * original_extended_image,
unsigned int original_width,
size_t original_pitch,
unsigned char * image_result,
unsigned int result_width,
unsigned int result_height,
size_t result_pitch,
unsigned int devision_coefficent
)
{
int result_current_width = (blockIdx.x * BLOCK_ELEMENT_X) + (threadIdx.x * ELEMENT_IN_THREAD_WIDTH);
int result_current_height = (blockDim.y * blockIdx.y) + threadIdx.y;
int original_current_width = result_current_width + 1;
int original_current_height = result_current_height + 1;
__shared__ unsigned char temp_image[BLOCKDIM_Y + 2][BLOCK_ELEMENT_X + 2];
int filter[3][3] =
{
{ 1,-2,1 },{ -2,5,-2 },{ 1,-2,1 }
};
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] =
get_element(
original_extended_image,
original_current_height,
original_current_width,
original_width,
original_pitch
);
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 1,
original_width,
original_pitch
);
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 2,
original_width,
original_pitch
);
temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 4] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 3,
original_width,
original_pitch
);
{
if (threadIdx.x == 0 && threadIdx.y == 0)
{
temp_image[0][0] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width - 1,
original_width,
original_pitch
);
}
if (threadIdx.x == BLOCKDIM_X - 1 && threadIdx.y == 0)
{
temp_image[0][BLOCK_ELEMENT_X + 1] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 4,
original_width,
original_pitch
);
}
if (threadIdx.x == BLOCKDIM_X - 1 && threadIdx.y == BLOCKDIM_Y - 1)
{
temp_image[BLOCKDIM_Y + 1][BLOCK_ELEMENT_X + 1] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 4,
original_width,
original_pitch
);
}
if (threadIdx.x == 0 && threadIdx.y == BLOCKDIM_Y - 1)
{
temp_image[BLOCKDIM_Y + 1][0] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width - 1,
original_width,
original_pitch
);
}
}
{
if (threadIdx.x == 0)
{
temp_image[threadIdx.y + 1][0] =
get_element(
original_extended_image,
original_current_height,
original_current_width - 1,
original_width,
original_pitch
);
}
if (threadIdx.x == BLOCKDIM_X - 1)
{
temp_image[threadIdx.y + 1][BLOCK_ELEMENT_X + 1] =
get_element(
original_extended_image,
original_current_height,
original_current_width + 4,
original_width,
original_pitch
);
}
if (threadIdx.y == 0)
{
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width,
original_width,
original_pitch
);
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 1,
original_width,
original_pitch
);
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 2,
original_width,
original_pitch
);
temp_image[0][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 4] =
get_element(
original_extended_image,
original_current_height - 1,
original_current_width + 3,
original_width,
original_pitch
);
}
if (threadIdx.y == BLOCKDIM_Y - 1)
{
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width,
original_width,
original_pitch
);
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 1,
original_width,
original_pitch
);
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 2,
original_width,
original_pitch
);
temp_image[BLOCKDIM_Y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 4] =
get_element(
original_extended_image,
original_current_height + 1,
original_current_width + 3,
original_width,
original_pitch
);
}
}
__syncthreads();
image_result[get_index(result_current_height, result_current_width, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) ] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) ] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) ] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[2][2])
)
/ devision_coefficent
);
image_result[get_index(result_current_height, result_current_width + 1, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 1] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 1] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 1] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 1] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 1] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 1] * (filter[2][2])
)
/ devision_coefficent
);
image_result[get_index(result_current_height, result_current_width + 2, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 2] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 2] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 2] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 2] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 2] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 2] * (filter[2][2])
)
/ devision_coefficent
);
image_result[get_index(result_current_height, result_current_width + 3, result_width, result_pitch)] = (
(
temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] * (filter[0][0])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 3] * (filter[0][1])
+ temp_image[threadIdx.y ][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 3] * (filter[0][2])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] * (filter[1][0])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 3] * (filter[1][1])
+ temp_image[threadIdx.y + 1][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 3] * (filter[1][2])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 3] * (filter[2][0])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 1 + 3] * (filter[2][1])
+ temp_image[threadIdx.y + 2][(threadIdx.x * ELEMENT_IN_THREAD_WIDTH) + 2 + 3] * (filter[2][2])
)
/ devision_coefficent
);
}
void check_cuda_status(cudaError_t cuda_status)
{
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
}
Result perform_GPU_shared_transactions_worker(Task task)
{
cudaEvent_t start_time, stop_time;
cudaEventCreate(&start_time);
cudaEventCreate(&stop_time);
size_t image_original_pitch;
size_t image_result_pitch;
unsigned char* image_original;
unsigned char* image_result;
auto cuda_status = cudaMallocPitch(
(void**)(&image_original),
&image_original_pitch,
task.image.matrix.width * sizeof(unsigned char),
task.image.matrix.height
);
check_cuda_status(cuda_status);
cuda_status = cudaMemcpy2D(
image_original,
image_original_pitch,
task.image.matrix.matrix,
task.image.matrix.width * sizeof(unsigned char),
task.image.matrix.width * sizeof(unsigned char),
task.image.matrix.height,
cudaMemcpyHostToDevice
);
check_cuda_status(cuda_status);
cuda_status = cudaMallocPitch(
(void**)(&image_result),
&image_result_pitch,
task.work_matrix.width * sizeof(unsigned char),
task.image.matrix.height
);
check_cuda_status(cuda_status);
dim3 block(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid;
grid.x = task.work_matrix.width / BLOCK_ELEMENT_X;
if (task.work_matrix.width % BLOCK_ELEMENT_X != 0)
grid.x += 1;
grid.y = task.work_matrix.height / BLOCKDIM_Y;
if (task.work_matrix.height % BLOCKDIM_Y != 0)
grid.y += 1;
cudaEventRecord(start_time);
gpu_shared_transactions_filter<<<grid, block>>>(
image_original,
task.image.matrix.width,
image_original_pitch,
image_result,
task.work_matrix.width,
task.work_matrix.height,
image_result_pitch,
task.division_coef
);
cudaDeviceSynchronize();
cudaEventRecord(stop_time);
cudaEventSynchronize(stop_time);
Result result;
cudaEventElapsedTime(&result.time, start_time, stop_time);
cuda_status = cudaMemcpy2D(
task.work_matrix.matrix,
task.work_matrix.width * sizeof(unsigned char),
image_result,
image_result_pitch,
task.work_matrix.width * sizeof(unsigned char),
task.work_matrix.height,
cudaMemcpyDeviceToHost
);
check_cuda_status(cuda_status);
result.result = task.work_matrix;
cudaEventElapsedTime(&result.time, start_time, stop_time);
return result;
}
|
22fc946ff5035cef55281c2262135cea305b11d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <stdlib.h>
#include <stdio.h>
#define TPB 32
#define M 100
__device__
float distance(float x1, float x2){
return sqrt((x2-x1)*(x2-x1));
}
__global__
void distanceKernel(float *d_out, float *d_in, float ref){
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const float x = d_in[i];
d_out[i] = distance(x, ref);
//printf("blockIdx:%2d,blockDim:%2d,threadIdx:%2d,i = %2d:dist from %f to %f.\n",
// blockIdx.x,blockDim.x,threadIdx.x, i, ref, x, d_out[i]);
}
void distanceArray(float *out, float *in, float ref, int len){
hipEvent_t startCpy, stopCpy;
hipEvent_t startKernel, stopKernel;
hipEventCreate(&startCpy);
hipEventCreate(&stopCpy);
hipEventCreate(&startKernel);
hipEventCreate(&stopKernel);
float *d_in = 0;
float *d_out = 0;
hipMalloc(&d_in, len*sizeof(float));
hipMalloc(&d_out, len*sizeof(float));
hipEventRecord(startCpy);
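// The same host-to-device copy is issued M times, presumably so that the timed
// interval is long enough to measure reliably; the "copy time" printed below is
// therefore the total for all M transfers, not a per-copy figure.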
for(int i=0;i < M;++i)
hipMemcpy(d_in, in, len*sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(stopCpy);
hipEventRecord(startKernel);
hipLaunchKernelGGL(( distanceKernel), dim3(len/TPB), dim3(TPB), 0, 0, d_out, d_in, ref);
hipEventRecord(stopKernel);
hipMemcpy(out, d_out, len*sizeof(float), hipMemcpyDeviceToHost);
hipEventSynchronize(stopCpy);
hipEventSynchronize(stopKernel);
float copyTime = 0;
hipEventElapsedTime(&copyTime,startCpy,stopCpy);
float kernelTime = 0;
hipEventElapsedTime(&kernelTime,startKernel,stopKernel);
printf("copy time:%f (ms)\nkernel time:%f (ms)\n",copyTime,kernelTime);
hipFree(d_in);
hipFree(d_out);
}
|
22fc946ff5035cef55281c2262135cea305b11d0.cu
|
#include "kernel.h"
#include <stdlib.h>
#include <stdio.h>
#define TPB 32
#define M 100
__device__
float distance(float x1, float x2){
return sqrt((x2-x1)*(x2-x1));
}
__global__
void distanceKernel(float *d_out, float *d_in, float ref){
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const float x = d_in[i];
d_out[i] = distance(x, ref);
//printf("blockIdx:%2d,blockDim:%2d,threadIdx:%2d,i = %2d:dist from %f to %f.\n",
// blockIdx.x,blockDim.x,threadIdx.x, i, ref, x, d_out[i]);
}
void distanceArray(float *out, float *in, float ref, int len){
cudaEvent_t startCpy, stopCpy;
cudaEvent_t startKernel, stopKernel;
cudaEventCreate(&startCpy);
cudaEventCreate(&stopCpy);
cudaEventCreate(&startKernel);
cudaEventCreate(&stopKernel);
float *d_in = 0;
float *d_out = 0;
cudaMalloc(&d_in, len*sizeof(float));
cudaMalloc(&d_out, len*sizeof(float));
cudaEventRecord(startCpy);
for(int i=0;i < M;++i)
cudaMemcpy(d_in, in, len*sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(stopCpy);
cudaEventRecord(startKernel);
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
cudaEventRecord(stopKernel);
cudaMemcpy(out, d_out, len*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stopCpy);
cudaEventSynchronize(stopKernel);
float copyTime = 0;
cudaEventElapsedTime(&copyTime,startCpy,stopCpy);
float kernelTime = 0;
cudaEventElapsedTime(&kernelTime,startKernel,stopKernel);
printf("copy time:%f (ms)\nkernel time:%f (ms)\n",copyTime,kernelTime);
cudaFree(d_in);
cudaFree(d_out);
}
|
f07f5e831172557bd093acb6ef6702608b81e92e.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
********************************************************************************
*
* @file CudaImage.cxx
*
* @brief Class to handle a greyscale image using Nvidia CUDA to speedup computations on GPU.
*
* @version 1.0
*
* @todo Write here anything you did not implement.
*
* @date 21/11/2019
*
* @author Franck Vidal
* @author YOUR NAME
*
*
********************************************************************************
*/
//******************************************************************************
// Include
//******************************************************************************
#include <cmath> // Header file for abs and log
#include <limits>
#include <iostream>
#include <sstream>
#include <hip/hip_runtime.h> // Main cuda header for low-level driver API
#include <hip/hip_runtime_api.h> // Main cuda header for high-level runtime programming in C
#include <hip/hip_runtime.h> // Main cuda header for high-level programming in C++
//#include <cutil_math.h>
#include "CudaImage.h"
int CudaImage::m_device_count = 0;
//--------------------------------------------------------
__global__ void negativeFilterKernel(float* apOutputData,
float* apInputData,
float aMinValue,
float aDynamicRange,
int anImageSize)
//--------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageSize)
apOutputData[i] = aMinValue + aDynamicRange * (1.0 - (apInputData[i] - aMinValue) / aDynamicRange);
}
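// Worked example (illustration only): with aMinValue = 0 and aDynamicRange = 255,
// an input pixel of 10 maps to 0 + 255 * (1 - 10 / 255) = 245, i.e. the intensity is
// reflected about the midpoint of the dynamic range, as expected for a negative filter.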
//----------------------------------------------------------
__global__ void shiftScaleFilterKernel(float* apOutputData,
float* apInputData,
float aShiftValue,
float aScaleValue,
int anImageSize)
//----------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageSize)
apOutputData[i] = (apInputData[i] + aShiftValue) * aScaleValue;
}
//---------------------------------------------------
__global__ void logFilterKernel(float* apOutputData,
float* apInputData,
int anImageSize)
//---------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageSize)
apOutputData[i] = log(apInputData[i]);
}
//----------------------------------------------------------
__global__ void flipHorizontallyKernel(float* apOutputData,
float* apInputData,
int anImageWidth,
int anImageHeight)
//----------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageWidth * anImageHeight)
{
// Retrieve the 2D index (x, y) from i
// % is the "modulo operator", the remainder of i / m_width
int x = i % anImageWidth;
// where "/" is an integer division
int y = i / anImageWidth;
// Get the pixel
apOutputData[i] = apInputData[y * anImageWidth + anImageWidth - x - 1];
}
}
//--------------------------------------------------------
__global__ void flipVerticallyKernel(float* apOutputData,
float* apInputData,
int anImageWidth,
int anImageHeight)
//--------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageWidth * anImageHeight)
{
// Retrieve the 2D index (x, y) from i
// % is the "modulo operator", the remainder of i / m_width
int x = i % anImageWidth;
// where "/" is an integer division
int y = i / anImageWidth;
// Get the pixel
apOutputData[i] = apInputData[(anImageHeight - y - 1) * anImageWidth + x];
}
}
//--------------------------
CudaImage::CudaImage():
//--------------------------
Image(),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//--------------------------
{
getNumberOfDevices();
}
//-----------------------------------------
CudaImage::CudaImage(const Image& anImage):
//-----------------------------------------
Image(anImage),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//-----------------------------------------
{
getNumberOfDevices();
loadHost2Device();
}
//---------------------------------------------
CudaImage::CudaImage(const CudaImage& anImage):
//---------------------------------------------
Image(anImage),
m_p_device_memory(0),
m_computing_time(anImage.m_computing_time),
m_host_to_device_transfer(anImage.m_host_to_device_transfer),
m_device_to_host_transfer(anImage.m_device_to_host_transfer),
m_device_to_device_transfer(anImage.m_device_to_device_transfer)
//---------------------------------------------
{
getNumberOfDevices();
loadDevice2Device(anImage.m_p_device_memory);
}
//-----------------------------------------
CudaImage::CudaImage(const float* apData,
unsigned int aWidth,
unsigned int aHeight):
//-----------------------------------------
Image(apData, aWidth, aHeight),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//-----------------------------------------
{
getNumberOfDevices();
loadHost2Device();
}
//--------------------------------------------
CudaImage::CudaImage(unsigned int aWidth,
unsigned int aHeight,
float aDefaultValue):
//--------------------------------------------
Image(aWidth, aHeight, aDefaultValue),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//--------------------------------------------
{
getNumberOfDevices();
loadHost2Device();
}
//---------------------
CudaImage::~CudaImage()
//---------------------
{
// Release the memory
destroy();
}
//------------------------------------
float CudaImage::getElasedTime() const
//------------------------------------
{
return m_computing_time + m_host_to_device_transfer + m_device_to_host_transfer + m_device_to_device_transfer;
}
//-------------------------------------------------------
CudaImage& CudaImage::operator=(const CudaImage& anImage)
//-------------------------------------------------------
{
if (this != &anImage)
{
Image::operator=(anImage);
m_computing_time = anImage.m_computing_time;
m_host_to_device_transfer = anImage.m_host_to_device_transfer;
m_device_to_host_transfer = anImage.m_device_to_host_transfer;
m_device_to_device_transfer = anImage.m_device_to_device_transfer;
loadDevice2Device(anImage.m_p_device_memory);
}
return *this;
}
//-----------------------
void CudaImage::destroy()
//-----------------------
{
Image::destroy();
releaseDeviceMemory();
}
//-----------------------------------
void CudaImage::releaseDeviceMemory()
//-----------------------------------
{
// Release memory on device
if (m_p_device_memory)
{
hipFree(m_p_device_memory);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
m_p_device_memory = 0;
}
}
//--------------------------------------------
void CudaImage::loadPGM(const char* aFileName)
//--------------------------------------------
{
Image::loadPGM(aFileName);
loadHost2Device();
}
//---------------------------------------------------
void CudaImage::loadPGM(const std::string& aFileName)
//---------------------------------------------------
{
CudaImage::loadPGM(aFileName.c_str());
}
//--------------------------------------------
void CudaImage::savePGM(const char* aFileName)
//--------------------------------------------
{
loadDevice2Host();
Image::savePGM(aFileName);
}
//---------------------------------------------------
void CudaImage::savePGM(const std::string& aFileName)
//---------------------------------------------------
{
CudaImage::savePGM(aFileName.c_str());
}
//----------------------------------------------
void CudaImage::loadASCII(const char* aFileName)
//----------------------------------------------
{
Image::loadASCII(aFileName);
loadHost2Device();
}
//-----------------------------------------------------
void CudaImage::loadASCII(const std::string& aFileName)
//-----------------------------------------------------
{
CudaImage::loadASCII(aFileName.c_str());
}
//---------------------------------------------
void CudaImage::saveASCII(const char* aFileName)
//---------------------------------------------
{
loadDevice2Host();
Image::saveASCII(aFileName);
}
//-----------------------------------------------------
void CudaImage::saveASCII(const std::string& aFileName)
//-----------------------------------------------------
{
CudaImage::saveASCII(aFileName.c_str());
}
//------------------------------
CudaImage CudaImage::operator!()
//------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, negativeFilterKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Run the kernel
float min_value = getMinValue();
float range = getMaxValue() - min_value;
hipLaunchKernelGGL(( negativeFilterKernel), dim3(dim_grid), dim3(block_size) , 0, 0, temp.m_p_device_memory,
m_p_device_memory, min_value, range, image_size);
hipDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//------------------------------------------------------
CudaImage CudaImage::shiftScaleFilter(float aShiftValue,
float aScaleValue)
//------------------------------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, shiftScaleFilterKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Run the kernel
hipLaunchKernelGGL(( shiftScaleFilterKernel), dim3(dim_grid), dim3(block_size) , 0, 0, temp.m_p_device_memory,
m_p_device_memory, aShiftValue, aScaleValue, image_size);
hipDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//----------------------------------
CudaImage CudaImage::getNormalised()
//----------------------------------
{
return shiftScaleFilter(-getMinValue(), 1.0 / (getMaxValue() - getMinValue()));
}
//----------------------------------
CudaImage CudaImage::getNormalized()
//----------------------------------
{
return (getNormalised());
}
//------------------------------
CudaImage CudaImage::logFilter()
//------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, logFilterKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Run the kernel
hipLaunchKernelGGL(( logFilterKernel), dim3(dim_grid), dim3(block_size) , 0, 0, temp.m_p_device_memory,
m_p_device_memory, image_size);
hipDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//-------------------------------------
CudaImage CudaImage::flipHorizontally()
//-------------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, flipHorizontallyKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Run the kernel
hipLaunchKernelGGL(( flipHorizontallyKernel), dim3(dim_grid), dim3(block_size) , 0, 0, temp.m_p_device_memory,
m_p_device_memory, m_width, m_height);
hipDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//-----------------------------------
CudaImage CudaImage::flipVertically()
//-----------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, flipVerticallyKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Run the kernel
hipLaunchKernelGGL(( flipVerticallyKernel), dim3(dim_grid), dim3(block_size) , 0, 0, temp.m_p_device_memory,
m_p_device_memory, m_width, m_height);
hipDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//---------------------------------
int CudaImage::getNumberOfDevices()
//---------------------------------
{
hipGetDeviceCount(&m_device_count);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
std::cerr << "Number of CUDA devices:\t" << m_device_count << std::endl;
return m_device_count;
}
//-------------------------------------------------------
void CudaImage::checkCudaError(const char* aFileName,
const char* aFunctionName,
unsigned int aLineNumber)
//-------------------------------------------------------
{
// Get the latest CUDA error.
hipError_t error_code = hipGetLastError();
    // A CUDA error has occurred.
if (error_code != hipSuccess)
{
std::stringstream error_message;
error_message << "CUDA error: " << hipGetErrorString(error_code) << std::endl;
error_message << "\tin file: " << aFileName << std::endl;
error_message << "\tin function: " << aFunctionName << std::endl;
error_message << "\tat line: " << aLineNumber << std::endl;
throw error_message.str();
}
}
//-------------------------------
void CudaImage::loadHost2Device()
//-------------------------------
{
// Release the device memory if needed
releaseDeviceMemory();
// Allocate the memory on device
unsigned int image_size = m_width * m_height;
unsigned int array_size = sizeof(float) * image_size;
hipMalloc((void**) &m_p_device_memory, array_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Copy host memory to device memory
hipMemcpy(m_p_device_memory, &m_p_image[0], array_size, hipMemcpyHostToDevice);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_host_to_device_transfer += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
}
//-------------------------------
void CudaImage::loadDevice2Host()
//-------------------------------
{
unsigned int image_size = m_width * m_height;
unsigned int array_size = sizeof(float) * image_size;
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Copy device memory to host memory
hipMemcpy(&m_p_image[0], m_p_device_memory, array_size, hipMemcpyDeviceToHost);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_device_to_host_transfer += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
}
//-------------------------------------------------------------
void CudaImage::loadDevice2Device(const float* apImageOnDevice)
//-------------------------------------------------------------
{
// Release the device memory if needed
releaseDeviceMemory();
// Allocate the memory on device
unsigned int image_size = m_width * m_height;
unsigned int array_size = sizeof(float) * image_size;
hipMalloc((void**) &m_p_device_memory, array_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Create events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
hipEventRecord(start, 0);
// Copy device memory to device memory
hipMemcpy(m_p_device_memory, apImageOnDevice, array_size, hipMemcpyDeviceToDevice);
// Record stop event
hipEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
hipEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_device_to_device_transfer += duration;
// Destroy the events
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
}
|
f07f5e831172557bd093acb6ef6702608b81e92e.cu
|
/**
********************************************************************************
*
* @file CudaImage.cxx
*
 * @brief Class to handle a greyscale image, using Nvidia CUDA to speed up computations on the GPU.
*
* @version 1.0
*
* @todo Write here anything you did not implement.
*
* @date 21/11/2019
*
* @author Franck Vidal
* @author YOUR NAME
*
*
********************************************************************************
*/
//******************************************************************************
// Include
//******************************************************************************
#include <cmath> // Header file for abs and log
#include <limits>
#include <iostream>
#include <sstream>
#include <cuda.h> // Main cuda header for low-level driver API
#include <cuda_runtime_api.h> // Main cuda header for high-level runtime programming in C
#include <cuda_runtime.h> // Main cuda header for high-level programming in C++
//#include <cutil_math.h>
#include "CudaImage.h"
int CudaImage::m_device_count = 0;
//--------------------------------------------------------
__global__ void negativeFilterKernel(float* apOutputData,
float* apInputData,
float aMinValue,
float aDynamicRange,
int anImageSize)
//--------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageSize)
apOutputData[i] = aMinValue + aDynamicRange * (1.0 - (apInputData[i] - aMinValue) / aDynamicRange);
}
//----------------------------------------------------------
__global__ void shiftScaleFilterKernel(float* apOutputData,
float* apInputData,
float aShiftValue,
float aScaleValue,
int anImageSize)
//----------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageSize)
apOutputData[i] = (apInputData[i] + aShiftValue) * aScaleValue;
}
//---------------------------------------------------
__global__ void logFilterKernel(float* apOutputData,
float* apInputData,
int anImageSize)
//---------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageSize)
apOutputData[i] = log(apInputData[i]);
}
//----------------------------------------------------------
__global__ void flipHorizontallyKernel(float* apOutputData,
float* apInputData,
int anImageWidth,
int anImageHeight)
//----------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageWidth * anImageHeight)
{
// Retrieve the 2D index (x, y) from i
// % is the "modulo operator", the remainder of i / m_width
int x = i % anImageWidth;
// where "/" is an integer division
int y = i / anImageWidth;
// Get the pixel
apOutputData[i] = apInputData[y * anImageWidth + anImageWidth - x - 1];
}
}
//--------------------------------------------------------
__global__ void flipVerticallyKernel(float* apOutputData,
float* apInputData,
int anImageWidth,
int anImageHeight)
//--------------------------------------------------------
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < anImageWidth * anImageHeight)
{
// Retrieve the 2D index (x, y) from i
// % is the "modulo operator", the remainder of i / m_width
int x = i % anImageWidth;
// where "/" is an integer division
int y = i / anImageWidth;
// Get the pixel
apOutputData[i] = apInputData[(anImageHeight - y - 1) * anImageWidth + x];
}
}
//--------------------------
CudaImage::CudaImage():
//--------------------------
Image(),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//--------------------------
{
getNumberOfDevices();
}
//-----------------------------------------
CudaImage::CudaImage(const Image& anImage):
//-----------------------------------------
Image(anImage),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//-----------------------------------------
{
getNumberOfDevices();
loadHost2Device();
}
//---------------------------------------------
CudaImage::CudaImage(const CudaImage& anImage):
//---------------------------------------------
Image(anImage),
m_p_device_memory(0),
m_computing_time(anImage.m_computing_time),
m_host_to_device_transfer(anImage.m_host_to_device_transfer),
m_device_to_host_transfer(anImage.m_device_to_host_transfer),
m_device_to_device_transfer(anImage.m_device_to_device_transfer)
//---------------------------------------------
{
getNumberOfDevices();
loadDevice2Device(anImage.m_p_device_memory);
}
//-----------------------------------------
CudaImage::CudaImage(const float* apData,
unsigned int aWidth,
unsigned int aHeight):
//-----------------------------------------
Image(apData, aWidth, aHeight),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//-----------------------------------------
{
getNumberOfDevices();
loadHost2Device();
}
//--------------------------------------------
CudaImage::CudaImage(unsigned int aWidth,
unsigned int aHeight,
float aDefaultValue):
//--------------------------------------------
Image(aWidth, aHeight, aDefaultValue),
m_p_device_memory(0),
m_computing_time(0),
m_host_to_device_transfer(0),
m_device_to_host_transfer(0),
m_device_to_device_transfer(0)
//--------------------------------------------
{
getNumberOfDevices();
loadHost2Device();
}
//---------------------
CudaImage::~CudaImage()
//---------------------
{
// Release the memory
destroy();
}
//------------------------------------
float CudaImage::getElasedTime() const
//------------------------------------
{
return m_computing_time + m_host_to_device_transfer + m_device_to_host_transfer + m_device_to_device_transfer;
}
//-------------------------------------------------------
CudaImage& CudaImage::operator=(const CudaImage& anImage)
//-------------------------------------------------------
{
if (this != &anImage)
{
Image::operator=(anImage);
m_computing_time = anImage.m_computing_time;
m_host_to_device_transfer = anImage.m_host_to_device_transfer;
m_device_to_host_transfer = anImage.m_device_to_host_transfer;
m_device_to_device_transfer = anImage.m_device_to_device_transfer;
loadDevice2Device(anImage.m_p_device_memory);
}
return *this;
}
//-----------------------
void CudaImage::destroy()
//-----------------------
{
Image::destroy();
releaseDeviceMemory();
}
//-----------------------------------
void CudaImage::releaseDeviceMemory()
//-----------------------------------
{
// Release memory on device
if (m_p_device_memory)
{
cudaFree(m_p_device_memory);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
m_p_device_memory = 0;
}
}
//--------------------------------------------
void CudaImage::loadPGM(const char* aFileName)
//--------------------------------------------
{
Image::loadPGM(aFileName);
loadHost2Device();
}
//---------------------------------------------------
void CudaImage::loadPGM(const std::string& aFileName)
//---------------------------------------------------
{
CudaImage::loadPGM(aFileName.c_str());
}
//--------------------------------------------
void CudaImage::savePGM(const char* aFileName)
//--------------------------------------------
{
loadDevice2Host();
Image::savePGM(aFileName);
}
//---------------------------------------------------
void CudaImage::savePGM(const std::string& aFileName)
//---------------------------------------------------
{
CudaImage::savePGM(aFileName.c_str());
}
//----------------------------------------------
void CudaImage::loadASCII(const char* aFileName)
//----------------------------------------------
{
Image::loadASCII(aFileName);
loadHost2Device();
}
//-----------------------------------------------------
void CudaImage::loadASCII(const std::string& aFileName)
//-----------------------------------------------------
{
CudaImage::loadASCII(aFileName.c_str());
}
//---------------------------------------------
void CudaImage::saveASCII(const char* aFileName)
//---------------------------------------------
{
loadDevice2Host();
Image::saveASCII(aFileName);
}
//-----------------------------------------------------
void CudaImage::saveASCII(const std::string& aFileName)
//-----------------------------------------------------
{
CudaImage::saveASCII(aFileName.c_str());
}
//------------------------------
CudaImage CudaImage::operator!()
//------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, negativeFilterKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Run the kernel
float min_value = getMinValue();
float range = getMaxValue() - min_value;
negativeFilterKernel<<< dim_grid, block_size >>>(temp.m_p_device_memory,
m_p_device_memory, min_value, range, image_size);
cudaDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//------------------------------------------------------
CudaImage CudaImage::shiftScaleFilter(float aShiftValue,
float aScaleValue)
//------------------------------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, shiftScaleFilterKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Run the kernel
shiftScaleFilterKernel<<< dim_grid, block_size >>>(temp.m_p_device_memory,
m_p_device_memory, aShiftValue, aScaleValue, image_size);
cudaDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//----------------------------------
CudaImage CudaImage::getNormalised()
//----------------------------------
{
return shiftScaleFilter(-getMinValue(), 1.0 / (getMaxValue() - getMinValue()));
}
//----------------------------------
CudaImage CudaImage::getNormalized()
//----------------------------------
{
return (getNormalised());
}
//------------------------------
CudaImage CudaImage::logFilter()
//------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, logFilterKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Run the kernel
logFilterKernel<<< dim_grid, block_size >>>(temp.m_p_device_memory,
m_p_device_memory, image_size);
cudaDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//-------------------------------------
CudaImage CudaImage::flipHorizontally()
//-------------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, flipHorizontallyKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Run the kernel
flipHorizontallyKernel<<< dim_grid, block_size >>>(temp.m_p_device_memory,
m_p_device_memory, m_width, m_height);
cudaDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//-----------------------------------
CudaImage CudaImage::flipVertically()
//-----------------------------------
{
// Create an image of the right size
CudaImage temp(getWidth(), getHeight(), 0.0);
// Configure the kernel
unsigned int image_size = m_width * m_height;
int block_size;
int min_grid_size;
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, flipVerticallyKernel, 0, image_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
int dim_grid = image_size / block_size;
if (image_size % block_size) ++dim_grid;
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Run the kernel
flipVerticallyKernel<<< dim_grid, block_size >>>(temp.m_p_device_memory,
m_p_device_memory, m_width, m_height);
cudaDeviceSynchronize();
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_computing_time += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
return temp;
}
//---------------------------------
int CudaImage::getNumberOfDevices()
//---------------------------------
{
cudaGetDeviceCount(&m_device_count);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
std::cerr << "Number of CUDA devices:\t" << m_device_count << std::endl;
return m_device_count;
}
//-------------------------------------------------------
void CudaImage::checkCudaError(const char* aFileName,
const char* aFunctionName,
unsigned int aLineNumber)
//-------------------------------------------------------
{
// Get the latest CUDA error.
cudaError_t error_code = cudaGetLastError();
    // A CUDA error has occurred.
if (error_code != cudaSuccess)
{
std::stringstream error_message;
error_message << "CUDA error: " << cudaGetErrorString(error_code) << std::endl;
error_message << "\tin file: " << aFileName << std::endl;
error_message << "\tin function: " << aFunctionName << std::endl;
error_message << "\tat line: " << aLineNumber << std::endl;
throw error_message.str();
}
}
//-------------------------------
void CudaImage::loadHost2Device()
//-------------------------------
{
// Release the device memory if needed
releaseDeviceMemory();
// Allocate the memory on device
unsigned int image_size = m_width * m_height;
unsigned int array_size = sizeof(float) * image_size;
cudaMalloc((void**) &m_p_device_memory, array_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Copy host memory to device memory
cudaMemcpy(m_p_device_memory, &m_p_image[0], array_size, cudaMemcpyHostToDevice);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_host_to_device_transfer += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
}
//-------------------------------
void CudaImage::loadDevice2Host()
//-------------------------------
{
unsigned int image_size = m_width * m_height;
unsigned int array_size = sizeof(float) * image_size;
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Copy device memory to host memory
cudaMemcpy(&m_p_image[0], m_p_device_memory, array_size, cudaMemcpyDeviceToHost);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_device_to_host_transfer += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
}
//-------------------------------------------------------------
void CudaImage::loadDevice2Device(const float* apImageOnDevice)
//-------------------------------------------------------------
{
// Release the device memory if needed
releaseDeviceMemory();
// Allocate the memory on device
unsigned int image_size = m_width * m_height;
unsigned int array_size = sizeof(float) * image_size;
cudaMalloc((void**) &m_p_device_memory, array_size);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Create events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Record start event
cudaEventRecord(start, 0);
// Copy device memory to device memory
cudaMemcpy(m_p_device_memory, apImageOnDevice, array_size, cudaMemcpyDeviceToDevice);
// Record stop event
cudaEventRecord(stop, 0);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Compute the time in ms
float duration;
cudaEventElapsedTime(&duration, start, stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
// Update the total time
m_device_to_device_transfer += duration;
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkCudaError(__FILE__, __FUNCTION__, __LINE__);
}
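// --- Hedged usage sketch (not part of the original file) ---
// Illustrates how the CudaImage class above might be used end-to-end. It
// assumes the Image base class and CudaImage.h declare the methods shown
// here; "input.pgm" and "output.pgm" are hypothetical file names.
/*
#include <iostream>
#include "CudaImage.h"

int main() {
  try {
    CudaImage image;
    image.loadPGM("input.pgm");               // host load + host-to-device copy
    CudaImage negative = !image;              // negativeFilterKernel on the GPU
    CudaImage flipped = negative.flipHorizontally();
    flipped.savePGM("output.pgm");            // device-to-host copy + host save
    std::cout << "Elapsed GPU time (ms): " << flipped.getElasedTime() << std::endl;
  }
  catch (const std::string& error) {          // checkCudaError() throws a std::string
    std::cerr << error << std::endl;
    return 1;
  }
  return 0;
}
*/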
|
0f41cfd3ef5fecdf94f71f8232522b19c60cc74f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Original written by Marc Suchard
// Modified by Andrew Cron
__global__ void k_%(name)s(float* in_measure, /** Precomputed measure */
float* in_random, /** Precomputed random number */
int* out_component, /** Resultant choice */
int iN, int iT, int logged) {
const int sample_density_block = blockDim.x;
const int sample_block = blockDim.y;
const int thidx = threadIdx.x;
const int thidy = threadIdx.y;
const int datumIndex = blockIdx.x * sample_block + thidy;
const int pdfIndex = datumIndex * iT;
const int tid = thidy*sample_density_block + thidx;
const int stride = sample_density_block+1;
// Make block size flexible ...
extern __shared__ float shared_data[];
float* measure = shared_data; // sample_block by stride
float* sum = measure + sample_block*stride;
float* work = sum + sample_block;
// use 'work' in multiple places to save on memory
if (tid < sample_block) {
sum[tid] = 0;
if(logged==1){
work[tid] = -10000;
} else {
work[tid] = 0;
}
}
if(logged==1){
//get the max values
for(int chunk = 0; chunk < iT; chunk += sample_density_block) {
if(pdfIndex + chunk + thidx < iN*iT)
measure[thidy*stride + thidx] = in_measure[pdfIndex + chunk + thidx];
__syncthreads();
if (tid < sample_block) {
for(int i=0; i<sample_density_block; i++) {
if(chunk + i < iT){
float dcurrent = measure[tid*stride + i];
if (dcurrent > work[tid]) {
work[tid] = dcurrent;
}
}
}
}
__syncthreads();
}
}
    // get scaled cumulative pdfs
for(int chunk = 0; chunk < iT; chunk += sample_density_block) {
if(pdfIndex + chunk + thidx < iN*iT)
measure[thidy*stride + thidx] = in_measure[pdfIndex + chunk + thidx];
__syncthreads();
if (tid < sample_block) {
for(int i=0; i<sample_density_block; i++) {
if (chunk + i < iT){
if(logged==1){
//rescale and exp()
sum[tid] += expf(measure[tid*stride + i] - work[tid]);
} else {
sum[tid] += measure[tid*stride + i];
}
measure[tid*stride + i] = sum[tid];
}
}
}
__syncthreads();
if(datumIndex < iN && chunk + thidx < iT)
in_measure[pdfIndex + chunk + thidx] = measure[thidy*stride + thidx];
}
__syncthreads();
if (tid < sample_block && logged==1){
work[tid] = 0;
}
float* randomNumber = sum;
const int result_id = blockIdx.x * sample_block + tid;
if ( result_id < iN && tid < sample_block)
randomNumber[tid] = in_random[result_id] * sum[tid];
// Find the right bin for the random number ...
for(int chunk = 0; chunk < iT; chunk += sample_density_block) {
if(pdfIndex + chunk + thidx < iN*iT)
measure[thidy*stride + thidx] = in_measure[pdfIndex + chunk + thidx];
__syncthreads();
if (tid < sample_block) {
// storing the index in a float is better because it avoids
// bank conflicts ...
for(int i=0; i<sample_density_block; i++) {
if (chunk + i < iT){
if (randomNumber[tid] > measure[tid*stride + i]){
work[tid] = i + chunk + 1;
}
}
}
if ( work[tid] >= iT) {work[tid] = iT-1;}
}
__syncthreads();
}
// this is now coalesced
if (result_id < iN && tid < sample_block)
out_component[result_id] = (int) work[tid];
}
|
0f41cfd3ef5fecdf94f71f8232522b19c60cc74f.cu
|
// Original written by Marc Suchard
// Modified by Andrew Cron
__global__ void k_%(name)s(float* in_measure, /** Precomputed measure */
float* in_random, /** Precomputed random number */
int* out_component, /** Resultant choice */
int iN, int iT, int logged) {
const int sample_density_block = blockDim.x;
const int sample_block = blockDim.y;
const int thidx = threadIdx.x;
const int thidy = threadIdx.y;
const int datumIndex = blockIdx.x * sample_block + thidy;
const int pdfIndex = datumIndex * iT;
const int tid = thidy*sample_density_block + thidx;
const int stride = sample_density_block+1;
// Make block size flexible ...
extern __shared__ float shared_data[];
float* measure = shared_data; // sample_block by stride
float* sum = measure + sample_block*stride;
float* work = sum + sample_block;
// use 'work' in multiple places to save on memory
if (tid < sample_block) {
sum[tid] = 0;
if(logged==1){
work[tid] = -10000;
} else {
work[tid] = 0;
}
}
if(logged==1){
//get the max values
for(int chunk = 0; chunk < iT; chunk += sample_density_block) {
if(pdfIndex + chunk + thidx < iN*iT)
measure[thidy*stride + thidx] = in_measure[pdfIndex + chunk + thidx];
__syncthreads();
if (tid < sample_block) {
for(int i=0; i<sample_density_block; i++) {
if(chunk + i < iT){
float dcurrent = measure[tid*stride + i];
if (dcurrent > work[tid]) {
work[tid] = dcurrent;
}
}
}
}
__syncthreads();
}
}
    // get scaled cumulative pdfs
for(int chunk = 0; chunk < iT; chunk += sample_density_block) {
if(pdfIndex + chunk + thidx < iN*iT)
measure[thidy*stride + thidx] = in_measure[pdfIndex + chunk + thidx];
__syncthreads();
if (tid < sample_block) {
for(int i=0; i<sample_density_block; i++) {
if (chunk + i < iT){
if(logged==1){
//rescale and exp()
sum[tid] += expf(measure[tid*stride + i] - work[tid]);
} else {
sum[tid] += measure[tid*stride + i];
}
measure[tid*stride + i] = sum[tid];
}
}
}
__syncthreads();
if(datumIndex < iN && chunk + thidx < iT)
in_measure[pdfIndex + chunk + thidx] = measure[thidy*stride + thidx];
}
__syncthreads();
if (tid < sample_block && logged==1){
work[tid] = 0;
}
float* randomNumber = sum;
const int result_id = blockIdx.x * sample_block + tid;
if ( result_id < iN && tid < sample_block)
randomNumber[tid] = in_random[result_id] * sum[tid];
// Find the right bin for the random number ...
for(int chunk = 0; chunk < iT; chunk += sample_density_block) {
if(pdfIndex + chunk + thidx < iN*iT)
measure[thidy*stride + thidx] = in_measure[pdfIndex + chunk + thidx];
__syncthreads();
if (tid < sample_block) {
// storing the index in a float is better because it avoids
// bank conflicts ...
for(int i=0; i<sample_density_block; i++) {
if (chunk + i < iT){
if (randomNumber[tid] > measure[tid*stride + i]){
work[tid] = i + chunk + 1;
}
}
}
if ( work[tid] >= iT) {work[tid] = iT-1;}
}
__syncthreads();
}
// this is now coalesced
if (result_id < iN && tid < sample_block)
out_component[result_id] = (int) work[tid];
}
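// --- Hedged launch-configuration note (not part of the original file) ---
// From the shared-memory layout above (measure: sample_block*stride floats,
// sum: sample_block floats, work: sample_block floats, with
// stride = sample_density_block + 1), a host-side launch would need a dynamic
// shared-memory allocation of sample_block*(stride + 2)*sizeof(float) bytes.
// The block dimensions and problem sizes below are illustrative assumptions.
/*
  int sample_density_block = 32;                   // assumed blockDim.x
  int sample_block         = 8;                    // assumed blockDim.y
  int iN = 10000, iT = 64;                         // assumed data count and bins
  int stride = sample_density_block + 1;
  size_t shared_bytes = sizeof(float) *
      (sample_block * stride + 2 * sample_block);  // measure + sum + work
  dim3 block(sample_density_block, sample_block);
  dim3 grid((iN + sample_block - 1) / sample_block);
  // k_%(name)s<<<grid, block, shared_bytes>>>(in_measure, in_random,
  //                                           out_component, iN, iT, logged);
*/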
|
b4903492988735f103065133d2828e5cdf104a28.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda_utils.hcu"
/***************************************************************
! cuda_utils.cu
!
! CUDA interface routines. Generally, contained here are those
! methods that require at least some CUDA. Those that are
! mainly interfaces to the CUDA RTL are contained in 'cuda_bindings.f90'
!
! 2011 Duane Rosenberg & Pablo D. Mininni
! National Center for Atmospheric Research
! e-mail: [email protected]
!
***************************************************************/
#include <hipfft.h>
extern "C" {
/* Memcpy methods: */
hipError_t cudaMemcpyHost2Dev( void *devdst, const void *hostsrc, size_t count)
{
hipError_t iret;
iret = hipMemcpy( devdst, hostsrc, count, hipMemcpyHostToDevice ) ;
return iret;
}
hipError_t cudaMemcpyDev2Host( void *hostdst, const void *devsrc, size_t count)
{
hipError_t iret;
iret = hipMemcpy( hostdst , devsrc, count, hipMemcpyDeviceToHost );
return iret;
}
hipError_t cudaMemcpyAsyncHost2Dev( void *devdst, const void *hostsrc, size_t count, hipStream_t *stream)
{
hipError_t iret;
iret = hipMemcpyAsync( devdst, hostsrc, count, hipMemcpyHostToDevice, *stream );
return iret;
}
hipError_t cudaMemcpyAsyncDev2Host( void *hostdst, const void *devsrc, size_t count, hipStream_t *stream)
{
hipError_t iret;
iret = hipMemcpyAsync( hostdst, devsrc, count, hipMemcpyDeviceToHost, *stream );
return iret;
}
hipError_t cudaMemcpyAsyncOffHost2Dev( void *devdst, size_t byteoffdev, const void *hostsrc, size_t byteoffhost, size_t count, hipStream_t *stream)
{
hipError_t iret;
iret = hipMemcpyAsync( (char *) devdst + byteoffdev, (char *) hostsrc + byteoffhost, count, hipMemcpyHostToDevice, *stream );
return iret;
}
hipError_t cudaStreamAttach( hipStream_t *stream, void *devptr, size_t byteoffset, size_t len)
{
hipError_t iret;
iret = hipStreamAttachMemAsync( *stream, (char *) devptr + byteoffset, 0, hipMemAttachSingle );
return iret;
}
hipError_t cudaMemcpyAsyncOffDev2Host( void *hostdst, size_t byteoffhost, const void *devsrc, size_t byteoffdev, size_t count, hipStream_t *stream)
{
hipError_t iret;
iret = hipMemcpyAsync( (char *) hostdst + byteoffhost, (char *) devsrc + byteoffdev, count, hipMemcpyDeviceToHost, *stream );
return iret;
}
hipfftResult cufftPlanManyNULL( hipfftHandle *plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, hipfftType type, int batch)
{
hipfftResult iret;
iret = hipfftPlanMany(plan, rank, n, NULL, istride, idist, NULL, ostride, odist, type, batch);
return iret;
}
/* Stream methods: */
hipError_t ptr_cudaStreamCreate( hipStream_t **stream)
{
*stream = (hipStream_t *) malloc(sizeof(hipStream_t));
return hipStreamCreate( *stream );
}
hipError_t f_cudaStreamSynchronize( hipStream_t *stream)
{
hipError_t iret;
iret = hipStreamSynchronize( *stream );
return iret;
}
hipfftResult f_cufftSetStream( hipfftHandle plan, hipStream_t *stream)
{
hipfftResult iret;
iret = hipfftSetStream( plan, *stream );
return iret;
}
/* Interfaces for cuFFT with offsets: */
hipfftResult cufftExecOffC2R( hipfftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
hipfftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = hipfftExecC2R( plan, (hipfftComplex *) ptrin, (hipfftReal *) ptrout );
return iret;
}
hipfftResult cufftExecOffR2C( hipfftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
hipfftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = hipfftExecR2C( plan, (hipfftReal *) ptrin, (hipfftComplex *) ptrout );
return iret;
}
hipfftResult cufftExecOffC2C( hipfftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout, int dir)
{
hipfftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = hipfftExecC2C( plan, (hipfftComplex *) ptrin, (hipfftComplex *) ptrout, dir );
return iret;
}
hipfftResult cufftExecOffZ2D( hipfftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
hipfftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = hipfftExecZ2D( plan, (hipfftDoubleComplex *) ptrin, (hipfftDoubleReal *) ptrout );
return iret;
}
hipfftResult cufftExecOffD2Z( hipfftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
hipfftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = hipfftExecD2Z( plan, (hipfftDoubleReal *) ptrin, (hipfftDoubleComplex *) ptrout );
return iret;
}
hipfftResult cufftExecOffZ2Z( hipfftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout, int dir)
{
hipfftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = hipfftExecZ2Z( plan, (hipfftDoubleComplex *) ptrin, (hipfftDoubleComplex *) ptrout, dir );
return iret;
}
void w_cudaErrChk_(char *sfile, int iline)
{
hipError_t err=hipGetLastError();
if ( err != hipSuccess ) {
printf("Cuda fatal: %s:%d: '%s'\n",sfile,iline,hipGetErrorString(err));
exit(0);
}
}
} /* end, extern "C" interface */
|
b4903492988735f103065133d2828e5cdf104a28.cu
|
#include "cuda_utils.hcu"
/***************************************************************
! cuda_utils.cu
!
! CUDA interface routines. Generally, contained here are those
! methods that require at least some CUDA. Those that are
! mainly interfaces to the CUDA RTL are contained in 'cuda_bindings.f90'
!
! 2011 Duane Rosenberg & Pablo D. Mininni
! National Center for Atmospheric Research
! e-mail: [email protected]
!
***************************************************************/
#include <cufft.h>
extern "C" {
/* Memcpy methods: */
cudaError_t cudaMemcpyHost2Dev( void *devdst, const void *hostsrc, size_t count)
{
cudaError_t iret;
iret = cudaMemcpy( devdst, hostsrc, count, cudaMemcpyHostToDevice ) ;
return iret;
}
cudaError_t cudaMemcpyDev2Host( void *hostdst, const void *devsrc, size_t count)
{
cudaError_t iret;
iret = cudaMemcpy( hostdst , devsrc, count, cudaMemcpyDeviceToHost );
return iret;
}
cudaError_t cudaMemcpyAsyncHost2Dev( void *devdst, const void *hostsrc, size_t count, cudaStream_t *stream)
{
cudaError_t iret;
iret = cudaMemcpyAsync( devdst, hostsrc, count, cudaMemcpyHostToDevice, *stream );
return iret;
}
cudaError_t cudaMemcpyAsyncDev2Host( void *hostdst, const void *devsrc, size_t count, cudaStream_t *stream)
{
cudaError_t iret;
iret = cudaMemcpyAsync( hostdst, devsrc, count, cudaMemcpyDeviceToHost, *stream );
return iret;
}
cudaError_t cudaMemcpyAsyncOffHost2Dev( void *devdst, size_t byteoffdev, const void *hostsrc, size_t byteoffhost, size_t count, cudaStream_t *stream)
{
cudaError_t iret;
iret = cudaMemcpyAsync( (char *) devdst + byteoffdev, (char *) hostsrc + byteoffhost, count, cudaMemcpyHostToDevice, *stream );
return iret;
}
cudaError_t cudaStreamAttach( cudaStream_t *stream, void *devptr, size_t byteoffset, size_t len)
{
cudaError_t iret;
iret = cudaStreamAttachMemAsync( *stream, (char *) devptr + byteoffset, 0, cudaMemAttachSingle );
return iret;
}
cudaError_t cudaMemcpyAsyncOffDev2Host( void *hostdst, size_t byteoffhost, const void *devsrc, size_t byteoffdev, size_t count, cudaStream_t *stream)
{
cudaError_t iret;
iret = cudaMemcpyAsync( (char *) hostdst + byteoffhost, (char *) devsrc + byteoffdev, count, cudaMemcpyDeviceToHost, *stream );
return iret;
}
cufftResult cufftPlanManyNULL( cufftHandle *plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, cufftType type, int batch)
{
cufftResult iret;
iret = cufftPlanMany(plan, rank, n, NULL, istride, idist, NULL, ostride, odist, type, batch);
return iret;
}
/* Stream methods: */
cudaError_t ptr_cudaStreamCreate( cudaStream_t **stream)
{
*stream = (cudaStream_t *) malloc(sizeof(cudaStream_t));
return cudaStreamCreate( *stream );
}
cudaError_t f_cudaStreamSynchronize( cudaStream_t *stream)
{
cudaError_t iret;
iret = cudaStreamSynchronize( *stream );
return iret;
}
cufftResult f_cufftSetStream( cufftHandle plan, cudaStream_t *stream)
{
cufftResult iret;
iret = cufftSetStream( plan, *stream );
return iret;
}
/* Interfaces for cuFFT with offsets: */
cufftResult cufftExecOffC2R( cufftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
cufftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = cufftExecC2R( plan, (cufftComplex *) ptrin, (cufftReal *) ptrout );
return iret;
}
cufftResult cufftExecOffR2C( cufftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
cufftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = cufftExecR2C( plan, (cufftReal *) ptrin, (cufftComplex *) ptrout );
return iret;
}
cufftResult cufftExecOffC2C( cufftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout, int dir)
{
cufftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = cufftExecC2C( plan, (cufftComplex *) ptrin, (cufftComplex *) ptrout, dir );
return iret;
}
cufftResult cufftExecOffZ2D( cufftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
cufftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = cufftExecZ2D( plan, (cufftDoubleComplex *) ptrin, (cufftDoubleReal *) ptrout );
return iret;
}
cufftResult cufftExecOffD2Z( cufftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout)
{
cufftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = cufftExecD2Z( plan, (cufftDoubleReal *) ptrin, (cufftDoubleComplex *) ptrout );
return iret;
}
cufftResult cufftExecOffZ2Z( cufftHandle plan, void *datain, size_t byteoffin, void *dataout, size_t byteoffout, int dir)
{
cufftResult iret;
char* ptrin = (char *) datain + byteoffin;
char* ptrout = (char *) dataout + byteoffout;
iret = cufftExecZ2Z( plan, (cufftDoubleComplex *) ptrin, (cufftDoubleComplex *) ptrout, dir );
return iret;
}
void w_cudaErrChk_(char *sfile, int iline)
{
cudaError_t err=cudaGetLastError();
if ( err != cudaSuccess ) {
printf("Cuda fatal: %s:%d: '%s'\n",sfile,iline,cudaGetErrorString(err));
exit(0);
}
}
} /* end, extern "C" interface */
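/* --- Hedged usage sketch (not part of the original file) ---
   Shows how the offset-based wrappers above might be driven from C code,
   assuming a single nx-point real-to-complex transform; the buffer sizes and
   variable names are illustrative assumptions, not part of the library. */
/*
void example(float *h_in, void *d_in, void *d_out, size_t nbytes)
{
    cudaStream_t *stream;
    cufftHandle   plan;
    int           nx = 256;

    ptr_cudaStreamCreate(&stream);               // allocate and create the stream
    cufftPlanManyNULL(&plan, 1, &nx, NULL, 1, nx, NULL, 1, nx/2 + 1, CUFFT_R2C, 1);
    f_cufftSetStream(plan, stream);

    cudaMemcpyAsyncOffHost2Dev(d_in, 0, h_in, 0, nbytes, stream);
    cufftExecOffR2C(plan, d_in, 0, d_out, 0);    // transform at byte offset 0
    f_cudaStreamSynchronize(stream);
    w_cudaErrChk_((char *) __FILE__, __LINE__);
}
*/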
|
f46bcb40d90b86122741aaf455154e9bee03c7e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/layers/beam_search_layers/BeamSearchLayer.h"
namespace fastertransformer {
template<typename T>
__global__ void logProbAddCumLogProb(float* log_probs,
const T* logits,
const float* cum_log_probs,
const int* end_ids,
const bool* finished,
const int beam_width,
const int n)
{
int bid = blockIdx.x;
bool finish = finished != nullptr ? finished[bid] : false;
int offset = bid * n;
float max_val = -1 * FLT_MAX;
__shared__ float s_max_val;
__shared__ float s_sum_val;
if (finish) {
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = (tid == end_ids[bid / beam_width]) ? cum_log_probs[bid] : -FLT_MAX;
}
}
else {
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = (float)(logits[offset + tid]);
max_val = max(max_val, log_probs[offset + tid]);
}
max_val = blockReduceMax(max_val);
if (threadIdx.x == 0) {
s_max_val = max_val;
}
__syncthreads();
float sum_val = 0.0f;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = __expf(log_probs[offset + tid] - s_max_val);
sum_val += log_probs[offset + tid];
}
sum_val = blockReduceSum(sum_val);
if (threadIdx.x == 0) {
s_sum_val = sum_val + 1e-6f;
}
__syncthreads();
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = logf(log_probs[offset + tid] / s_sum_val) + cum_log_probs[bid];
}
}
}
template<typename T>
void invokeLogProbAddCumLogProb(float* log_probs,
const T* logits,
const float* cum_log_probs,
const int* end_ids,
const bool* finished,
const int m,
const int beam_width,
const int n,
hipStream_t stream)
{
dim3 grid(m);
dim3 block(min(n, 1024));
    /* n is the vocab_size, e.g., 30000, 7000, ...; vocab_size is usually very large. */
hipLaunchKernelGGL(( logProbAddCumLogProb), dim3(grid), dim3(block), 0, stream,
log_probs, logits, cum_log_probs, end_ids, finished, beam_width, n);
}
template<typename T>
__global__ void updateStatesKernel(T* log_probs,
T* cum_log_probs,
float* output_log_probs,
bool* finished,
int* parent_ids,
int* sequence_length,
int* word_ids,
int* output_ids,
BeamHypotheses beam_hyps,
const int local_batch_size,
const int beam_width,
const int vocab_size,
const int* end_ids)
{
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < local_batch_size * beam_width;
index += blockDim.x * gridDim.x) {
int batch_id = index / beam_width;
sequence_length[index] = finished[index] ? sequence_length[index] : sequence_length[index] + 1;
int beam_id = (word_ids[index] / vocab_size) % beam_width;
int word_id = word_ids[index] % vocab_size;
if (output_log_probs != nullptr) {
// get the cum_log_probs of previous run
output_log_probs[index] = log_probs[batch_id * beam_width * vocab_size + beam_id * vocab_size + word_id]
- cum_log_probs[batch_id * beam_width + beam_id];
}
cum_log_probs[index] = log_probs[batch_id * beam_width * vocab_size + beam_id * vocab_size + word_id];
sequence_length[index] = sequence_length[batch_id * beam_width + beam_id];
finished[index] = word_id == end_ids[batch_id] ? 1 : 0;
parent_ids[index] = beam_id;
word_ids[index] = word_id;
output_ids[index] = word_id;
if (beam_hyps.num_beams != nullptr) {
if (beam_hyps.num_beams[beam_hyps.ite * beam_hyps.local_batch_size + batch_id] == beam_width) {
for (int i = 0; i < beam_width; i++) {
finished[batch_id * beam_width + i] = true;
}
}
}
}
}
void invokeUpdateStates(float* log_probs,
float* cum_log_probs,
float* output_log_probs,
bool* finished,
int* parent_ids,
int* sequence_length,
int* word_ids,
int* output_ids,
BeamHypotheses* beam_hyps,
const int local_batch_size,
const int beam_width,
const int vocab_size,
const int* end_ids,
hipStream_t stream)
{
dim3 grid((int)ceil(local_batch_size * beam_width * 1.0 / 256));
dim3 block(256);
hipLaunchKernelGGL(( updateStatesKernel<float>), dim3(grid), dim3(block), 0, stream, log_probs,
cum_log_probs,
output_log_probs,
finished,
parent_ids,
sequence_length,
word_ids,
output_ids,
*beam_hyps,
local_batch_size,
beam_width,
vocab_size,
end_ids);
}
template<typename T>
void BeamSearchLayer<T>::invokeSoftMax(TensorMap* output_tensors, TensorMap* input_tensors)
{
// input_tensors:
// logits [local_batch_size, beam_width, vocab_size_padded]
// embedding_bias [vocab_size_padded]
// step [1] on cpu
// src_cache_indirection [local_batch_size, beam_width, max_seq_len]
// max_input_length [1] on cpu
// input_lengths [local_batch_size * beam_width]
// ite [1] on cpu
// beam_search_diversity_rate [1] on cpu, optional
// temperature [1] on cpu, optional
// len_penalty [1] on cpu, optional
// repetition_penalty [1] on cpu, optional
// output_tensors:
// output_ids [max_seq_len, batch_size, beam_width]
// finished [local_batch_size * beam_width]
// cum_log_probs [local_batch_size * beam_width]
// parent_ids [max_seq_len, batch_size * beam_width]
// sequence_length [local_batch_size * beam_width]
// tgt_cache_indirection [local_batch_size, beam_width, max_seq_len]
// output_log_probs [max_seq_len, batch_size * beam_width], optional
// beam_hyps, optional
FT_CHECK(input_tensors->size() >= 7);
FT_CHECK(output_tensors->size() >= 6);
const int batch_size = output_tensors->at("output_ids").shape[1];
const int beam_width = output_tensors->at("output_ids").shape[2];
const int step = input_tensors->at("step").getVal<int>();
const int ite = input_tensors->at("ite").getVal<int>();
const int local_batch_size = input_tensors->at("logits").shape[0];
const float diversity_rate = input_tensors->isExist("beam_search_diversity_rate") ?
input_tensors->at("beam_search_diversity_rate").getVal<float>() :
0.0f;
const float length_penalty =
input_tensors->isExist("len_penalty") ? input_tensors->at("len_penalty").getVal<float>() : 0.0f;
const int id_offset = step * batch_size * beam_width + ite * local_batch_size * beam_width;
invokeLogProbAddCumLogProb(float_log_prob_buf_,
input_tensors->at("logits").getPtr<T>(),
output_tensors->at("cum_log_probs").getPtr<float>(),
input_tensors->at("end_id").getPtr<const int>(),
output_tensors->at("finished").getPtr<bool>(),
local_batch_size * beam_width,
beam_width,
vocab_size_padded_,
stream_);
sync_check_cuda_error();
BeamHypotheses beam_hyps;
if (output_tensors->isExist("beam_hyps") && diversity_rate == 0.0f) {
beam_hyps = *((BeamHypotheses*)(output_tensors->at("beam_hyps").getPtr<void>()));
beam_hyps.step = step;
beam_hyps.ite = ite;
beam_hyps.local_batch_size = local_batch_size;
beam_hyps.batch_size = output_tensors->at("output_ids").shape[1];
beam_hyps.max_seq_len = output_tensors->at("output_ids").shape[0];
beam_hyps.output_ids_src = output_tensors->at("output_ids").getPtr<int>();
beam_hyps.parent_ids_src = output_tensors->at("parent_ids").getPtr<int>();
beam_hyps.sequence_lengths_src = output_tensors->at("sequence_length").getPtr<int>();
beam_hyps.length_penalty = length_penalty;
}
invokeTopkBeamSearch<float>(topk_softmax_workspace_,
topk_softmax_workspace_size_,
float_log_prob_buf_,
output_tensors->at("output_ids").getPtrWithOffset<int>(id_offset),
&beam_hyps,
output_tensors->at("finished").getPtr<bool>(),
output_tensors->isExist("sequence_length") ?
output_tensors->at("sequence_length").getPtr<int>() :
(int*)nullptr,
local_batch_size,
beam_width,
vocab_size_padded_,
diversity_rate,
length_penalty,
input_tensors->at("end_id").getPtr<const int>(),
stream_);
sync_check_cuda_error();
invokeUpdateStates(float_log_prob_buf_,
output_tensors->at("cum_log_probs").getPtr<float>(),
output_tensors->getPtrWithOffset<float>("output_log_probs", id_offset, nullptr),
output_tensors->at("finished").getPtr<bool>(),
output_tensors->at("parent_ids").getPtrWithOffset<int>(id_offset),
output_tensors->at("sequence_length").getPtr<int>(),
output_tensors->at("output_ids").getPtrWithOffset<int>(id_offset),
output_tensors->at("output_ids").getPtrWithOffset<int>(id_offset),
&beam_hyps,
local_batch_size,
beam_width,
vocab_size_padded_,
input_tensors->at("end_id").getPtr<const int>(),
stream_);
sync_check_cuda_error();
}
template<typename T>
void BeamSearchLayer<T>::allocateBuffer()
{
FT_CHECK(false);
}
template<typename T>
void BeamSearchLayer<T>::allocateBuffer(size_t batch_size, size_t beam_width)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
invokeTopkBeamSearch<float>(nullptr,
topk_softmax_workspace_size_,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
batch_size,
beam_width,
vocab_size_padded_,
0.0f, // diversity rate
0.0f, // length penalty
nullptr,
stream_);
topk_softmax_workspace_ = reinterpret_cast<float*>(allocator_->reMalloc(
topk_softmax_workspace_,
topk_softmax_workspace_size_ + sizeof(float) * batch_size * beam_width * vocab_size_padded_,
false));
float_log_prob_buf_ = (float*)((char*)topk_softmax_workspace_ + topk_softmax_workspace_size_);
is_allocate_buffer_ = true;
}
template<typename T>
BeamSearchLayer<T>::BeamSearchLayer(size_t max_batch_size,
size_t head_num,
size_t size_per_head,
size_t beam_width,
size_t vocab_size,
size_t vocab_size_padded,
int end_id,
float diversity_rate,
float temperature,
float len_penalty,
float repetition_penalty,
hipStream_t stream,
cublasMMWrapper* cublas_wrapper,
IAllocator* allocator,
bool is_free_buffer_after_forward):
BaseBeamSearchLayer<T>(max_batch_size,
head_num,
size_per_head,
beam_width,
vocab_size,
vocab_size_padded,
end_id,
diversity_rate,
temperature,
len_penalty,
repetition_penalty,
stream,
cublas_wrapper,
allocator,
is_free_buffer_after_forward)
{
}
template<typename T>
BeamSearchLayer<T>::BeamSearchLayer(BeamSearchLayer<T> const& beam_search_layer):
BaseBeamSearchLayer<T>(beam_search_layer)
{
}
template<typename T>
BeamSearchLayer<T>::~BeamSearchLayer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
}
template class BeamSearchLayer<float>;
template class BeamSearchLayer<half>;
} // namespace fastertransformer
|
f46bcb40d90b86122741aaf455154e9bee03c7e5.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/layers/beam_search_layers/BeamSearchLayer.h"
namespace fastertransformer {
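// Computes a per-beam log-softmax over the vocabulary and adds the beam's running cumulative
// log-probability. For beams that are already finished, all probability mass is pinned on the
// end token: log_probs[end_id] = cum_log_probs[beam] and every other entry is set to -FLT_MAX.
// One thread block processes one (batch, beam) row of length n.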
template<typename T>
__global__ void logProbAddCumLogProb(float* log_probs,
const T* logits,
const float* cum_log_probs,
const int* end_ids,
const bool* finished,
const int beam_width,
const int n)
{
int bid = blockIdx.x;
bool finish = finished != nullptr ? finished[bid] : false;
int offset = bid * n;
float max_val = -1 * FLT_MAX;
__shared__ float s_max_val;
__shared__ float s_sum_val;
if (finish) {
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = (tid == end_ids[bid / beam_width]) ? cum_log_probs[bid] : -FLT_MAX;
}
}
else {
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = (float)(logits[offset + tid]);
max_val = max(max_val, log_probs[offset + tid]);
}
max_val = blockReduceMax(max_val);
if (threadIdx.x == 0) {
s_max_val = max_val;
}
__syncthreads();
float sum_val = 0.0f;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = __expf(log_probs[offset + tid] - s_max_val);
sum_val += log_probs[offset + tid];
}
sum_val = blockReduceSum(sum_val);
if (threadIdx.x == 0) {
s_sum_val = sum_val + 1e-6f;
}
__syncthreads();
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
log_probs[offset + tid] = logf(log_probs[offset + tid] / s_sum_val) + cum_log_probs[bid];
}
}
}
template<typename T>
void invokeLogProbAddCumLogProb(float* log_probs,
const T* logits,
const float* cum_log_probs,
const int* end_ids,
const bool* finished,
const int m,
const int beam_width,
const int n,
cudaStream_t stream)
{
dim3 grid(m);
dim3 block(min(n, 1024));
    /* n is the vocab_size, e.g., 30000 or 7000; the vocabulary is usually very large. */
logProbAddCumLogProb<<<grid, block, 0, stream>>>(
log_probs, logits, cum_log_probs, end_ids, finished, beam_width, n);
}
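// Decodes each flat top-k index in word_ids into (parent beam, word id) and refreshes the
// per-slot bookkeeping: cumulative log-probability, optional per-step log-probability,
// sequence length inherited from the parent beam, finished flag (set when the selected word
// equals the end id), parent ids and output ids. When beam_hyps tracking is enabled and a
// batch already holds beam_width finished hypotheses, all beams of that batch are marked finished.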
template<typename T>
__global__ void updateStatesKernel(T* log_probs,
T* cum_log_probs,
float* output_log_probs,
bool* finished,
int* parent_ids,
int* sequence_length,
int* word_ids,
int* output_ids,
BeamHypotheses beam_hyps,
const int local_batch_size,
const int beam_width,
const int vocab_size,
const int* end_ids)
{
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < local_batch_size * beam_width;
index += blockDim.x * gridDim.x) {
int batch_id = index / beam_width;
sequence_length[index] = finished[index] ? sequence_length[index] : sequence_length[index] + 1;
int beam_id = (word_ids[index] / vocab_size) % beam_width;
int word_id = word_ids[index] % vocab_size;
if (output_log_probs != nullptr) {
// get the cum_log_probs of previous run
output_log_probs[index] = log_probs[batch_id * beam_width * vocab_size + beam_id * vocab_size + word_id]
- cum_log_probs[batch_id * beam_width + beam_id];
}
cum_log_probs[index] = log_probs[batch_id * beam_width * vocab_size + beam_id * vocab_size + word_id];
sequence_length[index] = sequence_length[batch_id * beam_width + beam_id];
finished[index] = word_id == end_ids[batch_id] ? 1 : 0;
parent_ids[index] = beam_id;
word_ids[index] = word_id;
output_ids[index] = word_id;
if (beam_hyps.num_beams != nullptr) {
if (beam_hyps.num_beams[beam_hyps.ite * beam_hyps.local_batch_size + batch_id] == beam_width) {
for (int i = 0; i < beam_width; i++) {
finished[batch_id * beam_width + i] = true;
}
}
}
}
}
void invokeUpdateStates(float* log_probs,
float* cum_log_probs,
float* output_log_probs,
bool* finished,
int* parent_ids,
int* sequence_length,
int* word_ids,
int* output_ids,
BeamHypotheses* beam_hyps,
const int local_batch_size,
const int beam_width,
const int vocab_size,
const int* end_ids,
cudaStream_t stream)
{
dim3 grid((int)ceil(local_batch_size * beam_width * 1.0 / 256));
dim3 block(256);
updateStatesKernel<float><<<grid, block, 0, stream>>>(log_probs,
cum_log_probs,
output_log_probs,
finished,
parent_ids,
sequence_length,
word_ids,
output_ids,
*beam_hyps,
local_batch_size,
beam_width,
vocab_size,
end_ids);
}
template<typename T>
void BeamSearchLayer<T>::invokeSoftMax(TensorMap* output_tensors, TensorMap* input_tensors)
{
// input_tensors:
// logits [local_batch_size, beam_width, vocab_size_padded]
// embedding_bias [vocab_size_padded]
// step [1] on cpu
// src_cache_indirection [local_batch_size, beam_width, max_seq_len]
// max_input_length [1] on cpu
// input_lengths [local_batch_size * beam_width]
// ite [1] on cpu
// beam_search_diversity_rate [1] on cpu, optional
// temperature [1] on cpu, optional
// len_penalty [1] on cpu, optional
// repetition_penalty [1] on cpu, optional
// output_tensors:
// output_ids [max_seq_len, batch_size, beam_width]
// finished [local_batch_size * beam_width]
// cum_log_probs [local_batch_size * beam_width]
// parent_ids [max_seq_len, batch_size * beam_width]
// sequence_length [local_batch_size * beam_width]
// tgt_cache_indirection [local_batch_size, beam_width, max_seq_len]
// output_log_probs [max_seq_len, batch_size * beam_width], optional
// beam_hyps, optional
FT_CHECK(input_tensors->size() >= 7);
FT_CHECK(output_tensors->size() >= 6);
const int batch_size = output_tensors->at("output_ids").shape[1];
const int beam_width = output_tensors->at("output_ids").shape[2];
const int step = input_tensors->at("step").getVal<int>();
const int ite = input_tensors->at("ite").getVal<int>();
const int local_batch_size = input_tensors->at("logits").shape[0];
const float diversity_rate = input_tensors->isExist("beam_search_diversity_rate") ?
input_tensors->at("beam_search_diversity_rate").getVal<float>() :
0.0f;
const float length_penalty =
input_tensors->isExist("len_penalty") ? input_tensors->at("len_penalty").getVal<float>() : 0.0f;
const int id_offset = step * batch_size * beam_width + ite * local_batch_size * beam_width;
invokeLogProbAddCumLogProb(float_log_prob_buf_,
input_tensors->at("logits").getPtr<T>(),
output_tensors->at("cum_log_probs").getPtr<float>(),
input_tensors->at("end_id").getPtr<const int>(),
output_tensors->at("finished").getPtr<bool>(),
local_batch_size * beam_width,
beam_width,
vocab_size_padded_,
stream_);
sync_check_cuda_error();
BeamHypotheses beam_hyps;
if (output_tensors->isExist("beam_hyps") && diversity_rate == 0.0f) {
beam_hyps = *((BeamHypotheses*)(output_tensors->at("beam_hyps").getPtr<void>()));
beam_hyps.step = step;
beam_hyps.ite = ite;
beam_hyps.local_batch_size = local_batch_size;
beam_hyps.batch_size = output_tensors->at("output_ids").shape[1];
beam_hyps.max_seq_len = output_tensors->at("output_ids").shape[0];
beam_hyps.output_ids_src = output_tensors->at("output_ids").getPtr<int>();
beam_hyps.parent_ids_src = output_tensors->at("parent_ids").getPtr<int>();
beam_hyps.sequence_lengths_src = output_tensors->at("sequence_length").getPtr<int>();
beam_hyps.length_penalty = length_penalty;
}
invokeTopkBeamSearch<float>(topk_softmax_workspace_,
topk_softmax_workspace_size_,
float_log_prob_buf_,
output_tensors->at("output_ids").getPtrWithOffset<int>(id_offset),
&beam_hyps,
output_tensors->at("finished").getPtr<bool>(),
output_tensors->isExist("sequence_length") ?
output_tensors->at("sequence_length").getPtr<int>() :
(int*)nullptr,
local_batch_size,
beam_width,
vocab_size_padded_,
diversity_rate,
length_penalty,
input_tensors->at("end_id").getPtr<const int>(),
stream_);
sync_check_cuda_error();
invokeUpdateStates(float_log_prob_buf_,
output_tensors->at("cum_log_probs").getPtr<float>(),
output_tensors->getPtrWithOffset<float>("output_log_probs", id_offset, nullptr),
output_tensors->at("finished").getPtr<bool>(),
output_tensors->at("parent_ids").getPtrWithOffset<int>(id_offset),
output_tensors->at("sequence_length").getPtr<int>(),
output_tensors->at("output_ids").getPtrWithOffset<int>(id_offset),
output_tensors->at("output_ids").getPtrWithOffset<int>(id_offset),
&beam_hyps,
local_batch_size,
beam_width,
vocab_size_padded_,
input_tensors->at("end_id").getPtr<const int>(),
stream_);
sync_check_cuda_error();
}
template<typename T>
void BeamSearchLayer<T>::allocateBuffer()
{
FT_CHECK(false);
}
template<typename T>
void BeamSearchLayer<T>::allocateBuffer(size_t batch_size, size_t beam_width)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
invokeTopkBeamSearch<float>(nullptr,
topk_softmax_workspace_size_,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
batch_size,
beam_width,
vocab_size_padded_,
0.0f, // diversity rate
0.0f, // length penalty
nullptr,
stream_);
topk_softmax_workspace_ = reinterpret_cast<float*>(allocator_->reMalloc(
topk_softmax_workspace_,
topk_softmax_workspace_size_ + sizeof(float) * batch_size * beam_width * vocab_size_padded_,
false));
float_log_prob_buf_ = (float*)((char*)topk_softmax_workspace_ + topk_softmax_workspace_size_);
is_allocate_buffer_ = true;
}
template<typename T>
BeamSearchLayer<T>::BeamSearchLayer(size_t max_batch_size,
size_t head_num,
size_t size_per_head,
size_t beam_width,
size_t vocab_size,
size_t vocab_size_padded,
int end_id,
float diversity_rate,
float temperature,
float len_penalty,
float repetition_penalty,
cudaStream_t stream,
cublasMMWrapper* cublas_wrapper,
IAllocator* allocator,
bool is_free_buffer_after_forward):
BaseBeamSearchLayer<T>(max_batch_size,
head_num,
size_per_head,
beam_width,
vocab_size,
vocab_size_padded,
end_id,
diversity_rate,
temperature,
len_penalty,
repetition_penalty,
stream,
cublas_wrapper,
allocator,
is_free_buffer_after_forward)
{
}
template<typename T>
BeamSearchLayer<T>::BeamSearchLayer(BeamSearchLayer<T> const& beam_search_layer):
BaseBeamSearchLayer<T>(beam_search_layer)
{
}
template<typename T>
BeamSearchLayer<T>::~BeamSearchLayer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
}
template class BeamSearchLayer<float>;
template class BeamSearchLayer<half>;
} // namespace fastertransformer
|
0076562f7999e334a1ac4cf4b0ff305eaace5b49.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
__global__ void sum_row(float *img, float *l1_dev, float *l2_dev, float *lx_dev, float *ly_dev, int I_width, int I_height) // width of image
{
// Shared memory for four tables
__shared__ float buffer_l1[BLOCK_HEIGHT][BLOCK_WIDTH];
__shared__ float buffer_l2[BLOCK_HEIGHT][BLOCK_WIDTH];
__shared__ float buffer_lx[BLOCK_HEIGHT][BLOCK_WIDTH];
__shared__ float buffer_ly[BLOCK_HEIGHT][BLOCK_WIDTH];
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int m, cursor;
	// Temporary values for the prefix sum
float temp_l1, temp_l2, temp_lx, temp_ly;
float last_l1, last_l2, last_lx, last_ly;
for (m = 0; m < (I_width / BLOCK_WIDTH + (I_width%BLOCK_WIDTH>0)); m++)
{
int index_in = row * I_width + blockDim.x * m + threadIdx.x;
int thread_width = BLOCK_WIDTH;
// thread_width is the width of blocks with activated threads
if ((m == I_width / BLOCK_WIDTH) && I_width%BLOCK_WIDTH > 0)
thread_width = I_width%BLOCK_WIDTH;
if (index_in < (I_width * I_height) && (threadIdx.x < thread_width))
{
// Dealing with images whose width is larger than the buffer width
if (m > 0) {
last_l1 = buffer_l1[threadIdx.y][BLOCK_WIDTH - 1];
last_l2 = buffer_l2[threadIdx.y][BLOCK_WIDTH - 1];
last_lx = buffer_lx[threadIdx.y][BLOCK_WIDTH - 1];
last_ly = buffer_ly[threadIdx.y][BLOCK_WIDTH - 1];
}
// Copy from global memory to shared memory
buffer_l1[threadIdx.y][threadIdx.x] = img[index_in];
buffer_l2[threadIdx.y][threadIdx.x] = powf(img[index_in], 2);
buffer_lx[threadIdx.y][threadIdx.x] = img[index_in] * (blockDim.x * m + threadIdx.x);
buffer_ly[threadIdx.y][threadIdx.x] = img[index_in] * row;
// Prefix sum for current array
for (cursor = 1; cursor <= ceilf(log2f(thread_width)); cursor++)
{
if ((threadIdx.x >= __float2int_rd(powf(2, cursor - 1))) && (threadIdx.x < thread_width))
{
temp_l1 = buffer_l1[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
temp_l2 = buffer_l2[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
temp_lx = buffer_lx[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
temp_ly = buffer_ly[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
}
__syncthreads();
if ((threadIdx.x >= __float2int_rd(powf(2, cursor - 1))) && (threadIdx.x < thread_width))
{
buffer_l1[threadIdx.y][threadIdx.x] += temp_l1;
buffer_l2[threadIdx.y][threadIdx.x] += temp_l2;
buffer_lx[threadIdx.y][threadIdx.x] += temp_lx;
buffer_ly[threadIdx.y][threadIdx.x] += temp_ly;
}
__syncthreads();
}
// Dealing with images whose width is larger than the buffer width
if (m > 0) {
buffer_l1[threadIdx.y][threadIdx.x] += last_l1;
buffer_l2[threadIdx.y][threadIdx.x] += last_l2;
buffer_lx[threadIdx.y][threadIdx.x] += last_lx;
buffer_ly[threadIdx.y][threadIdx.x] += last_ly;
}
// Copy from shared memory to global memory
l1_dev[index_in] = buffer_l1[threadIdx.y][threadIdx.x];
l2_dev[index_in] = buffer_l2[threadIdx.y][threadIdx.x];
lx_dev[index_in] = buffer_lx[threadIdx.y][threadIdx.x];
ly_dev[index_in] = buffer_ly[threadIdx.y][threadIdx.x];
}
}
}
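// Column-wise pass: runs the same shared-memory prefix sum down every column of the four
// tables produced by sum_row, turning them into full 2-D summed-area (integral) tables.
// The bottom row of each tile is carried into the next tile for images taller than BLOCK_HEIGHT2.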
__global__ void sum_col(float *l1_dev, float *l2_dev, float *lx_dev, float *ly_dev, int I_width, int I_height)
{
// Shared memory for four tables
__shared__ float buffer_l1[BLOCK_HEIGHT2][BLOCK_WIDTH2];
__shared__ float buffer_l2[BLOCK_HEIGHT2][BLOCK_WIDTH2];
__shared__ float buffer_lx[BLOCK_HEIGHT2][BLOCK_WIDTH2];
__shared__ float buffer_ly[BLOCK_HEIGHT2][BLOCK_WIDTH2];
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int m, cursor;
// Temporal values for prefix sum
float temp_l1, temp_l2, temp_lx, temp_ly;
float last_l1, last_l2, last_lx, last_ly;
for (m = 0; m < (I_height / BLOCK_HEIGHT2 + (I_height%BLOCK_HEIGHT2>0)); m++)
{
int index_in = (blockDim.y * m + threadIdx.y) * I_width + col;
int thread_height = BLOCK_HEIGHT2;
if ((m == I_height / BLOCK_HEIGHT2) && I_height%BLOCK_HEIGHT2 > 0)
thread_height = I_height%BLOCK_HEIGHT2;
if ((index_in < I_width * I_height) && col < I_width)
{
			// For columns taller than one tile (BLOCK_HEIGHT2), each element has to add the
			// running sum carried over from the previous tile.
if (m > 0) {
last_l1 = buffer_l1[BLOCK_HEIGHT2 - 1][threadIdx.x];
last_l2 = buffer_l2[BLOCK_HEIGHT2 - 1][threadIdx.x];
last_lx = buffer_lx[BLOCK_HEIGHT2 - 1][threadIdx.x];
last_ly = buffer_ly[BLOCK_HEIGHT2 - 1][threadIdx.x];
}
			// Copy from global memory to shared memory (TODO: verify the case where the tile extends past the image)
buffer_l1[threadIdx.y][threadIdx.x] = l1_dev[index_in];
buffer_l2[threadIdx.y][threadIdx.x] = l2_dev[index_in];
buffer_lx[threadIdx.y][threadIdx.x] = lx_dev[index_in];
buffer_ly[threadIdx.y][threadIdx.x] = ly_dev[index_in];
// Prefix sum for current array
for (cursor = 1; cursor <= ceilf(log2f(thread_height)); cursor++) {
/*prefix sum*/
if (threadIdx.y >= __float2int_rd(powf(2, cursor - 1))) {
temp_l1 = buffer_l1[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
temp_l2 = buffer_l2[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
temp_lx = buffer_lx[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
temp_ly = buffer_ly[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
}
__syncthreads();
if (threadIdx.y >= __float2int_rd(powf(2, cursor - 1))) {
buffer_l1[threadIdx.y][threadIdx.x] += temp_l1;
buffer_l2[threadIdx.y][threadIdx.x] += temp_l2;
buffer_lx[threadIdx.y][threadIdx.x] += temp_lx;
buffer_ly[threadIdx.y][threadIdx.x] += temp_ly;
}
__syncthreads();
}
// Dealing with images whose height is larger than the buffer height
if (m > 0) {
buffer_l1[threadIdx.y][threadIdx.x] += last_l1;
buffer_l2[threadIdx.y][threadIdx.x] += last_l2;
buffer_lx[threadIdx.y][threadIdx.x] += last_lx;
buffer_ly[threadIdx.y][threadIdx.x] += last_ly;
}
// Copy from shared memory to global memory
l1_dev[index_in] = buffer_l1[threadIdx.y][threadIdx.x];
l2_dev[index_in] = buffer_l2[threadIdx.y][threadIdx.x];
lx_dev[index_in] = buffer_lx[threadIdx.y][threadIdx.x];
ly_dev[index_in] = buffer_ly[threadIdx.y][threadIdx.x];
}
}
}
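// For every K x K patch position, reads the four corners of each summed-area table to obtain
// the patch sums S1, S2, Sx, Sy, derives four features (mean, variance, and first-order x/y
// moments about the patch centre) into v1..v4, and writes the squared Euclidean distance
// between the patch features and the template features (vt1..vt4) into X_dev.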
__global__ void compute_feature(float vt1value, float vt2value, float vt3value, float vt4value, float *v1_dev, float *v2_dev, float *v3_dev, float *v4_dev, float *X_dev, float *l1_dev, float *l2_dev, float *lx_dev, float *ly_dev, int K, int M, int N) {
float S1value = 0;
float S2value = 0;
float Sxvalue = 0;
float Syvalue = 0;
float v1value = 0;
float v2value = 0;
float v3value = 0;
float v4value = 0;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if ((col <= (M - K)) && (row <= (N - K))) {
// Compute four sum of pixel values within every patch.
S1value = (l1_dev[(row + K - 1)*M + (col + K - 1)] - l1_dev[(row + K - 1)*M + (col)] - l1_dev[(row)*M + (col + K - 1)] + l1_dev[(row)*M + (col)]);
S2value = (l2_dev[(row + K - 1)*M + (col + K - 1)] - l2_dev[(row + K - 1)*M + (col)] - l2_dev[(row)*M + (col + K - 1)] + l2_dev[(row)*M + (col)]);
Sxvalue = (lx_dev[(row + K - 1)*M + (col + K - 1)] - lx_dev[(row + K - 1)*M + (col)] - lx_dev[(row)*M + (col + K - 1)] + lx_dev[(row)*M + (col)]);
Syvalue = (ly_dev[(row + K - 1)*M + (col + K - 1)] - ly_dev[(row + K - 1)*M + (col)] - ly_dev[(row)*M + (col + K - 1)] + ly_dev[(row)*M + (col)]);
// Compute four features for every patch and place them in right place.
v1value = S1value / K / K;
v2value = S2value / K / K - v1value*v1value;
v3value = 4.0 * (Sxvalue - (col + 1.0 * (K - 1) / 2) * S1value) / K / K / K;
v4value = 4.0 * (Syvalue - (row + 1.0 * (K - 1) / 2) * S1value) / K / K / K;
v1_dev[row * (M - K + 1) + col] = v1value;
v2_dev[row * (M - K + 1) + col] = v2value;
v3_dev[row * (M - K + 1) + col] = v3value;
v4_dev[row * (M - K + 1) + col] = v4value;
// Compute the square of Euclidean distance between the template and every patch and place the results in right place.
X_dev[row * (M - K + 1) + col] = powf(v1value - vt1value, 2) + powf(v2value - vt2value, 2) + powf(v3value - vt3value, 2) + powf(v4value - vt4value, 2);
}
}
|
0076562f7999e334a1ac4cf4b0ff305eaace5b49.cu
|
#include "kernel.cuh"
__global__ void sum_row(float *img, float *l1_dev, float *l2_dev, float *lx_dev, float *ly_dev, int I_width, int I_height) // width of image
{
// Shared memory for four tables
__shared__ float buffer_l1[BLOCK_HEIGHT][BLOCK_WIDTH];
__shared__ float buffer_l2[BLOCK_HEIGHT][BLOCK_WIDTH];
__shared__ float buffer_lx[BLOCK_HEIGHT][BLOCK_WIDTH];
__shared__ float buffer_ly[BLOCK_HEIGHT][BLOCK_WIDTH];
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int m, cursor;
	// Temporary values for the prefix sum
float temp_l1, temp_l2, temp_lx, temp_ly;
float last_l1, last_l2, last_lx, last_ly;
for (m = 0; m < (I_width / BLOCK_WIDTH + (I_width%BLOCK_WIDTH>0)); m++)
{
int index_in = row * I_width + blockDim.x * m + threadIdx.x;
int thread_width = BLOCK_WIDTH;
// thread_width is the width of blocks with activated threads
if ((m == I_width / BLOCK_WIDTH) && I_width%BLOCK_WIDTH > 0)
thread_width = I_width%BLOCK_WIDTH;
if (index_in < (I_width * I_height) && (threadIdx.x < thread_width))
{
// Dealing with images whose width is larger than the buffer width
if (m > 0) {
last_l1 = buffer_l1[threadIdx.y][BLOCK_WIDTH - 1];
last_l2 = buffer_l2[threadIdx.y][BLOCK_WIDTH - 1];
last_lx = buffer_lx[threadIdx.y][BLOCK_WIDTH - 1];
last_ly = buffer_ly[threadIdx.y][BLOCK_WIDTH - 1];
}
// Copy from global memory to shared memory
buffer_l1[threadIdx.y][threadIdx.x] = img[index_in];
buffer_l2[threadIdx.y][threadIdx.x] = powf(img[index_in], 2);
buffer_lx[threadIdx.y][threadIdx.x] = img[index_in] * (blockDim.x * m + threadIdx.x);
buffer_ly[threadIdx.y][threadIdx.x] = img[index_in] * row;
// Prefix sum for current array
for (cursor = 1; cursor <= ceilf(log2f(thread_width)); cursor++)
{
if ((threadIdx.x >= __float2int_rd(powf(2, cursor - 1))) && (threadIdx.x < thread_width))
{
temp_l1 = buffer_l1[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
temp_l2 = buffer_l2[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
temp_lx = buffer_lx[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
temp_ly = buffer_ly[threadIdx.y][threadIdx.x - __float2int_rd(powf(2, cursor - 1))];
}
__syncthreads();
if ((threadIdx.x >= __float2int_rd(powf(2, cursor - 1))) && (threadIdx.x < thread_width))
{
buffer_l1[threadIdx.y][threadIdx.x] += temp_l1;
buffer_l2[threadIdx.y][threadIdx.x] += temp_l2;
buffer_lx[threadIdx.y][threadIdx.x] += temp_lx;
buffer_ly[threadIdx.y][threadIdx.x] += temp_ly;
}
__syncthreads();
}
// Dealing with images whose width is larger than the buffer width
if (m > 0) {
buffer_l1[threadIdx.y][threadIdx.x] += last_l1;
buffer_l2[threadIdx.y][threadIdx.x] += last_l2;
buffer_lx[threadIdx.y][threadIdx.x] += last_lx;
buffer_ly[threadIdx.y][threadIdx.x] += last_ly;
}
// Copy from shared memory to global memory
l1_dev[index_in] = buffer_l1[threadIdx.y][threadIdx.x];
l2_dev[index_in] = buffer_l2[threadIdx.y][threadIdx.x];
lx_dev[index_in] = buffer_lx[threadIdx.y][threadIdx.x];
ly_dev[index_in] = buffer_ly[threadIdx.y][threadIdx.x];
}
}
}
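// Column-wise pass: runs the same shared-memory prefix sum down every column of the four
// tables produced by sum_row, turning them into full 2-D summed-area (integral) tables.
// The bottom row of each tile is carried into the next tile for images taller than BLOCK_HEIGHT2.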
__global__ void sum_col(float *l1_dev, float *l2_dev, float *lx_dev, float *ly_dev, int I_width, int I_height)
{
// Shared memory for four tables
__shared__ float buffer_l1[BLOCK_HEIGHT2][BLOCK_WIDTH2];
__shared__ float buffer_l2[BLOCK_HEIGHT2][BLOCK_WIDTH2];
__shared__ float buffer_lx[BLOCK_HEIGHT2][BLOCK_WIDTH2];
__shared__ float buffer_ly[BLOCK_HEIGHT2][BLOCK_WIDTH2];
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int m, cursor;
// Temporal values for prefix sum
float temp_l1, temp_l2, temp_lx, temp_ly;
float last_l1, last_l2, last_lx, last_ly;
for (m = 0; m < (I_height / BLOCK_HEIGHT2 + (I_height%BLOCK_HEIGHT2>0)); m++)
{
int index_in = (blockDim.y * m + threadIdx.y) * I_width + col;
int thread_height = BLOCK_HEIGHT2;
if ((m == I_height / BLOCK_HEIGHT2) && I_height%BLOCK_HEIGHT2 > 0)
thread_height = I_height%BLOCK_HEIGHT2;
if ((index_in < I_width * I_height) && col < I_width)
{
			// For columns taller than one tile (BLOCK_HEIGHT2), each element has to add the
			// running sum carried over from the previous tile.
if (m > 0) {
last_l1 = buffer_l1[BLOCK_HEIGHT2 - 1][threadIdx.x];
last_l2 = buffer_l2[BLOCK_HEIGHT2 - 1][threadIdx.x];
last_lx = buffer_lx[BLOCK_HEIGHT2 - 1][threadIdx.x];
last_ly = buffer_ly[BLOCK_HEIGHT2 - 1][threadIdx.x];
}
			// Copy from global memory to shared memory (TODO: verify the case where the tile extends past the image)
buffer_l1[threadIdx.y][threadIdx.x] = l1_dev[index_in];
buffer_l2[threadIdx.y][threadIdx.x] = l2_dev[index_in];
buffer_lx[threadIdx.y][threadIdx.x] = lx_dev[index_in];
buffer_ly[threadIdx.y][threadIdx.x] = ly_dev[index_in];
// Prefix sum for current array
for (cursor = 1; cursor <= ceilf(log2f(thread_height)); cursor++) {
/*prefix sum*/
if (threadIdx.y >= __float2int_rd(powf(2, cursor - 1))) {
temp_l1 = buffer_l1[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
temp_l2 = buffer_l2[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
temp_lx = buffer_lx[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
temp_ly = buffer_ly[threadIdx.y - __float2int_rd(powf(2, cursor - 1))][threadIdx.x];
}
__syncthreads();
if (threadIdx.y >= __float2int_rd(powf(2, cursor - 1))) {
buffer_l1[threadIdx.y][threadIdx.x] += temp_l1;
buffer_l2[threadIdx.y][threadIdx.x] += temp_l2;
buffer_lx[threadIdx.y][threadIdx.x] += temp_lx;
buffer_ly[threadIdx.y][threadIdx.x] += temp_ly;
}
__syncthreads();
}
// Dealing with images whose height is larger than the buffer height
if (m > 0) {
buffer_l1[threadIdx.y][threadIdx.x] += last_l1;
buffer_l2[threadIdx.y][threadIdx.x] += last_l2;
buffer_lx[threadIdx.y][threadIdx.x] += last_lx;
buffer_ly[threadIdx.y][threadIdx.x] += last_ly;
}
// Copy from shared memory to global memory
l1_dev[index_in] = buffer_l1[threadIdx.y][threadIdx.x];
l2_dev[index_in] = buffer_l2[threadIdx.y][threadIdx.x];
lx_dev[index_in] = buffer_lx[threadIdx.y][threadIdx.x];
ly_dev[index_in] = buffer_ly[threadIdx.y][threadIdx.x];
}
}
}
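// For every K x K patch position, reads the four corners of each summed-area table to obtain
// the patch sums S1, S2, Sx, Sy, derives four features (mean, variance, and first-order x/y
// moments about the patch centre) into v1..v4, and writes the squared Euclidean distance
// between the patch features and the template features (vt1..vt4) into X_dev.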
__global__ void compute_feature(float vt1value, float vt2value, float vt3value, float vt4value, float *v1_dev, float *v2_dev, float *v3_dev, float *v4_dev, float *X_dev, float *l1_dev, float *l2_dev, float *lx_dev, float *ly_dev, int K, int M, int N) {
float S1value = 0;
float S2value = 0;
float Sxvalue = 0;
float Syvalue = 0;
float v1value = 0;
float v2value = 0;
float v3value = 0;
float v4value = 0;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if ((col <= (M - K)) && (row <= (N - K))) {
// Compute four sum of pixel values within every patch.
S1value = (l1_dev[(row + K - 1)*M + (col + K - 1)] - l1_dev[(row + K - 1)*M + (col)] - l1_dev[(row)*M + (col + K - 1)] + l1_dev[(row)*M + (col)]);
S2value = (l2_dev[(row + K - 1)*M + (col + K - 1)] - l2_dev[(row + K - 1)*M + (col)] - l2_dev[(row)*M + (col + K - 1)] + l2_dev[(row)*M + (col)]);
Sxvalue = (lx_dev[(row + K - 1)*M + (col + K - 1)] - lx_dev[(row + K - 1)*M + (col)] - lx_dev[(row)*M + (col + K - 1)] + lx_dev[(row)*M + (col)]);
Syvalue = (ly_dev[(row + K - 1)*M + (col + K - 1)] - ly_dev[(row + K - 1)*M + (col)] - ly_dev[(row)*M + (col + K - 1)] + ly_dev[(row)*M + (col)]);
// Compute four features for every patch and place them in right place.
v1value = S1value / K / K;
v2value = S2value / K / K - v1value*v1value;
v3value = 4.0 * (Sxvalue - (col + 1.0 * (K - 1) / 2) * S1value) / K / K / K;
v4value = 4.0 * (Syvalue - (row + 1.0 * (K - 1) / 2) * S1value) / K / K / K;
v1_dev[row * (M - K + 1) + col] = v1value;
v2_dev[row * (M - K + 1) + col] = v2value;
v3_dev[row * (M - K + 1) + col] = v3value;
v4_dev[row * (M - K + 1) + col] = v4value;
// Compute the square of Euclidean distance between the template and every patch and place the results in right place.
X_dev[row * (M - K + 1) + col] = powf(v1value - vt1value, 2) + powf(v2value - vt2value, 2) + powf(v3value - vt3value, 2) + powf(v4value - vt4value, 2);
}
}
|
61dbde6ac9b439a3f4314bb06efff05c2866ed40.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathPointwise.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathPointwise.cu"
#include <THH/THHGenerateIntType.h>
|
61dbde6ac9b439a3f4314bb06efff05c2866ed40.cu
|
#include "../THCTensorMathPointwise.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathPointwise.cu"
#include <THC/THCGenerateIntType.h>
|
f27f084d388cdb62a56e8c35dd9af8cc6c650e75.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
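// cuDNN only accepts a single (symmetric) padding value per spatial dimension. When the padding
// implied by the output size is asymmetric (pH != pHsum - pH, or likewise for pW), the input is
// replaced by a copy that is one row/column larger with the original data in the leading region,
// and gradI is reallocated with the same enlarged shape.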
void checkConv2dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iH, const int iW,
const int oH, const int oW,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW) {
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iHposition = isNCHW ? 2 : 1;
if(isPHasymm)
newShape[iHposition] += 1;
if(isPWasymm)
newShape[iHposition + 1] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void checkConv3dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iD, const int iH, const int iW,
const int oD, const int oH, const int oW,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW) {
const auto pDsum = ((oD - 1) * sD + ((kD - 1) * dD + 1) - iD);
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPDasymm = pD != (pDsum - pD);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPDasymm && !isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iDposition = isNCDHW ? 2 : 1;
if(isPDasymm)
newShape[iDposition] += 1;
if(isPHasymm)
newShape[iDposition + 1] += 1;
if(isPWasymm)
newShape[iDposition + 2] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCDHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3), 0,input->sizeAt(4)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,input->sizeAt(3), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void pooling2dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling2dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dBpCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int zShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int zStrides[] = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3), (int)output->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(z, format, cudnnDataType(output->dataType()), numDims, zShape);
else
err = cudnnSetTensorNdDescriptor(z, cudnnDataType(output->dataType()), numDims, zShape, zStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int dzShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int dzStrides[] = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3), (int)gradO->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(dz, format, cudnnDataType(gradO->dataType()), numDims, dzShape);
else
err = cudnnSetTensorNdDescriptor(dz, cudnnDataType(gradO->dataType()), numDims, dzShape, dzStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
    // the cuDNN pooling-backward API needs the forward-pass output as one of its inputs,
    // so for max pooling it is recomputed here before running the backward pass
if(mode == CUDNN_POOLING_MAX) {
NDArray temp(gradO);
NDArray::prepareSpecialUse({gradI}, {input, gradO, &temp});
// run ff calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, dz, temp.specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, temp.specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO, &temp});
}
else {
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dBpCUDNN: hipStreamSynchronize failed !", cudaErr);
}
}
}
}
|
f27f084d388cdb62a56e8c35dd9af8cc6c650e75.cu
|
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
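// cuDNN only accepts a single (symmetric) padding value per spatial dimension. When the padding
// implied by the output size is asymmetric (pH != pHsum - pH, or likewise for pW), the input is
// replaced by a copy that is one row/column larger with the original data in the leading region,
// and gradI is reallocated with the same enlarged shape.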
void checkConv2dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iH, const int iW,
const int oH, const int oW,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW) {
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iHposition = isNCHW ? 2 : 1;
if(isPHasymm)
newShape[iHposition] += 1;
if(isPWasymm)
newShape[iHposition + 1] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void checkConv3dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iD, const int iH, const int iW,
const int oD, const int oH, const int oW,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW) {
const auto pDsum = ((oD - 1) * sD + ((kD - 1) * dD + 1) - iD);
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPDasymm = pD != (pDsum - pD);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPDasymm && !isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iDposition = isNCDHW ? 2 : 1;
if(isPDasymm)
newShape[iDposition] += 1;
if(isPHasymm)
newShape[iDposition + 1] += 1;
if(isPWasymm)
newShape[iDposition + 2] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCDHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3), 0,input->sizeAt(4)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,input->sizeAt(3), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void pooling2dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
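    // descriptor setup: a packed NCHW/NHWC descriptor suffices when the array is contiguous and
    // c-ordered (ews() == 1); otherwise the explicit strides are handed to cuDNN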
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
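    // cuDNN expects float scaling factors for half/single-precision tensors and double for double precision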
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling2dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run calculation for gradI
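    // note: gradO is passed both as the forward-output (y) and as the gradient (dy) buffer here;
    // cuDNN's max-pooling backward formally expects the true forward output as y (compare pooling3dBpCUDNN)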
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int zShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int zStrides[] = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3), (int)output->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(z, format, cudnnDataType(output->dataType()), numDims, zShape);
else
err = cudnnSetTensorNdDescriptor(z, cudnnDataType(output->dataType()), numDims, zShape, zStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int dzShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int dzStrides[] = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3), (int)gradO->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(dz, format, cudnnDataType(gradO->dataType()), numDims, dzShape);
else
err = cudnnSetTensorNdDescriptor(dz, cudnnDataType(gradO->dataType()), numDims, dzShape, dzStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
    // the cuDNN max-pooling backward api requires the forward (ff) output as one of its input arguments
if(mode == CUDNN_POOLING_MAX) {
NDArray temp(gradO);
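        // temp mirrors gradO's shape and receives the recomputed forward output,
        // which cudnnPoolingBackward needs for max pooling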
NDArray::prepareSpecialUse({gradI}, {input, gradO, &temp});
// run ff calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, dz, temp.specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, temp.specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO, &temp});
}
else {
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
}
}
}
}
|
7f7b72a5abea8863a676b0c7a38b5b165fbc8930.hip
|
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
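// Gemm instantiation: fp32 SIMT GEMM (column-major A x row-major B -> row-major C) targeting SM50,
// 64x256x8 threadblock tile, 32x64x8 warp tile, 2-stage software pipeline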
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
7f7b72a5abea8863a676b0c7a38b5b165fbc8930.cu
|
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
6a5543a4914669a714c8577fa8941f1fd3738fba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
template <typename T, framework::DataLayout layout, bool HasBias>
__global__ void KeAffineChannelCUDA(const T* x, const T* scale, const T* bias,
const int C, const int HxW, const int num,
T* y) {
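  // grid-stride loop over all elements; the channel index c is i / HxW % C for NCHW data
  // and i % C for NHWC data, so the same kernel serves both layouts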
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
if (HasBias) {
y[i] = scale[c] * x[i] + bias[c];
} else {
y[i] = scale[c] * x[i];
}
}
}
template <typename DeviceContext, typename T>
class AffineChannelCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<framework::Tensor>("X");
auto* scale = ctx.Input<framework::Tensor>("Scale");
auto* bias = ctx.Input<framework::Tensor>("Bias");
auto* y = ctx.Output<framework::Tensor>("Out");
y->mutable_data<T>(ctx.GetPlace());
const framework::DataLayout layout =
framework::StringToDataLayout(ctx.Attr<std::string>("data_layout"));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto dims = x->dims();
const int num = x->numel();
int N = dims[0];
int C = layout == framework::DataLayout::kNCHW ? dims[1]
: dims[dims.size() - 1];
int HxW = num / N / C;
const T* x_d = x->data<T>();
const T* scale_d = scale->data<T>();
const T* bias_d = bias->data<T>();
T* y_d = y->data<T>();
int block = 1024;
int grid = (num + block - 1) / block;
if (layout == framework::DataLayout::kNCHW) {
hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNCHW,
true>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
x_d, scale_d, bias_d, C, HxW, num, y_d);
} else {
hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNHWC,
true>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
x_d, scale_d, bias_d, C, HxW, num, y_d);
}
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
__global__ void AffineChannelScaleBiasGradientCUDAKernel(
const T* dy, const T* x, const int N, const int C, const int HxW, T* dscale,
T* dbias) {
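  // one channel per block (grid-strided over blocks): each block reduces dy*x and dy over the
  // N*HxW elements of its channel to produce dscale[c] and dbias[c]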
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<T, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_sum = 0;
T db_sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += dy[index] * x[index];
db_sum += dy[index];
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename DeviceContext, typename T>
class AffineChannelGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<framework::Tensor>("X");
auto* scale = ctx.Input<framework::Tensor>("Scale");
auto* bias = ctx.Input<framework::Tensor>("Bias");
auto* dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto* dscale =
ctx.Output<framework::Tensor>(framework::GradVarName("Scale"));
auto* dbias = ctx.Output<framework::Tensor>(framework::GradVarName("Bias"));
const framework::DataLayout layout =
framework::StringToDataLayout(ctx.Attr<std::string>("data_layout"));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto dims = x->dims();
const int num = x->numel();
int N = dims[0];
int C = layout == framework::DataLayout::kNCHW ? dims[1]
: dims[dims.size() - 1];
int HxW = num / N / C;
const T* x_d = x->data<T>();
const T* dy_d = dy->data<T>();
const T* s_d = scale->data<T>();
T* dx_d = dx ? dx->mutable_data<T>(ctx.GetPlace()) : nullptr;
T* ds_d = dscale ? dscale->mutable_data<T>(ctx.GetPlace()) : nullptr;
T* db_d = dbias ? dbias->mutable_data<T>(ctx.GetPlace()) : nullptr;
const int block = 1024;
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = ::min(C, max_blocks);
if (layout == framework::DataLayout::kNCHW) {
if (dx) {
hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNCHW,
false>), dim3(grid1), dim3(block), 0, dev_ctx.stream(),
dy_d, s_d, nullptr, C, HxW, num, dx_d);
}
if (dscale && dbias) {
hipLaunchKernelGGL(( AffineChannelScaleBiasGradientCUDAKernel<
T, block, framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0,
dev_ctx.stream(),
dy_d, x_d, N, C, HxW, ds_d, db_d);
}
} else {
if (dx) {
        hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNHWC,
false>), dim3(grid1), dim3(block), 0, dev_ctx.stream(),
dy_d, s_d, nullptr, C, HxW, num, dx_d);
}
if (dscale && dbias) {
hipLaunchKernelGGL(( AffineChannelScaleBiasGradientCUDAKernel<
T, block, framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0,
dev_ctx.stream(),
dy_d, x_d, N, C, HxW, ds_d, db_d);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(affine_channel,
ops::AffineChannelCUDAKernel<CUDA, float>,
ops::AffineChannelCUDAKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(affine_channel_grad,
ops::AffineChannelGradCUDAKernel<CUDA, float>,
ops::AffineChannelGradCUDAKernel<CUDA, double>);
|
6a5543a4914669a714c8577fa8941f1fd3738fba.cu
|
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "cub/cub.cuh"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
template <typename T, framework::DataLayout layout, bool HasBias>
__global__ void KeAffineChannelCUDA(const T* x, const T* scale, const T* bias,
const int C, const int HxW, const int num,
T* y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
if (HasBias) {
y[i] = scale[c] * x[i] + bias[c];
} else {
y[i] = scale[c] * x[i];
}
}
}
template <typename DeviceContext, typename T>
class AffineChannelCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<framework::Tensor>("X");
auto* scale = ctx.Input<framework::Tensor>("Scale");
auto* bias = ctx.Input<framework::Tensor>("Bias");
auto* y = ctx.Output<framework::Tensor>("Out");
y->mutable_data<T>(ctx.GetPlace());
const framework::DataLayout layout =
framework::StringToDataLayout(ctx.Attr<std::string>("data_layout"));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto dims = x->dims();
const int num = x->numel();
int N = dims[0];
int C = layout == framework::DataLayout::kNCHW ? dims[1]
: dims[dims.size() - 1];
int HxW = num / N / C;
const T* x_d = x->data<T>();
const T* scale_d = scale->data<T>();
const T* bias_d = bias->data<T>();
T* y_d = y->data<T>();
int block = 1024;
int grid = (num + block - 1) / block;
if (layout == framework::DataLayout::kNCHW) {
KeAffineChannelCUDA<T, framework::DataLayout::kNCHW,
true><<<grid, block, 0, dev_ctx.stream()>>>(
x_d, scale_d, bias_d, C, HxW, num, y_d);
} else {
KeAffineChannelCUDA<T, framework::DataLayout::kNHWC,
true><<<grid, block, 0, dev_ctx.stream()>>>(
x_d, scale_d, bias_d, C, HxW, num, y_d);
}
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
__global__ void AffineChannelScaleBiasGradientCUDAKernel(
const T* dy, const T* x, const int N, const int C, const int HxW, T* dscale,
T* dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<T, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_sum = 0;
T db_sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += dy[index] * x[index];
db_sum += dy[index];
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename DeviceContext, typename T>
class AffineChannelGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<framework::Tensor>("X");
auto* scale = ctx.Input<framework::Tensor>("Scale");
auto* bias = ctx.Input<framework::Tensor>("Bias");
auto* dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto* dscale =
ctx.Output<framework::Tensor>(framework::GradVarName("Scale"));
auto* dbias = ctx.Output<framework::Tensor>(framework::GradVarName("Bias"));
const framework::DataLayout layout =
framework::StringToDataLayout(ctx.Attr<std::string>("data_layout"));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto dims = x->dims();
const int num = x->numel();
int N = dims[0];
int C = layout == framework::DataLayout::kNCHW ? dims[1]
: dims[dims.size() - 1];
int HxW = num / N / C;
const T* x_d = x->data<T>();
const T* dy_d = dy->data<T>();
const T* s_d = scale->data<T>();
T* dx_d = dx ? dx->mutable_data<T>(ctx.GetPlace()) : nullptr;
T* ds_d = dscale ? dscale->mutable_data<T>(ctx.GetPlace()) : nullptr;
T* db_d = dbias ? dbias->mutable_data<T>(ctx.GetPlace()) : nullptr;
const int block = 1024;
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = std::min(C, max_blocks);
if (layout == framework::DataLayout::kNCHW) {
if (dx) {
KeAffineChannelCUDA<T, framework::DataLayout::kNCHW,
false><<<grid1, block, 0, dev_ctx.stream()>>>(
dy_d, s_d, nullptr, C, HxW, num, dx_d);
}
if (dscale && dbias) {
AffineChannelScaleBiasGradientCUDAKernel<
T, block, framework::DataLayout::kNCHW><<<grid2, block, 0,
dev_ctx.stream()>>>(
dy_d, x_d, N, C, HxW, ds_d, db_d);
}
} else {
if (dx) {
        KeAffineChannelCUDA<T, framework::DataLayout::kNHWC,
false><<<grid1, block, 0, dev_ctx.stream()>>>(
dy_d, s_d, nullptr, C, HxW, num, dx_d);
}
if (dscale && dbias) {
AffineChannelScaleBiasGradientCUDAKernel<
T, block, framework::DataLayout::kNHWC><<<grid2, block, 0,
dev_ctx.stream()>>>(
dy_d, x_d, N, C, HxW, ds_d, db_d);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(affine_channel,
ops::AffineChannelCUDAKernel<CUDA, float>,
ops::AffineChannelCUDAKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(affine_channel_grad,
ops::AffineChannelGradCUDAKernel<CUDA, float>,
ops::AffineChannelGradCUDAKernel<CUDA, double>);
|
561f43e7ce81e404597a3f0a7fd2b0fee426c125.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "BasicUtils/BasicException.h"
#include "FlexibleDiffusionSolverFE_GPU_CUDA.h"
#include "../DiffSecrData.h"
#include "CUDAUtilsHeader.h"
#include "../GPUSolverBasicData.h"
#include <iostream>
# define BLOCK_SIZE_FRAME (BLOCK_SIZE+2)
using std::cerr;
using std::endl;
using std::vector;
using std::string;
using std::swap;
namespace CompuCell3D {
FlexibleDiffusionSolverFE_GPU_CUDA::FlexibleDiffusionSolverFE_GPU_CUDA():h_solverParamPtr(NULL),
d_field(NULL),
d_celltype_field(NULL),
d_boundary_field(NULL),
d_scratch(NULL),
d_solverParam(NULL),
mem_size_field(0),
mem_size_celltype_field(0)
{
}
FlexibleDiffusionSolverFE_GPU_CUDA::~FlexibleDiffusionSolverFE_GPU_CUDA()
{
if (h_solverParamPtr)
checkCudaErrors(hipHostFree(h_solverParamPtr));
if (d_field)
checkCudaErrors(hipFree(d_field));
if (d_scratch)
checkCudaErrors(hipFree(d_scratch));
if (d_celltype_field)
checkCudaErrors(hipFree(d_celltype_field));
if (d_boundary_field)
checkCudaErrors(hipFree(d_boundary_field));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::init(int gpuDeviceIndex, LatticeType lt, size_t fieldLen){
//hipSetDevice( /*cutGetMaxGflopsDeviceId()*/0);
//TODO: reimplement device selector
//not the most efficient code...
//refactoring needed (separate device selection from user messages)
if(gpuDeviceIndex==-1){//select the fastest GPU device
cerr<<"Selecting the fastest GPU device...\n";
int num_devices, device;
hipGetDeviceCount(&num_devices);
if (num_devices > 1) {
int max_multiprocessors = 0, max_device = 0;
for (device = 0; device < num_devices; device++) {
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, max_device);
cerr<<"GPU device "<<max_device<<" selected; GPU device name: "<<properties.name<<endl;
hipSetDevice(max_device);
gpuDeviceIndex=max_device;
}else{
cerr<<"Only one GPU device available, will use it (#0)\n";
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, 0);
cerr<<"GPU device name: "<<properties.name<<endl;
}
}else{
hipError_t err=hipSetDevice(gpuDeviceIndex);
if(err!=hipSuccess){
cerr<<"Can't use the GPU device # "<<gpuDeviceIndex<<" (error code: "<<err<<", err message: "<<hipGetErrorString(err)<<")"<<"\n";
exit(-1);
}
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, gpuDeviceIndex);
cerr<<"GPU device name: "<<properties.name<<endl;
}
alloc(fieldLen);
}
void FlexibleDiffusionSolverFE_GPU_CUDA::alloc(size_t fieldLen){
unsigned int flags = hipHostMallocMapped;
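	//hipHostMallocMapped: page-locked host memory mapped into the device address space, so the
	//solver parameters can be read by kernels through d_solverParam without an explicit copy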
checkCudaErrors(hipHostMalloc((void **)&h_solverParamPtr, sizeof(SolverParams_t), flags));
cerr<<"h_solverParamPtr-"<<h_solverParamPtr<<endl;
// allocate device memory
mem_size_field=fieldLen*sizeof(float);
mem_size_celltype_field=fieldLen*sizeof(unsigned char);
checkCudaErrors(hipMalloc((void**) &d_field, mem_size_field));
//
checkCudaErrors(hipMalloc((void**) &d_celltype_field, mem_size_celltype_field));
checkCudaErrors(hipMalloc((void**) &d_boundary_field, mem_size_celltype_field));
//
checkCudaErrors(hipMalloc((void**) &d_scratch, mem_size_field));
//enabling sharing of the h_solverParamPtr between host and device
checkCudaErrors(hipHostGetDevicePointer((void **)&d_solverParam, (void *)h_solverParamPtr, 0));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::prepareSolverParams(Dim3D fieldDim, DiffusionData const &diffData){
SolverParams_t &h_solverParam = *h_solverParamPtr;
h_solverParam.dimx=fieldDim.x;
h_solverParam.dimy=fieldDim.y;
h_solverParam.dimz=fieldDim.z;
h_solverParam.dx=1.0;
h_solverParam.dt=1.0;
h_solverParam.numberOfCelltypes=2;
for (int i=0 ; i<UCHAR_MAX+1 ; ++i){
h_solverParam.diffCoef[i]=diffData.diffCoef[i];
h_solverParam.decayCoef[i]=diffData.decayCoef[i];
//cerr<<"h_solverParam.diffCoef["<<i<<"]="<<h_solverParam.diffCoef[i]<<endl;
}
}
string FlexibleDiffusionSolverFE_GPU_CUDA::solverName(){
cerr<<"Calling FlexibleDiffusionSolverFE_GPU_CUDA::solverName"<<endl;
return "FlexibleDiffusionSolverFE_CUDA";
}
void FlexibleDiffusionSolverFE_GPU_CUDA::fieldHostToDevice(float const *h_field){
checkCudaErrors(hipMemcpy(d_field, h_field, mem_size_field,
hipMemcpyHostToDevice));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::fieldDeviceToHost(float *h_field)const{
checkCudaErrors(hipMemcpy(h_field, d_scratch, mem_size_field,hipMemcpyDeviceToHost) );
}
void FlexibleDiffusionSolverFE_GPU_CUDA::swapScratchAndField(){
swap(d_field, d_scratch);
}
void FlexibleDiffusionSolverFE_GPU_CUDA::initCellTypeArray(unsigned char *arr, size_t arrLength){
//cerr<<"h_celltype_field->getArraySize()="<<arrLength<<" mem_size_celltype_field="<<mem_size_celltype_field<<endl;
////h_celltype_field=cellTypeMonitorPlugin->getCellTypeArray();
checkCudaErrors(hipMemcpy(d_celltype_field, arr, arrLength*sizeof(*d_celltype_field),hipMemcpyHostToDevice));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::initBoundaryArray(unsigned char *arr, size_t arrLength){
checkCudaErrors(hipMemcpy(d_boundary_field, arr, arrLength*sizeof(*d_boundary_field),hipMemcpyHostToDevice));
}
__global__ void diffSolverKernel( float* field, float* scratch,unsigned char * celltype, SolverParams_t *solverParams){
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bz=0; //simulated blockIdx.z
int DIMX=solverParams->dimx;
int DIMY=solverParams->dimy;
int DIMZ=solverParams->dimz;
int bz_max=DIMZ/BLOCK_SIZE;
//each thread copies data into shared memory
int threadsPerBlock=BLOCK_SIZE*BLOCK_SIZE*BLOCK_SIZE;
__shared__ float fieldBlock[BLOCK_SIZE+2][BLOCK_SIZE+2][BLOCK_SIZE+2];
__shared__ unsigned char celltypeBlock[BLOCK_SIZE+2][BLOCK_SIZE+2][BLOCK_SIZE+2];
__shared__ float scratchBlock[BLOCK_SIZE][BLOCK_SIZE][BLOCK_SIZE];
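	//each block handles a BLOCK_SIZE^3 tile per z-slab: fieldBlock/celltypeBlock hold the tile plus a
	//one-cell halo so the 6-point stencil reads only shared memory; the bz loop walks the z dimension
	//because the launch grid is two-dimensional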
for (bz=0 ; bz<bz_max ; ++bz){
	//mapping from block, threadIdx to x,y,z of the inner frame
int x= bx*BLOCK_SIZE+tx;
int y= by*BLOCK_SIZE+ty;
int z= bz*BLOCK_SIZE+tz;
//int offset=threadsPerBlock*bx+threadsPerBlock*blockDim.x*by+DIMX*DIMY*BLOCK_SIZE*bz;
fieldBlock[tx+1][ty+1][tz+1] = field[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
celltypeBlock[tx+1][ty+1][tz+1] = celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
scratchBlock[tx][ty][tz]=0.0;
//fieldBlock(tx+1, ty+1, tz+1) = field[offset+tz*BLOCK_SIZE*BLOCK_SIZE+ty*BLOCK_SIZE+tx];
if (tx==0){
fieldBlock[0][ty+1][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x];
celltypeBlock[0][ty+1][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x];
}
if (tx==BLOCK_SIZE-1){
fieldBlock[BLOCK_SIZE+1][ty+1][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+2];
celltypeBlock[BLOCK_SIZE+1][ty+1][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+2];
}
if (ty==0){
fieldBlock[tx+1][0][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y)*(DIMX+2)+x+1];
celltypeBlock[tx+1][0][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y)*(DIMX+2)+x+1];
}
if (ty==BLOCK_SIZE-1){
fieldBlock[tx+1][BLOCK_SIZE+1][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y+2)*(DIMX+2)+x+1];
celltypeBlock[tx+1][BLOCK_SIZE+1][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+2)*(DIMX+2)+x+1];
}
if (tz==0){
fieldBlock[tx+1][ty+1][0]=field[(z)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
celltypeBlock[tx+1][ty+1][0]=celltype[(z)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
}
if (tz==BLOCK_SIZE-1){
fieldBlock[tx+1][ty+1][BLOCK_SIZE+1]=field[(z+2)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
celltypeBlock[tx+1][ty+1][BLOCK_SIZE+1]=celltype[(z+2)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
}
__syncthreads();
//solve actual diff equation
float concentrationSum =0.0;
float dt_dx2=solverParams->dt/(solverParams->dx*solverParams->dx);
int curentCelltype=celltypeBlock[tx+1][ty+1][tz+1];
concentrationSum=fieldBlock[tx+2][ty+1][tz+1]+fieldBlock[tx+1][ty+2][tz+1]+fieldBlock[tx+1][ty+1][tz+2]
+fieldBlock[tx][ty+1][tz+1]+fieldBlock[tx+1][ty][tz+1]+fieldBlock[tx+1][ty+1][tz]-6*fieldBlock[tx+1][ty+1][tz+1];
float * diffCoef=solverParams->diffCoef;
float * decayCoef=solverParams->decayCoef;
concentrationSum*=diffCoef[curentCelltype];
float varDiffSumTerm=0.0;
//mixing central difference first derivatives with forward second derivatives does not work
//terms due to variable diffusion coef
////x partial derivatives
//varDiffSumTerm+=(diffCoef[celltypeBlock[tx+2][ty+1][tz+1]]-diffCoef[celltypeBlock[tx][ty+1][tz+1]])*(fieldBlock[tx+2][ty+1][tz+1]-fieldBlock[tx][ty+1][tz+1]);
////y partial derivatives
//varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+2][tz+1]]-diffCoef[celltypeBlock[tx+1][ty][tz+1]])*(fieldBlock[tx+1][ty+2][tz+1]-fieldBlock[tx+1][ty][tz+1]);
////z partial derivatives
//varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+1][tz+2]]-diffCoef[celltypeBlock[tx+1][ty+1][tz]])*(fieldBlock[tx+1][ty+1][tz+2]-fieldBlock[tx+1][ty+1][tz]);
//scratchBlock[tx][ty][tz]=diffConst*(concentrationSum-6*fieldBlock[tx+1][ty+1][tz+1])+fieldBlock[tx+1][ty+1][tz+1];
//scratchBlock[tx][ty][tz]=dt_4dx2*(concentrationSum+4*varDiffSumTerm)+fieldBlock[tx+1][ty+1][tz+1];
//scratchBlock[tx][ty][tz]=dt_4dx2*(concentrationSum+varDiffSumTerm)+fieldBlock[tx+1][ty+1][tz+1];
//using forward first derivatives
//x partial derivatives
varDiffSumTerm+=(diffCoef[celltypeBlock[tx+2][ty+1][tz+1]]-diffCoef[curentCelltype])*(fieldBlock[tx+2][ty+1][tz+1]-fieldBlock[tx+1][ty+1][tz+1]);
//y partial derivatives
varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+2][tz+1]]-diffCoef[curentCelltype])*(fieldBlock[tx+1][ty+2][tz+1]-fieldBlock[tx+1][ty+1][tz+1]);
//z partial derivatives
varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+1][tz+2]]-diffCoef[curentCelltype])*(fieldBlock[tx+1][ty+1][tz+2]-fieldBlock[tx+1][ty+1][tz+1]);
//OK
scratchBlock[tx][ty][tz]=dt_dx2*(concentrationSum+varDiffSumTerm)+(1-solverParams->dt*decayCoef[curentCelltype])*fieldBlock[tx+1][ty+1][tz+1];
//simple consistency check
//scratchBlock[tx][ty][tz]=concentrationSum;
//scratchBlock[tx][ty][tz]=fieldBlock[tx+2][ty+1][tz+1]+fieldBlock[tx][ty+1][tz+1]+fieldBlock[tx+1][ty+2][tz+1]+fieldBlock[tx+1][ty][tz+1]+fieldBlock[tx+1][ty+1][tz+2]+fieldBlock[tx+1][ty+1][tz];
//scratchBlock[tx][ty][tz]=fieldBlock[tx+1][ty+1][tz+1];
//fieldBlock[tx+1][ty+1][tz+1]=3000.0f;
__syncthreads();
//copy scratchBlock to scratch field on the device
scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=scratchBlock[tx][ty][tz];
//scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=3000.0;
__syncthreads();
//boundary condition
//if(x==0){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(x==solverParams->dimx-1){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+2]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(y==0){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(y==solverParams->dimy-1){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+2)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(z==0){
// scratch[(z)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(z==solverParams->dimz-1){
// scratch[(z+2)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
}
//__syncthreads();
}
void FlexibleDiffusionSolverFE_GPU_CUDA::diffuseSingleField(){
//we cannot access device variable (e.g. d_solverParam) from this part of the code - only kernel is allowed to do this
//here we are using page-locked memory to share SolverParams_t structure between device and host
unsigned int dimX=h_solverParamPtr->dimx;
unsigned int dimY=h_solverParamPtr->dimy;
unsigned int dimZ=h_solverParamPtr->dimz;
SolverParams_t * d_solverParamFromMappedMemory;
hipHostGetDevicePointer((void **)&d_solverParamFromMappedMemory, (void *)h_solverParamPtr, 0);
//cutilSafeCall(hipMemcpy(d_solverParamFromMappedMemory, h_solverParam, sizeof(SolverParams_t ),hipMemcpyHostToDevice) );
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(dimX / threads.x, dimY / threads.y);
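	//note: this grid covers the field exactly only when dimX and dimY are multiples of BLOCK_SIZE;
	//the z extent is handled by the bz loop inside the kernel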
hipLaunchKernelGGL(( diffSolverKernel), dim3(grid), dim3(threads) , 0, 0, d_field, d_scratch,d_celltype_field,d_solverParamFromMappedMemory);
//diffSolverKernel<<< grid, threads >>>(d_field, d_scratch,d_celltype_field,d_solverParam);
hipDeviceSynchronize();//TODO: this synchronization looks redundant. Copying memory back to host implies implicit synchronization
}
}//namespace CompuCell3D
|
561f43e7ce81e404597a3f0a7fd2b0fee426c125.cu
|
//#include "BasicUtils/BasicException.h"
#include "FlexibleDiffusionSolverFE_GPU_CUDA.h"
#include "../DiffSecrData.h"
#include "CUDAUtilsHeader.h"
#include "../GPUSolverBasicData.h"
#include <iostream>
# define BLOCK_SIZE_FRAME (BLOCK_SIZE+2)
using std::cerr;
using std::endl;
using std::vector;
using std::string;
using std::swap;
namespace CompuCell3D {
FlexibleDiffusionSolverFE_GPU_CUDA::FlexibleDiffusionSolverFE_GPU_CUDA():h_solverParamPtr(NULL),
d_field(NULL),
d_celltype_field(NULL),
d_boundary_field(NULL),
d_scratch(NULL),
d_solverParam(NULL),
mem_size_field(0),
mem_size_celltype_field(0)
{
}
FlexibleDiffusionSolverFE_GPU_CUDA::~FlexibleDiffusionSolverFE_GPU_CUDA()
{
if (h_solverParamPtr)
checkCudaErrors(cudaFreeHost(h_solverParamPtr));
if (d_field)
checkCudaErrors(cudaFree(d_field));
if (d_scratch)
checkCudaErrors(cudaFree(d_scratch));
if (d_celltype_field)
checkCudaErrors(cudaFree(d_celltype_field));
if (d_boundary_field)
checkCudaErrors(cudaFree(d_boundary_field));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::init(int gpuDeviceIndex, LatticeType lt, size_t fieldLen){
//cudaSetDevice( /*cutGetMaxGflopsDeviceId()*/0);
//TODO: reimplement device selector
//not the most efficient code...
//refactoring needed (separate device selection from user messages)
if(gpuDeviceIndex==-1){//select the fastest GPU device
cerr<<"Selecting the fastest GPU device...\n";
int num_devices, device;
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) {
int max_multiprocessors = 0, max_device = 0;
for (device = 0; device < num_devices; device++) {
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, max_device);
cerr<<"GPU device "<<max_device<<" selected; GPU device name: "<<properties.name<<endl;
cudaSetDevice(max_device);
gpuDeviceIndex=max_device;
}else{
cerr<<"Only one GPU device available, will use it (#0)\n";
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
cerr<<"GPU device name: "<<properties.name<<endl;
}
}else{
cudaError_t err=cudaSetDevice(gpuDeviceIndex);
if(err!=cudaSuccess){
cerr<<"Can't use the GPU device # "<<gpuDeviceIndex<<" (error code: "<<err<<", err message: "<<cudaGetErrorString(err)<<")"<<"\n";
exit(-1);
}
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, gpuDeviceIndex);
cerr<<"GPU device name: "<<properties.name<<endl;
}
alloc(fieldLen);
}
void FlexibleDiffusionSolverFE_GPU_CUDA::alloc(size_t fieldLen){
unsigned int flags = cudaHostAllocMapped;
checkCudaErrors(cudaHostAlloc((void **)&h_solverParamPtr, sizeof(SolverParams_t), flags));
cerr<<"h_solverParamPtr-"<<h_solverParamPtr<<endl;
// allocate device memory
mem_size_field=fieldLen*sizeof(float);
mem_size_celltype_field=fieldLen*sizeof(unsigned char);
checkCudaErrors(cudaMalloc((void**) &d_field, mem_size_field));
//
checkCudaErrors(cudaMalloc((void**) &d_celltype_field, mem_size_celltype_field));
checkCudaErrors(cudaMalloc((void**) &d_boundary_field, mem_size_celltype_field));
//
checkCudaErrors(cudaMalloc((void**) &d_scratch, mem_size_field));
//enabling sharing of the h_solverParamPtr between host and device
checkCudaErrors(cudaHostGetDevicePointer((void **)&d_solverParam, (void *)h_solverParamPtr, 0));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::prepareSolverParams(Dim3D fieldDim, DiffusionData const &diffData){
SolverParams_t &h_solverParam = *h_solverParamPtr;
h_solverParam.dimx=fieldDim.x;
h_solverParam.dimy=fieldDim.y;
h_solverParam.dimz=fieldDim.z;
h_solverParam.dx=1.0;
h_solverParam.dt=1.0;
h_solverParam.numberOfCelltypes=2;
for (int i=0 ; i<UCHAR_MAX+1 ; ++i){
h_solverParam.diffCoef[i]=diffData.diffCoef[i];
h_solverParam.decayCoef[i]=diffData.decayCoef[i];
//cerr<<"h_solverParam.diffCoef["<<i<<"]="<<h_solverParam.diffCoef[i]<<endl;
}
}
string FlexibleDiffusionSolverFE_GPU_CUDA::solverName(){
cerr<<"Calling FlexibleDiffusionSolverFE_GPU_CUDA::solverName"<<endl;
return "FlexibleDiffusionSolverFE_CUDA";
}
void FlexibleDiffusionSolverFE_GPU_CUDA::fieldHostToDevice(float const *h_field){
checkCudaErrors(cudaMemcpy(d_field, h_field, mem_size_field,
cudaMemcpyHostToDevice));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::fieldDeviceToHost(float *h_field)const{
checkCudaErrors(cudaMemcpy(h_field, d_scratch, mem_size_field,cudaMemcpyDeviceToHost) );
}
void FlexibleDiffusionSolverFE_GPU_CUDA::swapScratchAndField(){
swap(d_field, d_scratch);
}
void FlexibleDiffusionSolverFE_GPU_CUDA::initCellTypeArray(unsigned char *arr, size_t arrLength){
//cerr<<"h_celltype_field->getArraySize()="<<arrLength<<" mem_size_celltype_field="<<mem_size_celltype_field<<endl;
////h_celltype_field=cellTypeMonitorPlugin->getCellTypeArray();
checkCudaErrors(cudaMemcpy(d_celltype_field, arr, arrLength*sizeof(*d_celltype_field),cudaMemcpyHostToDevice));
}
void FlexibleDiffusionSolverFE_GPU_CUDA::initBoundaryArray(unsigned char *arr, size_t arrLength){
checkCudaErrors(cudaMemcpy(d_boundary_field, arr, arrLength*sizeof(*d_boundary_field),cudaMemcpyHostToDevice));
}
__global__ void diffSolverKernel( float* field, float* scratch,unsigned char * celltype, SolverParams_t *solverParams){
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bz=0; //simulated blockIdx.z
int DIMX=solverParams->dimx;
int DIMY=solverParams->dimy;
int DIMZ=solverParams->dimz;
int bz_max=DIMZ/BLOCK_SIZE;
//each thread copies data into shared memory
int threadsPerBlock=BLOCK_SIZE*BLOCK_SIZE*BLOCK_SIZE;
__shared__ float fieldBlock[BLOCK_SIZE+2][BLOCK_SIZE+2][BLOCK_SIZE+2];
__shared__ unsigned char celltypeBlock[BLOCK_SIZE+2][BLOCK_SIZE+2][BLOCK_SIZE+2];
__shared__ float scratchBlock[BLOCK_SIZE][BLOCK_SIZE][BLOCK_SIZE];
for (bz=0 ; bz<bz_max ; ++bz){
	//mapping from block, threadIdx to x,y,z of the inner frame
int x= bx*BLOCK_SIZE+tx;
int y= by*BLOCK_SIZE+ty;
int z= bz*BLOCK_SIZE+tz;
//int offset=threadsPerBlock*bx+threadsPerBlock*blockDim.x*by+DIMX*DIMY*BLOCK_SIZE*bz;
fieldBlock[tx+1][ty+1][tz+1] = field[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
celltypeBlock[tx+1][ty+1][tz+1] = celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
scratchBlock[tx][ty][tz]=0.0;
//fieldBlock(tx+1, ty+1, tz+1) = field[offset+tz*BLOCK_SIZE*BLOCK_SIZE+ty*BLOCK_SIZE+tx];
if (tx==0){
fieldBlock[0][ty+1][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x];
celltypeBlock[0][ty+1][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x];
}
if (tx==BLOCK_SIZE-1){
fieldBlock[BLOCK_SIZE+1][ty+1][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+2];
celltypeBlock[BLOCK_SIZE+1][ty+1][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+2];
}
if (ty==0){
fieldBlock[tx+1][0][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y)*(DIMX+2)+x+1];
celltypeBlock[tx+1][0][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y)*(DIMX+2)+x+1];
}
if (ty==BLOCK_SIZE-1){
fieldBlock[tx+1][BLOCK_SIZE+1][tz+1]=field[(z+1)*(DIMX+2)*(DIMY+2)+(y+2)*(DIMX+2)+x+1];
celltypeBlock[tx+1][BLOCK_SIZE+1][tz+1]=celltype[(z+1)*(DIMX+2)*(DIMY+2)+(y+2)*(DIMX+2)+x+1];
}
if (tz==0){
fieldBlock[tx+1][ty+1][0]=field[(z)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
celltypeBlock[tx+1][ty+1][0]=celltype[(z)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
}
if (tz==BLOCK_SIZE-1){
fieldBlock[tx+1][ty+1][BLOCK_SIZE+1]=field[(z+2)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
celltypeBlock[tx+1][ty+1][BLOCK_SIZE+1]=celltype[(z+2)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
}
__syncthreads();
//solve actual diff equation
float concentrationSum =0.0;
float dt_dx2=solverParams->dt/(solverParams->dx*solverParams->dx);
int curentCelltype=celltypeBlock[tx+1][ty+1][tz+1];
concentrationSum=fieldBlock[tx+2][ty+1][tz+1]+fieldBlock[tx+1][ty+2][tz+1]+fieldBlock[tx+1][ty+1][tz+2]
+fieldBlock[tx][ty+1][tz+1]+fieldBlock[tx+1][ty][tz+1]+fieldBlock[tx+1][ty+1][tz]-6*fieldBlock[tx+1][ty+1][tz+1];
float * diffCoef=solverParams->diffCoef;
float * decayCoef=solverParams->decayCoef;
concentrationSum*=diffCoef[curentCelltype];
float varDiffSumTerm=0.0;
//mixing central difference first derivatives with forward second derivatives does not work
//terms due to variable diffusion coef
////x partial derivatives
//varDiffSumTerm+=(diffCoef[celltypeBlock[tx+2][ty+1][tz+1]]-diffCoef[celltypeBlock[tx][ty+1][tz+1]])*(fieldBlock[tx+2][ty+1][tz+1]-fieldBlock[tx][ty+1][tz+1]);
////y partial derivatives
//varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+2][tz+1]]-diffCoef[celltypeBlock[tx+1][ty][tz+1]])*(fieldBlock[tx+1][ty+2][tz+1]-fieldBlock[tx+1][ty][tz+1]);
////z partial derivatives
//varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+1][tz+2]]-diffCoef[celltypeBlock[tx+1][ty+1][tz]])*(fieldBlock[tx+1][ty+1][tz+2]-fieldBlock[tx+1][ty+1][tz]);
//scratchBlock[tx][ty][tz]=diffConst*(concentrationSum-6*fieldBlock[tx+1][ty+1][tz+1])+fieldBlock[tx+1][ty+1][tz+1];
//scratchBlock[tx][ty][tz]=dt_4dx2*(concentrationSum+4*varDiffSumTerm)+fieldBlock[tx+1][ty+1][tz+1];
//scratchBlock[tx][ty][tz]=dt_4dx2*(concentrationSum+varDiffSumTerm)+fieldBlock[tx+1][ty+1][tz+1];
//using forward first derivatives
//x partial derivatives
varDiffSumTerm+=(diffCoef[celltypeBlock[tx+2][ty+1][tz+1]]-diffCoef[curentCelltype])*(fieldBlock[tx+2][ty+1][tz+1]-fieldBlock[tx+1][ty+1][tz+1]);
//y partial derivatives
varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+2][tz+1]]-diffCoef[curentCelltype])*(fieldBlock[tx+1][ty+2][tz+1]-fieldBlock[tx+1][ty+1][tz+1]);
//z partial derivatives
varDiffSumTerm+=(diffCoef[celltypeBlock[tx+1][ty+1][tz+2]]-diffCoef[curentCelltype])*(fieldBlock[tx+1][ty+1][tz+2]-fieldBlock[tx+1][ty+1][tz+1]);
//OK
scratchBlock[tx][ty][tz]=dt_dx2*(concentrationSum+varDiffSumTerm)+(1-solverParams->dt*decayCoef[curentCelltype])*fieldBlock[tx+1][ty+1][tz+1];
//simple consistency check
//scratchBlock[tx][ty][tz]=concentrationSum;
//scratchBlock[tx][ty][tz]=fieldBlock[tx+2][ty+1][tz+1]+fieldBlock[tx][ty+1][tz+1]+fieldBlock[tx+1][ty+2][tz+1]+fieldBlock[tx+1][ty][tz+1]+fieldBlock[tx+1][ty+1][tz+2]+fieldBlock[tx+1][ty+1][tz];
//scratchBlock[tx][ty][tz]=fieldBlock[tx+1][ty+1][tz+1];
//fieldBlock[tx+1][ty+1][tz+1]=3000.0f;
__syncthreads();
//copy scratchBlock to scratch field on the device
scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=scratchBlock[tx][ty][tz];
//scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=3000.0;
__syncthreads();
//boundary condition
//if(x==0){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(x==solverParams->dimx-1){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+2]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(y==0){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(y==solverParams->dimy-1){
// scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+2)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(z==0){
// scratch[(z)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
//if(z==solverParams->dimz-1){
// scratch[(z+2)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1]=scratch[(z+1)*(DIMX+2)*(DIMY+2)+(y+1)*(DIMX+2)+x+1];
//}
}
//__syncthreads();
}
void FlexibleDiffusionSolverFE_GPU_CUDA::diffuseSingleField(){
//we cannot access device variable (e.g. d_solverParam) from this part of the code - only kernel is allowed to do this
//here we are using page-locked memory to share SolverParams_t structure between device and host
unsigned int dimX=h_solverParamPtr->dimx;
unsigned int dimY=h_solverParamPtr->dimy;
unsigned int dimZ=h_solverParamPtr->dimz;
SolverParams_t * d_solverParamFromMappedMemory;
cudaHostGetDevicePointer((void **)&d_solverParamFromMappedMemory, (void *)h_solverParamPtr, 0);
//cutilSafeCall(cudaMemcpy(d_solverParamFromMappedMemory, h_solverParam, sizeof(SolverParams_t ),cudaMemcpyHostToDevice) );
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(dimX / threads.x, dimY / threads.y);
diffSolverKernel<<< grid, threads >>>(d_field, d_scratch,d_celltype_field,d_solverParamFromMappedMemory);
//diffSolverKernel<<< grid, threads >>>(d_field, d_scratch,d_celltype_field,d_solverParam);
cudaThreadSynchronize();//TODO: this synchronization looks redundant. Copying memory back to host implies implicit synchronization
}
}//namespace CompuCell3D
|
e694c18c9a85790d6e8d50461a7a3f92526ef921.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
printf("Hello world. I'm a thread in block %d\n", blockIdx.x);
printf("Hello world. I'm a thread number %d\n", threadIdx.x);
}
int main(int argc, char **argv)
{
hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, );
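	// without a hipDeviceSynchronize() here the kernel's printf output may not be flushed before main returns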
//hipDeviceSynchronize();
printf("That's all!\n");
//return 0;
}
|
e694c18c9a85790d6e8d50461a7a3f92526ef921.cu
|
#include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
printf("Hello world. I'm a thread in block %d\n", blockIdx.x);
printf("Hello world. I'm a thread number %d\n", threadIdx.x);
}
int main(int argc, char **argv)
{
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
//cudaDeviceSynchronize();
printf("That's all!\n");
//return 0;
}
|
9a4f39ea126e340c699e414c5cfc028cb9bb68a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Device code
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
// Host Code
float *x, *y, *d_x, *d_y;
// x, y point to the host arrays; d_x, d_y point to the device arrays.
// malloc
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
// hipMalloc
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
// init host arr
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// last option - direction of copy
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
// thread blocks required to process all N elements of the arrays
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
// Launch a grid of thread blocks: the first launch parameter is the number of thread blocks in the grid, the second is the number of threads per block.
//
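// With N = 1<<20 = 1,048,576, (N+255)/256 = 4096 blocks of 256 threads each,
// i.e. exactly 1,048,576 threads, so every element is covered by one thread.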
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
|
9a4f39ea126e340c699e414c5cfc028cb9bb68a7.cu
|
#include <stdio.h>
// Device code
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
// Host Code
float *x, *y, *d_x, *d_y;
// x, y point to the host arrays; d_x, d_y point to the device arrays.
// malloc
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
// cudaMalloc
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
// init host arr
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// last option - direction of copy
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
// thread blocks required to process all N elements of the arrays
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
// Launch a grid of thread blocks: the first launch parameter is the number of thread blocks in the grid, the second is the number of threads per block.
//
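// With N = 1<<20 = 1,048,576, (N+255)/256 = 4096 blocks of 256 threads each,
// i.e. exactly 1,048,576 threads, so every element is covered by one thread.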
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
71f28d4f9ddf2e01a282f15f54e7182b1a64ce87.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
struct softmargin_functor
{
__host__ __device__ float operator()(const float& x, const float& y) const
{
return log(1 + exp(-x*y));
}
};
void THNN_CudaSoftMarginCriterion_updateOutput(THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *output,
int sizeAverage
)
{
THCUNN_assertSameGPU(state, 2, input, target);
float sum;
ptrdiff_t size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), softmargin_functor());
if(sizeAverage)
sum /= size;
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_set1d(state, output, 0, sum);
}
struct softmargin_updateGradInput_functor
{
const float norm;
softmargin_updateGradInput_functor(float norm_) :
norm(norm_) {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
float temp = exp(-x*y);
return -y*temp*norm/(1.f + temp);
}
};
void THNN_CudaSoftMarginCriterion_updateGradInput(THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *gradInput,
int sizeAverage
)
{
THCUNN_assertSameGPU(state, 3, input, target, gradInput);
ptrdiff_t size = THCudaTensor_nElement(state, input);
float norm = (sizeAverage ? 1./size : 1.);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
THCudaTensor_resizeAs(state, gradInput, input);
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data, softmargin_updateGradInput_functor(norm));
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
}
|
71f28d4f9ddf2e01a282f15f54e7182b1a64ce87.cu
|
#include "THCUNN.h"
#include "common.h"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
struct softmargin_functor
{
__host__ __device__ float operator()(const float& x, const float& y) const
{
return log(1 + exp(-x*y));
}
};
void THNN_CudaSoftMarginCriterion_updateOutput(THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *output,
int sizeAverage
)
{
THCUNN_assertSameGPU(state, 2, input, target);
float sum;
ptrdiff_t size = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), softmargin_functor());
if(sizeAverage)
sum /= size;
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_set1d(state, output, 0, sum);
}
struct softmargin_updateGradInput_functor
{
const float norm;
softmargin_updateGradInput_functor(float norm_) :
norm(norm_) {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
float temp = exp(-x*y);
return -y*temp*norm/(1.f + temp);
}
};
void THNN_CudaSoftMarginCriterion_updateGradInput(THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *gradInput,
int sizeAverage
)
{
THCUNN_assertSameGPU(state, 3, input, target, gradInput);
ptrdiff_t size = THCudaTensor_nElement(state, input);
float norm = (sizeAverage ? 1./size : 1.);
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
THCudaTensor_resizeAs(state, gradInput, input);
thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data, softmargin_updateGradInput_functor(norm));
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
}
|
e30ea8f093a042dce73cd87f3a86107de33fa713.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with functionality equivalent to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack passwordcrack_cuda.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password1[] = "SO1942";
char plain_password2[] = "NU4019";
char plain_password3[] = "CH5781";
char plain_password4[] = "AN4573";
char *a = attempt;
char *b = attempt;
char *c = attempt;
char *d = attempt;
char *p1 = plain_password1;
char *p2 = plain_password2;
char *p3 = plain_password3;
char *p4 = plain_password4;
while(*a == *p1) {
if(*a == '\0')
{
printf("Password: %s\n",plain_password1);
break;
}
a++;
p1++;
}
while(*b == *p2) {
if(*b == '\0')
{
printf("Password: %s\n",plain_password2);
break;
}
b++;
p2++;
}
while(*c == *p3) {
if(*c == '\0')
{
printf("Password: %s\n",plain_password3);
break;
}
c++;
p3++;
}
while(*d == *p4) {
if(*d == '\0')
{
printf("Password: %s\n",plain_password4);
return 1;
}
d++;
p4++;
}
return 0;
}
__global__ void kernel() {
char i1,i2,i3,i4;
char password[7];
password[6] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
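// 65 is the ASCII code of 'A'; with the 26x26 launch in main(), blockIdx.x and
// threadIdx.x each select one uppercase letter 'A'..'Z' for the first two
// password characters, and the nested loops below enumerate the four digits.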
char firstMatch = i;
char secondMatch = j;
password[0] = firstMatch;
password[1] = secondMatch;
for(i1='0'; i1<='9'; i1++){
for(i2='0'; i2<='9'; i2++){
for(i3='0'; i3<='9'; i3++){
for(i4='0'; i4<='9'; i4++){
password[2] = i1;
password[3] = i2;
password[4] = i3;
password[5] = i4;
if(is_a_match(password)) {
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
e30ea8f093a042dce73cd87f3a86107de33fa713.cu
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with functionality equivalent to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack passwordcrack_cuda.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password1[] = "SO1942";
char plain_password2[] = "NU4019";
char plain_password3[] = "CH5781";
char plain_password4[] = "AN4573";
char *a = attempt;
char *b = attempt;
char *c = attempt;
char *d = attempt;
char *p1 = plain_password1;
char *p2 = plain_password2;
char *p3 = plain_password3;
char *p4 = plain_password4;
while(*a == *p1) {
if(*a == '\0')
{
printf("Password: %s\n",plain_password1);
break;
}
a++;
p1++;
}
while(*b == *p2) {
if(*b == '\0')
{
printf("Password: %s\n",plain_password2);
break;
}
b++;
p2++;
}
while(*c == *p3) {
if(*c == '\0')
{
printf("Password: %s\n",plain_password3);
break;
}
c++;
p3++;
}
while(*d == *p4) {
if(*d == '\0')
{
printf("Password: %s\n",plain_password4);
return 1;
}
d++;
p4++;
}
return 0;
}
__global__ void kernel() {
char i1,i2,i3,i4;
char password[7];
password[6] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
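// 65 is the ASCII code of 'A'; with the 26x26 launch in main(), blockIdx.x and
// threadIdx.x each select one uppercase letter 'A'..'Z' for the first two
// password characters, and the nested loops below enumerate the four digits.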
char firstMatch = i;
char secondMatch = j;
password[0] = firstMatch;
password[1] = secondMatch;
for(i1='0'; i1<='9'; i1++){
for(i2='0'; i2<='9'; i2++){
for(i3='0'; i3<='9'; i3++){
for(i4='0'; i4<='9'; i4++){
password[2] = i1;
password[3] = i2;
password[4] = i3;
password[5] = i4;
if(is_a_match(password)) {
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
5f9832d2f253542d8c3b8ec3cb8876be9e2dd0ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ mixed3 multiplyComplexRealPart(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2r-c1.y*c2i;
}
__device__ mixed3 multiplyComplexImagPart(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2i+c1.y*c2r;
}
__device__ mixed3 multiplyComplexRealPartConj(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2r+c1.y*c2i;
}
__device__ mixed3 multiplyComplexImagPartConj(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2i-c1.y*c2r;
}
/**
* Compute the contracted positions
*/
extern "C" __global__ void contractPositions(mixed4* posq, mixed4* contracted) {
const int numBlocks = (blockDim.x*gridDim.x)/NUM_COPIES;
const int blockStart = NUM_COPIES*(threadIdx.x/NUM_COPIES);
const int indexInBlock = threadIdx.x-blockStart;
__shared__ mixed3 q[2*THREAD_BLOCK_SIZE];
__shared__ mixed3 temp[2*THREAD_BLOCK_SIZE];
__shared__ mixed2 w[NUM_COPIES];
mixed3* qreal = &q[blockStart];
mixed3* qimag = &q[blockStart+blockDim.x];
mixed3* tempreal = &temp[blockStart];
mixed3* tempimag = &temp[blockStart+blockDim.x];
if (threadIdx.x < NUM_COPIES)
w[indexInBlock] = make_mixed2(cos(-indexInBlock*2*M_PI/NUM_COPIES), sin(-indexInBlock*2*M_PI/NUM_COPIES));
__syncthreads();
for (int particle = (blockIdx.x*blockDim.x+threadIdx.x)/NUM_COPIES; particle < NUM_ATOMS; particle += numBlocks) {
// Load the particle position.
mixed4 particlePosq = posq[particle+indexInBlock*PADDED_NUM_ATOMS];
qreal[indexInBlock] = make_mixed3(particlePosq.x, particlePosq.y, particlePosq.z);
qimag[indexInBlock] = make_mixed3(0);
// Forward FFT.
__syncthreads();
FFT_Q_FORWARD
if (NUM_CONTRACTED_COPIES > 1) {
// Compress the data to remove high frequencies.
int start = (NUM_CONTRACTED_COPIES+1)/2;
tempreal[indexInBlock] = qreal[indexInBlock];
tempimag[indexInBlock] = qimag[indexInBlock];
__syncthreads();
if (indexInBlock < NUM_CONTRACTED_COPIES) {
qreal[indexInBlock] = tempreal[indexInBlock < start ? indexInBlock : indexInBlock+(NUM_COPIES-NUM_CONTRACTED_COPIES)];
qimag[indexInBlock] = tempimag[indexInBlock < start ? indexInBlock : indexInBlock+(NUM_COPIES-NUM_CONTRACTED_COPIES)];
}
__syncthreads();
FFT_Q_BACKWARD
}
// Store results.
if (indexInBlock < NUM_CONTRACTED_COPIES)
contracted[particle+indexInBlock*PADDED_NUM_ATOMS] = make_mixed4(POS_SCALE*qreal[indexInBlock].x, POS_SCALE*qreal[indexInBlock].y, POS_SCALE*qreal[indexInBlock].z, particlePosq.w);
}
}
/**
* Apply the contracted forces to all copies.
*/
extern "C" __global__ void contractForces(long long* force, long long* contracted) {
const int numBlocks = (blockDim.x*gridDim.x)/NUM_COPIES;
const int blockStart = NUM_COPIES*(threadIdx.x/NUM_COPIES);
const int indexInBlock = threadIdx.x-blockStart;
const mixed forceScale = 1/(mixed) 0x100000000;
__shared__ mixed3 f[2*THREAD_BLOCK_SIZE];
__shared__ mixed3 temp[2*THREAD_BLOCK_SIZE];
__shared__ mixed2 w[NUM_COPIES];
mixed3* freal = &f[blockStart];
mixed3* fimag = &f[blockStart+blockDim.x];
mixed3* tempreal = &temp[blockStart];
mixed3* tempimag = &temp[blockStart+blockDim.x];
if (threadIdx.x < NUM_COPIES)
w[indexInBlock] = make_mixed2(cos(-indexInBlock*2*M_PI/NUM_COPIES), sin(-indexInBlock*2*M_PI/NUM_COPIES));
__syncthreads();
for (int particle = (blockIdx.x*blockDim.x+threadIdx.x)/NUM_COPIES; particle < NUM_ATOMS; particle += numBlocks) {
// Load the force.
int forceIndex = particle+indexInBlock*PADDED_NUM_ATOMS*3;
if (indexInBlock < NUM_CONTRACTED_COPIES) {
freal[indexInBlock] = make_mixed3(contracted[forceIndex]*forceScale, contracted[forceIndex+PADDED_NUM_ATOMS]*forceScale, contracted[forceIndex+PADDED_NUM_ATOMS*2]*forceScale);
fimag[indexInBlock] = make_mixed3(0);
}
__syncthreads();
// Forward FFT.
if (NUM_CONTRACTED_COPIES > 1) {
FFT_F_FORWARD
}
// Set the high frequency components to 0.
int start = (NUM_CONTRACTED_COPIES+1)/2;
int end = NUM_COPIES-NUM_CONTRACTED_COPIES+start;
tempreal[indexInBlock] = freal[indexInBlock];
tempimag[indexInBlock] = fimag[indexInBlock];
__syncthreads();
if (indexInBlock >= start) {
freal[indexInBlock] = (indexInBlock < end ? make_mixed3(0) : tempreal[indexInBlock-(NUM_COPIES-NUM_CONTRACTED_COPIES)]);
fimag[indexInBlock] = (indexInBlock < end ? make_mixed3(0) : tempimag[indexInBlock-(NUM_COPIES-NUM_CONTRACTED_COPIES)]);
}
__syncthreads();
FFT_F_BACKWARD
// Store results.
force[forceIndex] = (long long) (FORCE_SCALE*freal[indexInBlock].x);
force[forceIndex+PADDED_NUM_ATOMS] = (long long) (FORCE_SCALE*freal[indexInBlock].y);
force[forceIndex+PADDED_NUM_ATOMS*2] = (long long) (FORCE_SCALE*freal[indexInBlock].z);
}
}
|
5f9832d2f253542d8c3b8ec3cb8876be9e2dd0ef.cu
|
__device__ mixed3 multiplyComplexRealPart(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2r-c1.y*c2i;
}
__device__ mixed3 multiplyComplexImagPart(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2i+c1.y*c2r;
}
__device__ mixed3 multiplyComplexRealPartConj(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2r+c1.y*c2i;
}
__device__ mixed3 multiplyComplexImagPartConj(mixed2 c1, mixed3 c2r, mixed3 c2i) {
return c1.x*c2i-c1.y*c2r;
}
/**
* Compute the contracted positions
*/
extern "C" __global__ void contractPositions(mixed4* posq, mixed4* contracted) {
const int numBlocks = (blockDim.x*gridDim.x)/NUM_COPIES;
const int blockStart = NUM_COPIES*(threadIdx.x/NUM_COPIES);
const int indexInBlock = threadIdx.x-blockStart;
__shared__ mixed3 q[2*THREAD_BLOCK_SIZE];
__shared__ mixed3 temp[2*THREAD_BLOCK_SIZE];
__shared__ mixed2 w[NUM_COPIES];
mixed3* qreal = &q[blockStart];
mixed3* qimag = &q[blockStart+blockDim.x];
mixed3* tempreal = &temp[blockStart];
mixed3* tempimag = &temp[blockStart+blockDim.x];
if (threadIdx.x < NUM_COPIES)
w[indexInBlock] = make_mixed2(cos(-indexInBlock*2*M_PI/NUM_COPIES), sin(-indexInBlock*2*M_PI/NUM_COPIES));
__syncthreads();
for (int particle = (blockIdx.x*blockDim.x+threadIdx.x)/NUM_COPIES; particle < NUM_ATOMS; particle += numBlocks) {
// Load the particle position.
mixed4 particlePosq = posq[particle+indexInBlock*PADDED_NUM_ATOMS];
qreal[indexInBlock] = make_mixed3(particlePosq.x, particlePosq.y, particlePosq.z);
qimag[indexInBlock] = make_mixed3(0);
// Forward FFT.
__syncthreads();
FFT_Q_FORWARD
if (NUM_CONTRACTED_COPIES > 1) {
// Compress the data to remove high frequencies.
int start = (NUM_CONTRACTED_COPIES+1)/2;
tempreal[indexInBlock] = qreal[indexInBlock];
tempimag[indexInBlock] = qimag[indexInBlock];
__syncthreads();
if (indexInBlock < NUM_CONTRACTED_COPIES) {
qreal[indexInBlock] = tempreal[indexInBlock < start ? indexInBlock : indexInBlock+(NUM_COPIES-NUM_CONTRACTED_COPIES)];
qimag[indexInBlock] = tempimag[indexInBlock < start ? indexInBlock : indexInBlock+(NUM_COPIES-NUM_CONTRACTED_COPIES)];
}
__syncthreads();
FFT_Q_BACKWARD
}
// Store results.
if (indexInBlock < NUM_CONTRACTED_COPIES)
contracted[particle+indexInBlock*PADDED_NUM_ATOMS] = make_mixed4(POS_SCALE*qreal[indexInBlock].x, POS_SCALE*qreal[indexInBlock].y, POS_SCALE*qreal[indexInBlock].z, particlePosq.w);
}
}
/**
* Apply the contracted forces to all copies.
*/
extern "C" __global__ void contractForces(long long* force, long long* contracted) {
const int numBlocks = (blockDim.x*gridDim.x)/NUM_COPIES;
const int blockStart = NUM_COPIES*(threadIdx.x/NUM_COPIES);
const int indexInBlock = threadIdx.x-blockStart;
const mixed forceScale = 1/(mixed) 0x100000000;
__shared__ mixed3 f[2*THREAD_BLOCK_SIZE];
__shared__ mixed3 temp[2*THREAD_BLOCK_SIZE];
__shared__ mixed2 w[NUM_COPIES];
mixed3* freal = &f[blockStart];
mixed3* fimag = &f[blockStart+blockDim.x];
mixed3* tempreal = &temp[blockStart];
mixed3* tempimag = &temp[blockStart+blockDim.x];
if (threadIdx.x < NUM_COPIES)
w[indexInBlock] = make_mixed2(cos(-indexInBlock*2*M_PI/NUM_COPIES), sin(-indexInBlock*2*M_PI/NUM_COPIES));
__syncthreads();
for (int particle = (blockIdx.x*blockDim.x+threadIdx.x)/NUM_COPIES; particle < NUM_ATOMS; particle += numBlocks) {
// Load the force.
int forceIndex = particle+indexInBlock*PADDED_NUM_ATOMS*3;
if (indexInBlock < NUM_CONTRACTED_COPIES) {
freal[indexInBlock] = make_mixed3(contracted[forceIndex]*forceScale, contracted[forceIndex+PADDED_NUM_ATOMS]*forceScale, contracted[forceIndex+PADDED_NUM_ATOMS*2]*forceScale);
fimag[indexInBlock] = make_mixed3(0);
}
__syncthreads();
// Forward FFT.
if (NUM_CONTRACTED_COPIES > 1) {
FFT_F_FORWARD
}
// Set the high frequency components to 0.
int start = (NUM_CONTRACTED_COPIES+1)/2;
int end = NUM_COPIES-NUM_CONTRACTED_COPIES+start;
tempreal[indexInBlock] = freal[indexInBlock];
tempimag[indexInBlock] = fimag[indexInBlock];
__syncthreads();
if (indexInBlock >= start) {
freal[indexInBlock] = (indexInBlock < end ? make_mixed3(0) : tempreal[indexInBlock-(NUM_COPIES-NUM_CONTRACTED_COPIES)]);
fimag[indexInBlock] = (indexInBlock < end ? make_mixed3(0) : tempimag[indexInBlock-(NUM_COPIES-NUM_CONTRACTED_COPIES)]);
}
__syncthreads();
FFT_F_BACKWARD
// Store results.
force[forceIndex] = (long long) (FORCE_SCALE*freal[indexInBlock].x);
force[forceIndex+PADDED_NUM_ATOMS] = (long long) (FORCE_SCALE*freal[indexInBlock].y);
force[forceIndex+PADDED_NUM_ATOMS*2] = (long long) (FORCE_SCALE*freal[indexInBlock].z);
}
}
|
96a849aa6b81e2c1c411d7e51d79172cb29bd7d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "iostream"
#include "math.h"
#include "fstream"
#include "ostream"
#include "sstream"
#include <iomanip>
#include <vector>
#include <algorithm>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include"hip/device_functions.h"
#include "common_hip.cuh"
#include"grid.cuh"
#include "particle.cuh"
#include "UPML.cuh"
#include "inteper.cuh"
#include "static_electric.cuh"
#include "static_magnetic.cuh"
#include "lty_paralle_common.cuh"
#include "hiprand/hiprand_kernel.h"
#include <stdlib.h>
#include <stdio.h>
#include "hiprand/hiprand.h"
#include"variable.cuh"
using namespace std;
int main()
{
int tmp = 5000;
hipDeviceReset();
curandnumber(tmp);
void data_save(Paticle *pat_elc,Paticle *pat_ion);
void data_save_ion(Paticle *pat_elc, Paticle *pat_ion);//particle data at the final time step
//void data_save(Paticle *pat_elc);
void data_save(Grid* G_GPU);
void current_save(Grid* G);
char* indir="..\\data\\input.txt";
parameter p(indir);
initial_para(p);//initialize parameters
//cout<<qe<<" "<<Me<<endl;
sigmaz1=one_array_malloc(nzz);
sigmaz =one_array_malloc(nzz);
G_gpu = new Grid[nxx*nzz];//only used to verify the correctness of the field; data copied back from the device
pat_elc=new Paticle[lizi_count];//particle data on the host, receives the results computed on the GPU
pat_ion=new Paticle[lizi_count];//particle data on the host, receives the results computed on the GPU
hipError_t cudaStatus;
hipMalloc((void**)&device_G,nxx*nzz*sizeof(Grid));
hipMalloc((void**)&dev_Gtemp, nxx*nzz*sizeof(Grid));
hipMalloc((void**)&device_Gn,nxx*nzz*sizeof(Grid));
//hipMalloc((void**)&device_tail,sizeof(int));//used to shift the thread index backward
cudaStatus=hipMalloc((void**)&d_pat_elc,lizi_count*sizeof(Paticle));
cudaStatus = hipMalloc((void**)&d_pre_elc, lizi_count*sizeof(Pre_Paticle));//total number of simulated particles, on the GPU
cudaStatus=hipMalloc((void**)&d_pat_ion,lizi_count*sizeof(Paticle));
cudaStatus = hipMalloc((void**)&d_pre_ion, lizi_count*sizeof(Pre_Paticle));//total number of simulated particles, on the GPU
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
cudaStatus=hipMalloc((void**)&d_stac_Bx,nxx*nzz*sizeof(float));
cudaStatus=hipMalloc((void**)&d_stac_Bz,nxx*nzz*sizeof(float));
cudaStatus=hipMalloc((void**)&d_stac_ex,nxx*nzz*sizeof(float));
cudaStatus=hipMalloc((void**)&d_stac_ez,nxx*nzz*sizeof(float));
read_cross("E:\\cross_section_net.txt");
static_magnetic();
static_electric();
initialsigma();
cudaStatus=hipMemcpy(d_stac_ex,stac_ex,nxx*nzz*sizeof(float),hipMemcpyHostToDevice);
cudaStatus=hipMemcpy(d_stac_ez,stac_ez,nxx*nzz*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_stac_Bx,stac_Bx,nxx*nzz*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_stac_Bz,stac_Bz,nxx*nzz*sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( device_initialchang), dim3(block),dim3(thread), 0, 0, device_G,device_Gn);
hipLaunchKernelGGL(( kernel_L_InitialPML), dim3(block),dim3(thread), 0, 0, nxx, nzz);
cudaStatus = hipMalloc((void**)&d_sigmaz1, nzz*sizeof(float));
cudaStatus = hipMalloc((void**)&d_sigmaz, nzz*sizeof(float));
cudaStatus = hipMemcpy(d_sigmaz1, sigmaz1, nzz*sizeof(float), hipMemcpyHostToDevice);
cudaStatus = hipMemcpy(d_sigmaz, sigmaz, nzz*sizeof(float), hipMemcpyHostToDevice);
clock_t start=0, end=0;
ofstream time("D:\\PIC\\time.txt",ios::app);
hipLaunchKernelGGL(( initial_always), dim3(block),dim3(thread), 0, 0, d_pat_elc,lizi_count,DS_number,tmp);//electron initialization
initial_always << <block, thread >> >(d_pat_ion, lizi_count, DS_number,tmp);//ion initialization
device_define_G << <block,thread >> >(nxx, nzz, device_Gn, device_G);
start = clock();
for(int t=0;t<1;t++)
{
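//advance the active-particle tail by 30 each time step (apparently 30 new particles become active per iteration)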
host_temptail+=30;
//device_ave_field<<<block,thread>>>(nxx,nzz,device_G,device_Gn);
device_update_last << <block, thread >> >(d_stac_Bx, d_stac_Bz, d_stac_ex, d_stac_ez, d_pat_elc,d_pre_elc,device_G,DS_number,host_temptail);
//device_update_ion << <block, thread >> >(d_stac_Bx, d_stac_Bz, d_stac_ex, d_stac_ez, d_pat_ion,d_pre_ion,device_G,DS_number,host_temptail);
//hipMemcpy(wuchafenxi, (d_pat_elc), 30 * sizeof(Paticle), hipMemcpyDeviceToHost);
//data_save(wuchafenxi);//copy out 30 particles' data each step for comparison; not actually required
current << <block, thread >> >(d_pat_elc, d_pre_elc, device_G, DS_number, host_temptail,t);
//current_ion << <block, thread >> >(d_pat_ion, d_pre_ion, device_G, DS_number, host_temptail,t);
/*hipMemcpy(G_gpu, device_G, nxx*nzz*sizeof(Grid), hipMemcpyDeviceToHost);
current_save(G_gpu);*/
hipLaunchKernelGGL(( cacuchang_hx), dim3(block),dim3(thread), 0, 0, device_G,device_Gn,d_sigmaz1,d_sigmaz,dt,dr,dz,nxx,nzz);
cacuchang_hy << <block,thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_hz<< <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_ex << <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_ey << <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_ez << <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
hipLaunchKernelGGL(( device_define_G), dim3(block),dim3(thread), 0, 0, nxx,nzz,device_Gn,device_G);
/*end = clock();
cout<<t<<"\t"<<"\t"<<(float)(end - start) / CLOCKS_PER_SEC<<"S"<<endl;
time<<t<<" Szabo1 Run time: "<<(float)(end - start) / CLOCKS_PER_SEC<<"S"<<endl;*/
}
end = clock();
cout<< (float)(end - start) / CLOCKS_PER_SEC << "S" << endl;
hipMemcpy(G_gpu,device_G, nxx*nzz*sizeof(Grid), hipMemcpyDeviceToHost);
data_save(G_gpu);//field data at the final time step
cudaStatus=hipMemcpy(pat_elc,d_pat_elc,lizi_count*sizeof(Paticle),hipMemcpyDeviceToHost);
cudaStatus=hipMemcpy(pat_ion,d_pat_ion,lizi_count*sizeof(Paticle),hipMemcpyDeviceToHost);
data_save(pat_elc,pat_ion);//particle data at the final time step
data_save_ion(pat_ion,pat_elc);//particle data at the final time step
hipFree(device_G);
hipFree(device_Gn);
hipFree(d_pat_elc);
hipFree(d_pat_ion);
hipFree(d_rds);
hipFree(d_rds1);
hipFree(d_stac_Bx);
hipFree(d_stac_Bz);
hipFree(d_stac_ex);
hipFree(d_stac_ez);
system("pause");
return 0;
}
void data_save(Paticle *pat_elc,Paticle *pat_ion)
{
/*************************************************/
time_inter=1000;
//if(t%time_inter==0)//time_inter
char s_ele[1000];
sprintf_s(s_ele,"D:\\PIC\\e\\bingxing1.txt");
ofstream on_ele(s_ele);
for(int i=0;i<host_temptail;i++)
{
on_ele<<pat_elc[i].pr<<"\t"<<pat_elc[i].py<<"\t"<<pat_elc[i].pz<<"\t"<<pat_elc[i].vr<<"\t"<<pat_elc[i].vy<<"\t"<<pat_elc[i].vz<<endl;
}
}
void data_save_ion(Paticle *pat_elc, Paticle *pat_ion)
{
/*************************************************/
time_inter = 1000;
//if(t%time_inter==0)//time_inter
//{
char s_ele[1000];
sprintf_s(s_ele, "D:\\PIC\\e\\bingxing2.txt");
ofstream on_ele(s_ele);
for (int i = 0; i<host_temptail; i++)
{
on_ele << pat_elc[i].pr << "\t" << pat_elc[i].py << "\t" << pat_elc[i].pz << "\t" << pat_elc[i].vr << "\t" << pat_elc[i].vy << "\t" << pat_elc[i].vz << endl;
}
}
void data_save(Paticle *pat_elc)
{
/*************************************************/
time_inter = 1000;
char s_ele[1000];
sprintf_s(s_ele, "D:\\PIC\\e\\wuchafenxi0.txt");
ofstream on_ele(s_ele, ofstream::app);
for (int i = 0; i<30; i++)
{
on_ele << pat_elc[i].pr << "\t" << pat_elc[i].py << "\t" << pat_elc[i].pz << "\t" << pat_elc[i].vr << "\t" << pat_elc[i].vy << "\t" << pat_elc[i].vz << endl;
}
}
void data_save(Grid* G_GPU)
{
ofstream out_chang("D:\\PIC\\e\\chang_gpu.txt");
if (out_chang)
{
for (int i = 0; i < nxx*nzz; i++)
out_chang << G_GPU[i].ex << " " << G_GPU[i].ey << " " << G_GPU[i].ez << " "
<< G_GPU[i].hx << " " << G_GPU[i].hy << " " << G_GPU[i].hz << endl;
}
}
void current_save(Grid* G_GPU)
{
ofstream out_chang("D:\\PIC\\e\\current_gpu.txt");
if (out_chang)
{
for (int i = 0; i < nxx*nzz; i++)
out_chang << G_GPU[i].jr << " " << G_GPU[i].jy << " " << G_GPU[i].jz <<endl;
}
}
//void curandnumber(int n)
//{
// hipMalloc((void**)&DS_number,n*sizeof(float));
// HS_number=one_array_malloc(n);
// srand(1);
// /*for (int i = 0; i < n; i++)
// {
// HS_number[i] = (float)rand() / ((float)RAND_MAX);
// }*/
//
//float HS_number[30] = { 0.00125126,0.193304,0.585009,
// 0.350291,0.82284,0.174108,0.710501,0.303995,0.0914029,
// 0.147313,0.988525,0.119083,0.0089114,0.531663,0.601764,
// 0.166234,0.450789,0.0570391,0.783319,0.519883,0.875973,
// 0.955901,0.539354,0.462081,0.862239,0.779656,0.996796,0.611499,
// 0.266213,0.840144 };
//hipMemcpy(DS_number,HS_number,n*sizeof(float),hipMemcpyHostToDevice);
//}
//5000 random numbers
void curandnumber(int n)
{
hipMalloc((void**)&DS_number, n*sizeof(float));
HS_number = one_array_malloc(n);
srand(1);
for (int i = 0; i < n; i++)
{
HS_number[i] = (float)rand() / ((float)RAND_MAX);
}
hipMemcpy(DS_number, HS_number, n*sizeof(float), hipMemcpyHostToDevice);
}
|
96a849aa6b81e2c1c411d7e51d79172cb29bd7d2.cu
|
#include "iostream"
#include "math.h"
#include "fstream"
#include "ostream"
#include "sstream"
#include <iomanip>
#include <vector>
#include <algorithm>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include"device_functions.h"
#include "common.cuh"
#include"grid.cuh"
#include "particle.cuh"
#include "UPML.cuh"
#include "inteper.cuh"
#include "static_electric.cuh"
#include "static_magnetic.cuh"
#include "lty_paralle_common.cuh"
#include "curand_kernel.h"
#include <stdlib.h>
#include <stdio.h>
#include "curand.h"
#include"variable.cuh"
using namespace std;
int main()
{
int tmp = 5000;
cudaDeviceReset();
curandnumber(tmp);
void data_save(Paticle *pat_elc,Paticle *pat_ion);
void data_save_ion(Paticle *pat_elc, Paticle *pat_ion);//particle data at the final time step
//void data_save(Paticle *pat_elc);
void data_save(Grid* G_GPU);
void current_save(Grid* G);
char* indir="..\\data\\input.txt";
parameter p(indir);
initial_para(p);//initialize parameters
//cout<<qe<<" "<<Me<<endl;
sigmaz1=one_array_malloc(nzz);
sigmaz =one_array_malloc(nzz);
G_gpu = new Grid[nxx*nzz];//only used to verify the correctness of the field; data copied back from the device
pat_elc=new Paticle[lizi_count];//particle data on the host, receives the results computed on the GPU
pat_ion=new Paticle[lizi_count];//particle data on the host, receives the results computed on the GPU
cudaError_t cudaStatus;
cudaMalloc((void**)&device_G,nxx*nzz*sizeof(Grid));
cudaMalloc((void**)&dev_Gtemp, nxx*nzz*sizeof(Grid));
cudaMalloc((void**)&device_Gn,nxx*nzz*sizeof(Grid));
//cudaMalloc((void**)&device_tail,sizeof(int));//used to shift the thread index backward
cudaStatus=cudaMalloc((void**)&d_pat_elc,lizi_count*sizeof(Paticle));
cudaStatus = cudaMalloc((void**)&d_pre_elc, lizi_count*sizeof(Pre_Paticle));//total number of simulated particles, on the GPU
cudaStatus=cudaMalloc((void**)&d_pat_ion,lizi_count*sizeof(Paticle));
cudaStatus = cudaMalloc((void**)&d_pre_ion, lizi_count*sizeof(Pre_Paticle));//total number of simulated particles, on the GPU
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
cudaStatus=cudaMalloc((void**)&d_stac_Bx,nxx*nzz*sizeof(float));
cudaStatus=cudaMalloc((void**)&d_stac_Bz,nxx*nzz*sizeof(float));
cudaStatus=cudaMalloc((void**)&d_stac_ex,nxx*nzz*sizeof(float));
cudaStatus=cudaMalloc((void**)&d_stac_ez,nxx*nzz*sizeof(float));
read_cross("E:\\cross_section_net.txt");
static_magnetic();
static_electric();
initialsigma();
cudaStatus=cudaMemcpy(d_stac_ex,stac_ex,nxx*nzz*sizeof(float),cudaMemcpyHostToDevice);
cudaStatus=cudaMemcpy(d_stac_ez,stac_ez,nxx*nzz*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_stac_Bx,stac_Bx,nxx*nzz*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_stac_Bz,stac_Bz,nxx*nzz*sizeof(float),cudaMemcpyHostToDevice);
device_initialchang<<<block,thread>>>(device_G,device_Gn);
kernel_L_InitialPML<<<block,thread>>>(nxx, nzz);
cudaStatus = cudaMalloc((void**)&d_sigmaz1, nzz*sizeof(float));
cudaStatus = cudaMalloc((void**)&d_sigmaz, nzz*sizeof(float));
cudaStatus = cudaMemcpy(d_sigmaz1, sigmaz1, nzz*sizeof(float), cudaMemcpyHostToDevice);
cudaStatus = cudaMemcpy(d_sigmaz, sigmaz, nzz*sizeof(float), cudaMemcpyHostToDevice);
clock_t start=0, end=0;
ofstream time("D:\\PIC\\time.txt",ios::app);
initial_always<<<block,thread>>>(d_pat_elc,lizi_count,DS_number,tmp);//electron initialization
initial_always << <block, thread >> >(d_pat_ion, lizi_count, DS_number,tmp);//ion initialization
device_define_G << <block,thread >> >(nxx, nzz, device_Gn, device_G);
start = clock();
for(int t=0;t<1;t++)
{
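//advance the active-particle tail by 30 each time step (apparently 30 new particles become active per iteration)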
host_temptail+=30;
//device_ave_field<<<block,thread>>>(nxx,nzz,device_G,device_Gn);
device_update_last << <block, thread >> >(d_stac_Bx, d_stac_Bz, d_stac_ex, d_stac_ez, d_pat_elc,d_pre_elc,device_G,DS_number,host_temptail);
//device_update_ion << <block, thread >> >(d_stac_Bx, d_stac_Bz, d_stac_ex, d_stac_ez, d_pat_ion,d_pre_ion,device_G,DS_number,host_temptail);
//cudaMemcpy(wuchafenxi, (d_pat_elc), 30 * sizeof(Paticle), cudaMemcpyDeviceToHost);
//data_save(wuchafenxi);//copy out 30 particles' data each step for comparison; not actually required
current << <block, thread >> >(d_pat_elc, d_pre_elc, device_G, DS_number, host_temptail,t);
//current_ion << <block, thread >> >(d_pat_ion, d_pre_ion, device_G, DS_number, host_temptail,t);
/*cudaMemcpy(G_gpu, device_G, nxx*nzz*sizeof(Grid), cudaMemcpyDeviceToHost);
current_save(G_gpu);*/
cacuchang_hx<<<block,thread>>>(device_G,device_Gn,d_sigmaz1,d_sigmaz,dt,dr,dz,nxx,nzz);
cacuchang_hy << <block,thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_hz<< <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_ex << <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_ey << <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
cacuchang_ez << <block, thread >> >(device_G, device_Gn, d_sigmaz1, d_sigmaz, dt, dr, dz, nxx, nzz);
device_define_G<<<block,thread>>>(nxx,nzz,device_Gn,device_G);
/*end = clock();
cout<<t<<"\t"<<"\t"<<(float)(end - start) / CLOCKS_PER_SEC<<"S"<<endl;
time<<t<<" Szabo1 Run time: "<<(float)(end - start) / CLOCKS_PER_SEC<<"S"<<endl;*/
}
end = clock();
cout<< (float)(end - start) / CLOCKS_PER_SEC << "S" << endl;
cudaMemcpy(G_gpu,device_G, nxx*nzz*sizeof(Grid), cudaMemcpyDeviceToHost);
data_save(G_gpu);//field data at the final time step
cudaStatus=cudaMemcpy(pat_elc,d_pat_elc,lizi_count*sizeof(Paticle),cudaMemcpyDeviceToHost);
cudaStatus=cudaMemcpy(pat_ion,d_pat_ion,lizi_count*sizeof(Paticle),cudaMemcpyDeviceToHost);
data_save(pat_elc,pat_ion);//particle data at the final time step
data_save_ion(pat_ion,pat_elc);//particle data at the final time step
cudaFree(device_G);
cudaFree(device_Gn);
cudaFree(d_pat_elc);
cudaFree(d_pat_ion);
cudaFree(d_rds);
cudaFree(d_rds1);
cudaFree(d_stac_Bx);
cudaFree(d_stac_Bz);
cudaFree(d_stac_ex);
cudaFree(d_stac_ez);
system("pause");
return 0;
}
void data_save(Paticle *pat_elc,Paticle *pat_ion)
{
/*************************************************/
time_inter=1000;
//if(t%time_inter==0)//time_inter
char s_ele[1000];
sprintf_s(s_ele,"D:\\PIC\\输出e\\bingxing1.txt");
ofstream on_ele(s_ele);
for(int i=0;i<host_temptail;i++)
{
on_ele<<pat_elc[i].pr<<"\t"<<pat_elc[i].py<<"\t"<<pat_elc[i].pz<<"\t"<<pat_elc[i].vr<<"\t"<<pat_elc[i].vy<<"\t"<<pat_elc[i].vz<<endl;
}
}
void data_save_ion(Paticle *pat_elc, Paticle *pat_ion)
{
/*************************************************/
time_inter = 1000;
//if(t%time_inter==0)//time_inter
//{
char s_ele[1000];
sprintf_s(s_ele, "D:\\PIC\\输出e\\bingxing2.txt");
ofstream on_ele(s_ele);
for (int i = 0; i<host_temptail; i++)
{
on_ele << pat_elc[i].pr << "\t" << pat_elc[i].py << "\t" << pat_elc[i].pz << "\t" << pat_elc[i].vr << "\t" << pat_elc[i].vy << "\t" << pat_elc[i].vz << endl;
}
}
void data_save(Paticle *pat_elc)
{
/*************************************************/
time_inter = 1000;
char s_ele[1000];
sprintf_s(s_ele, "D:\\PIC\\输出e\\wuchafenxi0.txt");
ofstream on_ele(s_ele, ofstream::app);
for (int i = 0; i<30; i++)
{
on_ele << pat_elc[i].pr << "\t" << pat_elc[i].py << "\t" << pat_elc[i].pz << "\t" << pat_elc[i].vr << "\t" << pat_elc[i].vy << "\t" << pat_elc[i].vz << endl;
}
}
void data_save(Grid* G_GPU)
{
ofstream out_chang("D:\\PIC\\输出e\\chang_gpu.txt");
if (out_chang)
{
for (int i = 0; i < nxx*nzz; i++)
out_chang << G_GPU[i].ex << " " << G_GPU[i].ey << " " << G_GPU[i].ez << " "
<< G_GPU[i].hx << " " << G_GPU[i].hy << " " << G_GPU[i].hz << endl;
}
}
void current_save(Grid* G_GPU)
{
ofstream out_chang("D:\\PIC\\输出e\\current_gpu.txt");
if (out_chang)
{
for (int i = 0; i < nxx*nzz; i++)
out_chang << G_GPU[i].jr << " " << G_GPU[i].jy << " " << G_GPU[i].jz <<endl;
}
}
//void curandnumber(int n)
//{
// cudaMalloc((void**)&DS_number,n*sizeof(float));
// HS_number=one_array_malloc(n);
// srand(1);
// /*for (int i = 0; i < n; i++)
// {
// HS_number[i] = (float)rand() / ((float)RAND_MAX);
// }*/
//
//float HS_number[30] = { 0.00125126,0.193304,0.585009,
// 0.350291,0.82284,0.174108,0.710501,0.303995,0.0914029,
// 0.147313,0.988525,0.119083,0.0089114,0.531663,0.601764,
// 0.166234,0.450789,0.0570391,0.783319,0.519883,0.875973,
// 0.955901,0.539354,0.462081,0.862239,0.779656,0.996796,0.611499,
// 0.266213,0.840144 };
//cudaMemcpy(DS_number,HS_number,n*sizeof(float),cudaMemcpyHostToDevice);
//}
//5000 random numbers
void curandnumber(int n)
{
cudaMalloc((void**)&DS_number, n*sizeof(float));
HS_number = one_array_malloc(n);
srand(1);
for (int i = 0; i < n; i++)
{
HS_number[i] = (float)rand() / ((float)RAND_MAX);
}
cudaMemcpy(DS_number, HS_number, n*sizeof(float), cudaMemcpyHostToDevice);
}
|
246d410a3f7f5fe6d2a2c9fb4fb49080bedcf43a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************
*
* SWAP BLAS: permute to set of N elements
*
********************************************************/
/*
* First version: line per line
*/
typedef struct {
magmaDoubleComplex *A1;
magmaDoubleComplex *A2;
int n, lda1, lda2;
} magmagpu_zlacpy_cnjg_params_t;
__global__ void magmagpu_zlacpy_cnjg( magmagpu_zlacpy_cnjg_params_t params )
{
unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = x*params.lda1;
unsigned int offset2 = x*params.lda2;
if( x < params.n )
{
magmaDoubleComplex *A1 = params.A1 + offset1;
magmaDoubleComplex *A2 = params.A2 + offset2;
*A2 = MAGMA_Z_CNJG(*A1);
}
}
extern "C" void
magmablas_zlacpy_cnjg_q(
magma_int_t n, magmaDoubleComplex *dA1, magma_int_t lda1,
magmaDoubleComplex *dA2, magma_int_t lda2,
magma_queue_t queue )
{
int blocksize = 64;
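// (n+blocksize-1)/blocksize is a ceiling division: enough 64-thread blocks to cover all n elements.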
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magmagpu_zlacpy_cnjg_params_t params = { dA1, dA2, n, lda1, lda2 };
hipLaunchKernelGGL(( magmagpu_zlacpy_cnjg), dim3(blocks), dim3(blocksize), 0, queue , params );
}
extern "C" void
magmablas_zlacpy_cnjg(
magma_int_t n, magmaDoubleComplex *dA1, magma_int_t lda1,
magmaDoubleComplex *dA2, magma_int_t lda2)
{
magmablas_zlacpy_cnjg_q( n, dA1, lda1, dA2, lda2, magma_stream );
}
|
246d410a3f7f5fe6d2a2c9fb4fb49080bedcf43a.cu
|
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************
*
* SWAP BLAS: permute to set of N elements
*
********************************************************/
/*
* First version: line per line
*/
typedef struct {
magmaDoubleComplex *A1;
magmaDoubleComplex *A2;
int n, lda1, lda2;
} magmagpu_zlacpy_cnjg_params_t;
__global__ void magmagpu_zlacpy_cnjg( magmagpu_zlacpy_cnjg_params_t params )
{
unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = x*params.lda1;
unsigned int offset2 = x*params.lda2;
if( x < params.n )
{
magmaDoubleComplex *A1 = params.A1 + offset1;
magmaDoubleComplex *A2 = params.A2 + offset2;
*A2 = MAGMA_Z_CNJG(*A1);
}
}
extern "C" void
magmablas_zlacpy_cnjg_q(
magma_int_t n, magmaDoubleComplex *dA1, magma_int_t lda1,
magmaDoubleComplex *dA2, magma_int_t lda2,
magma_queue_t queue )
{
int blocksize = 64;
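// (n+blocksize-1)/blocksize is a ceiling division: enough 64-thread blocks to cover all n elements.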
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magmagpu_zlacpy_cnjg_params_t params = { dA1, dA2, n, lda1, lda2 };
magmagpu_zlacpy_cnjg<<< blocks, blocksize, 0, queue >>>( params );
}
extern "C" void
magmablas_zlacpy_cnjg(
magma_int_t n, magmaDoubleComplex *dA1, magma_int_t lda1,
magmaDoubleComplex *dA2, magma_int_t lda2)
{
magmablas_zlacpy_cnjg_q( n, dA1, lda1, dA2, lda2, magma_stream );
}
|
e3325a58f76b422c28f8b839ab185a9003a65ba6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdint>
#include <hip/hip_fp16.h>
extern "C"
static __device__ __noinline__ void u16_atomics(
uint16_t *out, uint16_t *slm, uint16_t arg)
// unsigned short int *out, unsigned short int arg)
{
//unsigned short int atomicCAS(unsigned short int *address,
// unsigned short int compare,
// unsigned short int val);
// *out = atomicCAS(out, arg, (uint16_t)(arg+1));
// *out = atomicCAS(out, arg, (unsigned short int)(arg+1));
asm volatile("atom.shared.cas.b16 %0, [%1], %2, %3;" :
"=h"(arg) : "l"(slm), "h"(arg), "h"((uint16_t)(arg+1)));
asm volatile("atom.global.cas.b16 %0, [%1], %2, %3;" :
"=h"(arg) : "l"(out), "h"(arg), "h"((uint16_t)(arg+1)));
// no 16b op
// asm("red.global.cas.b16 [%0], %1, %2;" :: "l"(out), "h"(arg), "h"((uint16_t)(arg+1)));
}
extern "C"
static __device__ __noinline__ void s32_atomics(
int32_t *out, int32_t *_slm, int32_t arg)
{
__shared__ int32_t slm[32];
slm[threadIdx.x] = *out + 4;
__syncthreads();
arg = atomicCAS(slm+arg%32, arg+1, arg);
//
arg = atomicExch(slm+arg%32, arg);
(void)atomicExch(slm+arg%32, arg);
//
arg = atomicMin(slm+arg%32, arg);
(void)atomicMin(slm+arg%32, arg);
arg = atomicMax(slm+arg%32, arg);
(void)atomicMax(slm+arg%32, arg);
//
// (void)atomicInc(slm, arg);
// (void)atomicDec(slm, arg);
//
arg = atomicAdd(slm+arg%32, arg);
(void)atomicAdd(slm+arg%32, arg);
(void)atomicSub(slm+arg%32, arg);
arg += slm[arg % 32];
__syncthreads();
arg = atomicCAS(out, arg+1, arg);
//
arg = atomicExch(out, arg);
(void)atomicExch(out, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
// (void)atomicInc(out, arg);
// (void)atomicDec(out, arg);
//
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
(void)atomicSub(out, arg);
}
extern "C"
static __device__ __noinline__ void u32_atomics(
uint32_t *out, uint32_t *slm, uint32_t arg)
{
arg = atomicCAS(slm, arg+1, arg);
(void)atomicExch(slm, arg);
//
arg = atomicMin(slm, arg);
(void)atomicMin(slm, arg);
arg = atomicMax(slm, arg);
(void)atomicMax(slm, arg);
//
arg = atomicInc(slm, arg);
(void)atomicInc(slm, arg);
arg = atomicDec(slm, arg);
(void)atomicDec(slm, arg);
//
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
arg = atomicSub(slm, arg);
(void)atomicSub(slm, arg);
//
arg = atomicAnd(slm, arg);
(void)atomicAnd(slm, arg);
arg = atomicXor(slm, arg);
(void)atomicXor(slm, arg);
arg = atomicOr(slm, arg);
(void)atomicOr(slm, arg);
__syncthreads();
arg = atomicCAS(out, arg+1, arg);
(void)atomicExch(out, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
arg = atomicInc(out, arg);
(void)atomicInc(out, arg);
arg = atomicDec(out, arg);
(void)atomicDec(out, arg);
//
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
arg = atomicSub(out, arg);
(void)atomicSub(out, arg);
//
arg = atomicAnd(out, arg);
(void)atomicAnd(out, arg);
arg = atomicXor(out, arg);
(void)atomicXor(out, arg);
arg = atomicOr(out, arg);
(void)atomicOr(out, arg);
}
extern "C"
static __device__ __noinline__ void u32_atomics_system(
uint32_t *out, uint32_t *slm, uint32_t arg)
{
(void)atomicExch_system(slm, arg);
arg = atomicCAS_system(slm, arg+1, arg);
//
arg = atomicMin_system(slm, arg);
(void)atomicMin_system(slm, arg);
arg = atomicMax_system(slm, arg);
(void)atomicMax_system(slm, arg);
//
arg = atomicInc_system(slm, arg);
(void)atomicInc_system(slm, arg);
arg = atomicDec_system(slm, arg);
(void)atomicDec_system(slm, arg);
//
arg = atomicAdd_system(slm, arg);
(void)atomicAdd_system(slm, arg);
(void)atomicSub_system(slm, arg);
//
arg = atomicAnd_system(slm, arg);
(void)atomicAnd_system(slm, arg);
arg = atomicXor_system(slm, arg);
(void)atomicXor_system(slm, arg);
arg = atomicOr_system(slm, arg);
(void)atomicOr_system(slm, arg);
__syncthreads();
(void)atomicExch_system(out, arg);
arg = atomicCAS_system(out, arg+1, arg);
//
arg = atomicMin_system(out, arg);
(void)atomicMin_system(out, arg);
arg = atomicMax_system(out, arg);
(void)atomicMax_system(out, arg);
//
arg = atomicInc_system(out, arg);
(void)atomicInc_system(out, arg);
arg = atomicDec_system(out, arg);
(void)atomicDec_system(out, arg);
//
arg = atomicAdd_system(out, arg);
(void)atomicAdd_system(out, arg);
(void)atomicSub_system(out, arg);
//
arg = atomicAnd_system(out, arg);
(void)atomicAnd_system(out, arg);
arg = atomicXor_system(out, arg);
(void)atomicXor_system(out, arg);
arg = atomicOr_system(out, arg);
(void)atomicOr_system(out, arg);
}
extern "C"
static __device__ __noinline__ void u32_atomics_block(
uint32_t *out, uint32_t *slm, uint32_t arg)
{
arg = atomicCAS_block(slm, arg+1, arg);
(void)atomicExch_block(slm, arg);
//
(void)atomicMin_block(slm, arg);
(void)atomicMax_block(slm, arg);
//
arg = atomicInc_block(slm, arg);
(void)atomicInc_block(slm, arg);
arg = atomicDec_block(slm, arg);
(void)atomicDec_block(slm, arg);
//
arg = atomicAdd_block(slm, arg);
(void)atomicAdd_block(slm, arg);
(void)atomicSub_block(slm, arg);
//
arg = atomicAnd_block(slm, arg);
(void)atomicAnd_block(slm, arg);
arg = atomicXor_block(slm, arg);
(void)atomicXor_block(slm, arg);
arg = atomicOr_block(slm, arg);
(void)atomicOr_block(slm, arg);
__syncthreads();
arg = atomicCAS_block(out, arg+1, arg);
(void)atomicExch_block(out, arg);
//
(void)atomicMin_block(out, arg);
(void)atomicMax_block(out, arg);
//
arg = atomicInc_block(out, arg);
(void)atomicInc_block(out, arg);
arg = atomicDec_block(out, arg);
(void)atomicDec_block(out, arg);
//
arg = atomicAdd_block(out, arg);
(void)atomicAdd_block(out, arg);
(void)atomicSub_block(out, arg);
//
arg = atomicAnd_block(out, arg);
(void)atomicAnd_block(out, arg);
arg = atomicXor_block(out, arg);
(void)atomicXor_block(out, arg);
arg = atomicOr_block(out, arg);
(void)atomicOr_block(out, arg);
}
extern "C"
static __device__ __noinline__ void u64_atomics(
uint64_t *out, uint64_t *slm, uint64_t arg)
{
arg = atomicCAS(slm, arg+1, arg);
//
arg = atomicMin(slm, arg);
(void)atomicMin(slm, arg);
arg = atomicMax(slm, arg);
(void)atomicMax(slm, arg);
//
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
//
arg = atomicAnd(slm, arg);
(void)atomicAnd(slm, arg);
arg = atomicXor(slm, arg);
(void)atomicXor(slm, arg);
arg = atomicOr(slm, arg);
(void)atomicOr(slm, arg);
__syncthreads();
arg = atomicCAS(out, arg+1, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
//
arg = atomicAnd(out, arg);
(void)atomicAnd(out, arg);
arg = atomicXor(out, arg);
(void)atomicXor(out, arg);
arg = atomicOr(out, arg);
(void)atomicOr(out, arg);
}
extern "C"
static __device__ __noinline__ void s64_atomics(
int64_t *out, int64_t *_slm, int64_t arg)
{
__shared__ int64_t slm[32];
slm[threadIdx.x] = *out + 4;
__syncthreads();
arg = atomicMin(slm + arg%32, arg);
(void)atomicMin(slm + arg%32, arg);
arg = atomicMax(slm + arg%32, arg);
(void)atomicMax(slm + arg%32, arg);
//
// odd that they include bitwise, but not addition
// (signed and unsigned are the same for both)
//
// arg = atomicAdd(slm, arg);
// (void)atomicAdd(slm, arg);
//
arg = atomicAnd(slm + arg%32, arg);
(void)atomicAnd(slm + arg%32, arg);
arg = atomicXor(slm + arg%32, arg);
(void)atomicXor(slm + arg%32, arg);
arg = atomicOr(slm + arg%32, arg);
(void)atomicOr(slm + arg%32, arg);
__syncthreads();
// arg = atomicCAS(out, arg+1, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
// odd that they include bitwise, but not addition
// arg = atomicAdd(out, arg);
// (void)atomicAdd(out, arg);
//
arg = atomicAnd(out, arg);
(void)atomicAnd(out, arg);
arg = atomicXor(out, arg);
(void)atomicXor(out, arg);
arg = atomicOr(out, arg);
(void)atomicOr(out, arg);
}
extern "C"
__device__ __noinline__ void f16_atomics(
__half *out, __half *slm, __half arg)
{
// even with local SLM the compiler generates generic ATOM
// __shared__ __half slm[32];
// slm[threadIdx.x] = *(out + 1);
// __syncthreads();
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
}
extern "C"
__device__ __noinline__ void f16x2_atomics(
__half2 *out, __half2 *slm, __half2 arg)
{
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
}
extern "C"
__device__ __noinline__ void f32_atomics(
float *out, float *slm, float arg)
{
arg = atomicExch(slm, arg);
//
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
arg = atomicExch(out, arg);
// atomicMin(out, arg);
// atomicMax(out, arg);
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
// atomicSub(out, arg);
//
// "Unimplemented feature: floating-point reduction operation"
// no f32 min
// asm("red.global.min.f32 [%0], %1;" :: "l"(out), "f"(arg));
// asm("atom.global.min.f32 %0, [%1], %2;" : "=f"(arg) : "l"(out), "f"(arg));
// *out += arg;
}
extern "C"
static __device__ __noinline__ void f64_atomics(
double *out, double *slm, double arg)
{
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
// arg = atomicExch(out, arg);
// atomicMin(out, arg);
// atomicMax(out, arg);
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
// atomicSub(out, arg);
}
extern "C" __global__ void run_atomics(
__half *f16OUT,
const __half *f16A,
__half2 *f16x2OUT,
const __half2 *f16x2A,
float *f32OUT,
const float *f32A,
double *f64OUT,
const double *f64A,
int32_t *i32OUT,
const int32_t *i32A,
uint32_t *u32OUT,
const uint32_t *u32A,
uint16_t *u16OUT,
const uint16_t *u16A,
int64_t *s64OUT,
const int64_t *s64A,
uint64_t *u64OUT,
const uint64_t *u64A)
{
__shared__ uint16_t u16_slm[32];
__shared__ uint32_t u32_slm[32];
__shared__ uint64_t u64_slm[32];
__shared__ float f32_slm[32];
__shared__ double f64_slm[32];
int id = blockDim.x * blockIdx.x + threadIdx.x;
int sid = threadIdx.x % 32;
int sid2 = ((sid+1)%32);
//
u16_slm[sid] = u16A[id];
u32_slm[sid] = u32A[id];
u64_slm[sid] = u64A[id];
f32_slm[sid] = f32A[id];
f64_slm[sid] = f64A[id];
//
__syncthreads();
//
u16_atomics(u16OUT+id, u16_slm+sid2, u16A[id]);
//
s32_atomics(i32OUT+id, (int *)u32_slm+sid2, i32A[id]);
//
u32_atomics(u32OUT+id, u32_slm+sid2, u32A[id]);
u32_atomics_system(u32OUT+id, u32_slm+sid2, u32A[id]);
u32_atomics_block(u32OUT+id, u32_slm+sid2, u32A[id]);
//
s64_atomics(s64OUT+id, (int64_t*)u64_slm+sid2, s64A[id]);
//
u64_atomics(u64OUT+id, u64_slm+sid2, u64A[id]);
//
f16_atomics(f16OUT+id, (__half *)(u16_slm+sid), f16A[id]);
//
f16x2_atomics(f16x2OUT+id, (__half2 *)u32_slm+sid2, f16x2A[id]);
//
f32_atomics(f32OUT+id, f32_slm+sid2, f32A[id]);
// f32_atomics_slm(f32_slm+sid2,f32A[id]);
//
f64_atomics(f64OUT+id, f64_slm+sid2, f64A[id]);
//
__syncthreads();
u16OUT[sid] += u16_slm[threadIdx.x];
u32OUT[sid] += u32_slm[threadIdx.x];
u64OUT[sid] += u64_slm[threadIdx.x];
f32OUT[sid] += f32_slm[threadIdx.x];
f64OUT[sid] += f64_slm[threadIdx.x];
}
/*
extern "C" __global__ void run_atomics_inline_ptx(
__half *f16OUT,
const __half *f16A,
__half2 *f16x2OUT,
const __half2 *f16x2A,
float *f32OUT,
const float *f32A,
double *f64OUT,
const double *f64A,
int32_t *i32OUT,
const int32_t *i32A,
uint32_t *u32OUT,
const uint32_t *u32A,
uint16_t *u16OUT,
const uint16_t *u16A,
int64_t *s64OUT,
const int64_t *s64A,
uint64_t *u64OUT,
const uint64_t *u64A)
{
__shared__ uint16_t u16_slm[32];
__shared__ uint32_t u32_slm[32];
__shared__ float f32_slm[32];
int id = blockDim.x * blockIdx.x + threadIdx.x;
int sid = threadIdx.x % 32;
u32_slm[sid] = u32A[id];
f32_slm[sid] = f32A[id];
__syncthreads();
uint16_t r = 0, s = 0, t = 1;
asm("atom.global.cas.b16 %0, [%1], %2, %3;" :
"+h"(r) : "l"(u16OUT+id), "h"(s), "h"(t));
asm("atom.shared.cas.b16 %0, [%1], %2, %3;" :
"+h"(r) : "l"(u16_slm+sid), "h"(s), "h"(t));
__syncthreads();
u16OUT[id] = r + s + t + u16_slm[sid];
u32OUT[id] = u32_slm[sid];
f32OUT[id] = f32_slm[sid];
}
*/
|
e3325a58f76b422c28f8b839ab185a9003a65ba6.cu
|
#include <cstdint>
#include <cuda_fp16.h>
extern "C"
static __device__ __noinline__ void u16_atomics(
uint16_t *out, uint16_t *slm, uint16_t arg)
// unsigned short int *out, unsigned short int arg)
{
//unsigned short int atomicCAS(unsigned short int *address,
// unsigned short int compare,
// unsigned short int val);
// *out = atomicCAS(out, arg, (uint16_t)(arg+1));
// *out = atomicCAS(out, arg, (unsigned short int)(arg+1));
asm volatile("atom.shared.cas.b16 %0, [%1], %2, %3;" :
"=h"(arg) : "l"(slm), "h"(arg), "h"((uint16_t)(arg+1)));
asm volatile("atom.global.cas.b16 %0, [%1], %2, %3;" :
"=h"(arg) : "l"(out), "h"(arg), "h"((uint16_t)(arg+1)));
// no 16b op
// asm("red.global.cas.b16 [%0], %1, %2;" :: "l"(out), "h"(arg), "h"((uint16_t)(arg+1)));
}
extern "C"
static __device__ __noinline__ void s32_atomics(
int32_t *out, int32_t *_slm, int32_t arg)
{
__shared__ int32_t slm[32];
slm[threadIdx.x] = *out + 4;
__syncthreads();
arg = atomicCAS(slm+arg%32, arg+1, arg);
//
arg = atomicExch(slm+arg%32, arg);
(void)atomicExch(slm+arg%32, arg);
//
arg = atomicMin(slm+arg%32, arg);
(void)atomicMin(slm+arg%32, arg);
arg = atomicMax(slm+arg%32, arg);
(void)atomicMax(slm+arg%32, arg);
//
// (void)atomicInc(slm, arg);
// (void)atomicDec(slm, arg);
//
arg = atomicAdd(slm+arg%32, arg);
(void)atomicAdd(slm+arg%32, arg);
(void)atomicSub(slm+arg%32, arg);
arg += slm[arg % 32];
__syncthreads();
arg = atomicCAS(out, arg+1, arg);
//
arg = atomicExch(out, arg);
(void)atomicExch(out, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
// (void)atomicInc(out, arg);
// (void)atomicDec(out, arg);
//
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
(void)atomicSub(out, arg);
}
extern "C"
static __device__ __noinline__ void u32_atomics(
uint32_t *out, uint32_t *slm, uint32_t arg)
{
arg = atomicCAS(slm, arg+1, arg);
(void)atomicExch(slm, arg);
//
arg = atomicMin(slm, arg);
(void)atomicMin(slm, arg);
arg = atomicMax(slm, arg);
(void)atomicMax(slm, arg);
//
arg = atomicInc(slm, arg);
(void)atomicInc(slm, arg);
arg = atomicDec(slm, arg);
(void)atomicDec(slm, arg);
//
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
arg = atomicSub(slm, arg);
(void)atomicSub(slm, arg);
//
arg = atomicAnd(slm, arg);
(void)atomicAnd(slm, arg);
arg = atomicXor(slm, arg);
(void)atomicXor(slm, arg);
arg = atomicOr(slm, arg);
(void)atomicOr(slm, arg);
__syncthreads();
arg = atomicCAS(out, arg+1, arg);
(void)atomicExch(out, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
arg = atomicInc(out, arg);
(void)atomicInc(out, arg);
arg = atomicDec(out, arg);
(void)atomicDec(out, arg);
//
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
arg = atomicSub(out, arg);
(void)atomicSub(out, arg);
//
arg = atomicAnd(out, arg);
(void)atomicAnd(out, arg);
arg = atomicXor(out, arg);
(void)atomicXor(out, arg);
arg = atomicOr(out, arg);
(void)atomicOr(out, arg);
}
extern "C"
static __device__ __noinline__ void u32_atomics_system(
uint32_t *out, uint32_t *slm, uint32_t arg)
{
(void)atomicExch_system(slm, arg);
arg = atomicCAS_system(slm, arg+1, arg);
//
arg = atomicMin_system(slm, arg);
(void)atomicMin_system(slm, arg);
arg = atomicMax_system(slm, arg);
(void)atomicMax_system(slm, arg);
//
arg = atomicInc_system(slm, arg);
(void)atomicInc_system(slm, arg);
arg = atomicDec_system(slm, arg);
(void)atomicDec_system(slm, arg);
//
arg = atomicAdd_system(slm, arg);
(void)atomicAdd_system(slm, arg);
(void)atomicSub_system(slm, arg);
//
arg = atomicAnd_system(slm, arg);
(void)atomicAnd_system(slm, arg);
arg = atomicXor_system(slm, arg);
(void)atomicXor_system(slm, arg);
arg = atomicOr_system(slm, arg);
(void)atomicOr_system(slm, arg);
__syncthreads();
(void)atomicExch_system(out, arg);
arg = atomicCAS_system(out, arg+1, arg);
//
arg = atomicMin_system(out, arg);
(void)atomicMin_system(out, arg);
arg = atomicMax_system(out, arg);
(void)atomicMax_system(out, arg);
//
arg = atomicInc_system(out, arg);
(void)atomicInc_system(out, arg);
arg = atomicDec_system(out, arg);
(void)atomicDec_system(out, arg);
//
arg = atomicAdd_system(out, arg);
(void)atomicAdd_system(out, arg);
(void)atomicSub_system(out, arg);
//
arg = atomicAnd_system(out, arg);
(void)atomicAnd_system(out, arg);
arg = atomicXor_system(out, arg);
(void)atomicXor_system(out, arg);
arg = atomicOr_system(out, arg);
(void)atomicOr_system(out, arg);
}
extern "C"
static __device__ __noinline__ void u32_atomics_block(
uint32_t *out, uint32_t *slm, uint32_t arg)
{
arg = atomicCAS_block(slm, arg+1, arg);
(void)atomicExch_block(slm, arg);
//
(void)atomicMin_block(slm, arg);
(void)atomicMax_block(slm, arg);
//
arg = atomicInc_block(slm, arg);
(void)atomicInc_block(slm, arg);
arg = atomicDec_block(slm, arg);
(void)atomicDec_block(slm, arg);
//
arg = atomicAdd_block(slm, arg);
(void)atomicAdd_block(slm, arg);
(void)atomicSub_block(slm, arg);
//
arg = atomicAnd_block(slm, arg);
(void)atomicAnd_block(slm, arg);
arg = atomicXor_block(slm, arg);
(void)atomicXor_block(slm, arg);
arg = atomicOr_block(slm, arg);
(void)atomicOr_block(slm, arg);
__syncthreads();
arg = atomicCAS_block(out, arg+1, arg);
(void)atomicExch_block(out, arg);
//
(void)atomicMin_block(out, arg);
(void)atomicMax_block(out, arg);
//
arg = atomicInc_block(out, arg);
(void)atomicInc_block(out, arg);
arg = atomicDec_block(out, arg);
(void)atomicDec_block(out, arg);
//
arg = atomicAdd_block(out, arg);
(void)atomicAdd_block(out, arg);
(void)atomicSub_block(out, arg);
//
arg = atomicAnd_block(out, arg);
(void)atomicAnd_block(out, arg);
arg = atomicXor_block(out, arg);
(void)atomicXor_block(out, arg);
arg = atomicOr_block(out, arg);
(void)atomicOr_block(out, arg);
}
extern "C"
static __device__ __noinline__ void u64_atomics(
uint64_t *out, uint64_t *slm, uint64_t arg)
{
arg = atomicCAS(slm, arg+1, arg);
//
arg = atomicMin(slm, arg);
(void)atomicMin(slm, arg);
arg = atomicMax(slm, arg);
(void)atomicMax(slm, arg);
//
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
//
arg = atomicAnd(slm, arg);
(void)atomicAnd(slm, arg);
arg = atomicXor(slm, arg);
(void)atomicXor(slm, arg);
arg = atomicOr(slm, arg);
(void)atomicOr(slm, arg);
__syncthreads();
arg = atomicCAS(out, arg+1, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
//
arg = atomicAnd(out, arg);
(void)atomicAnd(out, arg);
arg = atomicXor(out, arg);
(void)atomicXor(out, arg);
arg = atomicOr(out, arg);
(void)atomicOr(out, arg);
}
extern "C"
static __device__ __noinline__ void s64_atomics(
int64_t *out, int64_t *_slm, int64_t arg)
{
__shared__ int64_t slm[32];
slm[threadIdx.x] = *out + 4;
__syncthreads();
arg = atomicMin(slm + arg%32, arg);
(void)atomicMin(slm + arg%32, arg);
arg = atomicMax(slm + arg%32, arg);
(void)atomicMax(slm + arg%32, arg);
//
// odd that they include bitwise, but not addition
// (signed and unsigned are the same for both)
//
// arg = atomicAdd(slm, arg);
// (void)atomicAdd(slm, arg);
//
arg = atomicAnd(slm + arg%32, arg);
(void)atomicAnd(slm + arg%32, arg);
arg = atomicXor(slm + arg%32, arg);
(void)atomicXor(slm + arg%32, arg);
arg = atomicOr(slm + arg%32, arg);
(void)atomicOr(slm + arg%32, arg);
__syncthreads();
// arg = atomicCAS(out, arg+1, arg);
//
arg = atomicMin(out, arg);
(void)atomicMin(out, arg);
arg = atomicMax(out, arg);
(void)atomicMax(out, arg);
//
// odd that they include bitwise, but not addition
// arg = atomicAdd(out, arg);
// (void)atomicAdd(out, arg);
//
arg = atomicAnd(out, arg);
(void)atomicAnd(out, arg);
arg = atomicXor(out, arg);
(void)atomicXor(out, arg);
arg = atomicOr(out, arg);
(void)atomicOr(out, arg);
}
extern "C"
__device__ __noinline__ void f16_atomics(
__half *out, __half *slm, __half arg)
{
// even with local SLM the compiler generates generic ATOM
// __shared__ __half slm[32];
// slm[threadIdx.x] = *(out + 1);
// __syncthreads();
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
}
extern "C"
__device__ __noinline__ void f16x2_atomics(
__half2 *out, __half2 *slm, __half2 arg)
{
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
}
extern "C"
__device__ __noinline__ void f32_atomics(
float *out, float *slm, float arg)
{
arg = atomicExch(slm, arg);
//
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
arg = atomicExch(out, arg);
// atomicMin(out, arg);
// atomicMax(out, arg);
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
// atomicSub(out, arg);
//
// "Unimplemented feature: floating-point reduction operation"
// no f32 min
// asm("red.global.min.f32 [%0], %1;" :: "l"(out), "f"(arg));
// asm("atom.global.min.f32 %0, [%1], %2;" : "=f"(arg) : "l"(out), "f"(arg));
// *out += arg;
}
extern "C"
static __device__ __noinline__ void f64_atomics(
double *out, double *slm, double arg)
{
arg = atomicAdd(slm, arg);
(void)atomicAdd(slm, arg);
__syncthreads();
// arg = atomicExch(out, arg);
// atomicMin(out, arg);
// atomicMax(out, arg);
arg = atomicAdd(out, arg);
(void)atomicAdd(out, arg);
// atomicSub(out, arg);
}
extern "C" __global__ void run_atomics(
__half *f16OUT,
const __half *f16A,
__half2 *f16x2OUT,
const __half2 *f16x2A,
float *f32OUT,
const float *f32A,
double *f64OUT,
const double *f64A,
int32_t *i32OUT,
const int32_t *i32A,
uint32_t *u32OUT,
const uint32_t *u32A,
uint16_t *u16OUT,
const uint16_t *u16A,
int64_t *s64OUT,
const int64_t *s64A,
uint64_t *u64OUT,
const uint64_t *u64A)
{
__shared__ uint16_t u16_slm[32];
__shared__ uint32_t u32_slm[32];
__shared__ uint64_t u64_slm[32];
__shared__ float f32_slm[32];
__shared__ double f64_slm[32];
int id = blockDim.x * blockIdx.x + threadIdx.x;
int sid = threadIdx.x % 32;
int sid2 = ((sid+1)%32);
//
u16_slm[sid] = u16A[id];
u32_slm[sid] = u32A[id];
u64_slm[sid] = u64A[id];
f32_slm[sid] = f32A[id];
f64_slm[sid] = f64A[id];
//
__syncthreads();
//
u16_atomics(u16OUT+id, u16_slm+sid2, u16A[id]);
//
s32_atomics(i32OUT+id, (int *)u32_slm+sid2, i32A[id]);
//
u32_atomics(u32OUT+id, u32_slm+sid2, u32A[id]);
u32_atomics_system(u32OUT+id, u32_slm+sid2, u32A[id]);
u32_atomics_block(u32OUT+id, u32_slm+sid2, u32A[id]);
//
s64_atomics(s64OUT+id, (int64_t*)u64_slm+sid2, s64A[id]);
//
u64_atomics(u64OUT+id, u64_slm+sid2, u64A[id]);
//
f16_atomics(f16OUT+id, (__half *)(u16_slm+sid), f16A[id]);
//
f16x2_atomics(f16x2OUT+id, (__half2 *)u32_slm+sid2, f16x2A[id]);
//
f32_atomics(f32OUT+id, f32_slm+sid2, f32A[id]);
// f32_atomics_slm(f32_slm+sid2,f32A[id]);
//
f64_atomics(f64OUT+id, f64_slm+sid2, f64A[id]);
//
__syncthreads();
u16OUT[sid] += u16_slm[threadIdx.x];
u32OUT[sid] += u32_slm[threadIdx.x];
u64OUT[sid] += u64_slm[threadIdx.x];
f32OUT[sid] += f32_slm[threadIdx.x];
f64OUT[sid] += f64_slm[threadIdx.x];
}
/*
extern "C" __global__ void run_atomics_inline_ptx(
__half *f16OUT,
const __half *f16A,
__half2 *f16x2OUT,
const __half2 *f16x2A,
float *f32OUT,
const float *f32A,
double *f64OUT,
const double *f64A,
int32_t *i32OUT,
const int32_t *i32A,
uint32_t *u32OUT,
const uint32_t *u32A,
uint16_t *u16OUT,
const uint16_t *u16A,
int64_t *s64OUT,
const int64_t *s64A,
uint64_t *u64OUT,
const uint64_t *u64A)
{
__shared__ uint16_t u16_slm[32];
__shared__ uint32_t u32_slm[32];
__shared__ float f32_slm[32];
int id = blockDim.x * blockIdx.x + threadIdx.x;
int sid = threadIdx.x % 32;
u32_slm[sid] = u32A[id];
f32_slm[sid] = f32A[id];
__syncthreads();
uint16_t r = 0, s = 0, t = 1;
asm("atom.global.cas.b16 %0, [%1], %2, %3;" :
"+h"(r) : "l"(u16OUT+id), "h"(s), "h"(t));
asm("atom.shared.cas.b16 %0, [%1], %2, %3;" :
"+h"(r) : "l"(u16_slm+sid), "h"(s), "h"(t));
__syncthreads();
u16OUT[id] = r + s + t + u16_slm[sid];
u32OUT[id] = u32_slm[sid];
f32OUT[id] = f32_slm[sid];
}
*/
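// Illustrative host-side driver for the run_atomics kernel above; it is not part
// of the original test source. A minimal sketch, assuming a single 32-thread block
// (matching the 32-entry shared arrays indexed with threadIdx.x % 32) and
// zero-initialized buffers; the helper alloc_pair() and run_atomics_demo() names
// are made up for this example.
#include <cstdio>
template <typename T>
static void alloc_pair(T *&out, const T *&in, int n)
{
    T *a = nullptr, *b = nullptr;
    cudaMalloc(&a, n * sizeof(T));
    cudaMalloc(&b, n * sizeof(T));
    cudaMemset(a, 0, n * sizeof(T));     // output buffer
    cudaMemset(b, 0, n * sizeof(T));     // read-only input buffer
    out = a;
    in = b;
}
static int run_atomics_demo()
{
    const int n = 32;                    // one warp; every OUT/A buffer holds n elements
    __half *f16O; const __half *f16A; __half2 *f16x2O; const __half2 *f16x2A;
    float *f32O; const float *f32A; double *f64O; const double *f64A;
    int32_t *i32O; const int32_t *i32A; uint32_t *u32O; const uint32_t *u32A;
    uint16_t *u16O; const uint16_t *u16A; int64_t *s64O; const int64_t *s64A;
    uint64_t *u64O; const uint64_t *u64A;
    alloc_pair(f16O, f16A, n);   alloc_pair(f16x2O, f16x2A, n);
    alloc_pair(f32O, f32A, n);   alloc_pair(f64O, f64A, n);
    alloc_pair(i32O, i32A, n);   alloc_pair(u32O, u32A, n);
    alloc_pair(u16O, u16A, n);   alloc_pair(s64O, s64A, n);
    alloc_pair(u64O, u64A, n);
    run_atomics<<<1, n>>>(f16O, f16A, f16x2O, f16x2A, f32O, f32A, f64O, f64A,
                          i32O, i32A, u32O, u32A, u16O, u16A, s64O, s64A, u64O, u64A);
    cudaError_t err = cudaDeviceSynchronize();
    printf("run_atomics: %s\n", cudaGetErrorString(err));
    return err == cudaSuccess ? 0 : 1;
}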
|
93f4293eb73ebc6f208fccb2c4e0e9c5d46503aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transposeUnroll4Row(float *out, float *in, const int nx, const int ny)
{
unsigned int ix = blockDim.x * blockIdx.x * 4 + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int ti = iy * nx + ix; // access in rows
unsigned int to = ix * ny + iy; // access in columns
if (ix + 3 * blockDim.x < nx && iy < ny)
{
out[to] = in[ti];
out[to + ny * blockDim.x] = in[ti + blockDim.x];
out[to + ny * 2 * blockDim.x] = in[ti + 2 * blockDim.x];
out[to + ny * 3 * blockDim.x] = in[ti + 3 * blockDim.x];
}
}
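// Illustrative host-side launch for the transposeUnroll4Row kernel above; not part
// of the original file. A minimal sketch, assuming a 2048 x 2048 row-major matrix
// and a 16 x 16 thread block; grid.x covers blockDim.x * 4 columns per block
// because the kernel unrolls four block-wide tiles along x.
#include <vector>
static void transpose_unroll4_demo()
{
    const int nx = 2048, ny = 2048;
    const dim3 block(16, 16);
    const dim3 grid((nx + block.x * 4 - 1) / (block.x * 4),
                    (ny + block.y - 1) / block.y);
    float *d_in = nullptr, *d_out = nullptr;
    hipMalloc(&d_in, nx * ny * sizeof(float));
    hipMalloc(&d_out, nx * ny * sizeof(float));
    std::vector<float> h_in(nx * ny, 1.0f);
    hipMemcpy(d_in, h_in.data(), nx * ny * sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(transposeUnroll4Row, grid, block, 0, 0, d_out, d_in, nx, ny);
    hipDeviceSynchronize();
    hipFree(d_in);
    hipFree(d_out);
}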
|
93f4293eb73ebc6f208fccb2c4e0e9c5d46503aa.cu
|
#include "includes.h"
__global__ void transposeUnroll4Row(float *out, float *in, const int nx, const int ny)
{
unsigned int ix = blockDim.x * blockIdx.x * 4 + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int ti = iy * nx + ix; // access in rows
unsigned int to = ix * ny + iy; // access in columns
if (ix + 3 * blockDim.x < nx && iy < ny)
{
out[to] = in[ti];
out[to + ny * blockDim.x] = in[ti + blockDim.x];
out[to + ny * 2 * blockDim.x] = in[ti + 2 * blockDim.x];
out[to + ny * 3 * blockDim.x] = in[ti + 3 * blockDim.x];
}
}
|
48651d2d90a4b1ff049a75f0519508446b389159.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/anchor_mask_cuda.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
namespace apollo {
namespace perception {
namespace lidar {
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_x(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid * bdim * 2 + 2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid * bdim * 2 + 2 * thid + 1];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid * bdim * 2 + 2 * thid] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid * bdim * 2 + 2 * thid + 1] =
temp[2 * thid + 1] + g_idata[bid * bdim * 2 + 2 * thid + 1];
} else {
g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2];
}
}
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_y(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int gdim = gridDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid + 2 * thid * gdim]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid + 2 * thid * gdim] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid + 2 * thid * gdim + gdim] =
temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim];
} else {
g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2];
}
}
__global__ void make_anchor_mask_kernel(
const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y,
const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y,
int* dev_sparse_pillar_map, int* dev_anchor_mask, const float min_x_range,
const float min_y_range, const float pillar_x_size,
const float pillar_y_size, const int grid_x_size, const int grid_y_size,
const int num_inds_for_scan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int anchor_coor[NUM_2D_BOX_CORNERS_MACRO] = {0};
const int grid_x_size_1 = grid_x_size - 1; // grid_x_size - 1
const int grid_y_size_1 = grid_y_size - 1; // grid_y_size - 1
anchor_coor[0] =
floor((dev_box_anchors_min_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[1] =
floor((dev_box_anchors_min_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[2] =
floor((dev_box_anchors_max_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[3] =
floor((dev_box_anchors_max_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[0] = max(anchor_coor[0], 0);
anchor_coor[1] = max(anchor_coor[1], 0);
anchor_coor[2] = min(anchor_coor[2], grid_x_size_1);
anchor_coor[3] = min(anchor_coor[3], grid_y_size_1);
int right_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[2]];
int left_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[0]];
int left_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[0]];
int right_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[2]];
int area = right_top - left_top - right_bottom + left_bottom;
if (area > 1) {
dev_anchor_mask[tid] = 1;
} else {
dev_anchor_mask[tid] = 0;
}
}
AnchorMaskCuda::AnchorMaskCuda(const int num_threads,
const int num_inds_for_scan,
const int num_anchor,
const float min_x_range,
const float min_y_range,
const float pillar_x_size,
const float pillar_y_size,
const int grid_x_size,
const int grid_y_size)
: num_threads_(num_threads),
num_inds_for_scan_(num_inds_for_scan),
num_anchor_(num_anchor),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size) {}
void AnchorMaskCuda::DoAnchorMaskCuda(
int* dev_sparse_pillar_map, int* dev_cumsum_along_x,
int* dev_cumsum_along_y, const float* dev_box_anchors_min_x,
const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x,
const float* dev_box_anchors_max_y, int* dev_anchor_mask) {
hipLaunchKernelGGL(( scan_x), dim3(num_inds_for_scan_), dim3(num_inds_for_scan_ / 2),
num_inds_for_scan_ * sizeof(int), 0,
dev_cumsum_along_x, dev_sparse_pillar_map, num_inds_for_scan_);
hipLaunchKernelGGL(( scan_y), dim3(num_inds_for_scan_), dim3(num_inds_for_scan_ / 2),
num_inds_for_scan_ * sizeof(int), 0,
dev_cumsum_along_y, dev_cumsum_along_x, num_inds_for_scan_);
GPU_CHECK(hipMemcpy(dev_sparse_pillar_map, dev_cumsum_along_y,
num_inds_for_scan_ * num_inds_for_scan_ * sizeof(int),
hipMemcpyDeviceToDevice));
int num_blocks = DIVUP(num_anchor_, num_threads_);
hipLaunchKernelGGL(( make_anchor_mask_kernel), dim3(num_blocks), dim3(num_threads_), 0, 0,
dev_box_anchors_min_x, dev_box_anchors_min_y, dev_box_anchors_max_x,
dev_box_anchors_max_y, dev_sparse_pillar_map, dev_anchor_mask,
min_x_range_, min_y_range_, pillar_x_size_, pillar_y_size_, grid_x_size_,
grid_y_size_, num_inds_for_scan_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
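// CPU reference for the scan_x / scan_y pair above; not part of the original file.
// A minimal sketch: scan_x produces per-row inclusive prefix sums and scan_y then
// accumulates those along columns, so the composition yields an inclusive 2D
// summed-area table of the sparse pillar map, which make_anchor_mask_kernel
// queries with the four-corner trick. Useful for validating the device output on
// a small num_inds_for_scan x num_inds_for_scan grid.
#include <vector>
inline std::vector<int> summed_area_table_ref(const std::vector<int> &in, int n)
{
    std::vector<int> out(in.size());
    for (int y = 0; y < n; ++y) {              // row-wise inclusive scan (scan_x)
        int acc = 0;
        for (int x = 0; x < n; ++x) {
            acc += in[y * n + x];
            out[y * n + x] = acc;
        }
    }
    for (int x = 0; x < n; ++x) {              // column-wise inclusive scan (scan_y)
        int acc = 0;
        for (int y = 0; y < n; ++y) {
            acc += out[y * n + x];
            out[y * n + x] = acc;
        }
    }
    return out;
}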
|
48651d2d90a4b1ff049a75f0519508446b389159.cu
|
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/anchor_mask_cuda.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
namespace apollo {
namespace perception {
namespace lidar {
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_x(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid * bdim * 2 + 2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid * bdim * 2 + 2 * thid + 1];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid * bdim * 2 + 2 * thid] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid * bdim * 2 + 2 * thid + 1] =
temp[2 * thid + 1] + g_idata[bid * bdim * 2 + 2 * thid + 1];
} else {
g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2];
}
}
// modified prefix sum code from
// https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void scan_y(int* g_odata, int* g_idata, int n) {
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int gdim = gridDim.x;
int offset = 1;
temp[2 * thid] =
g_idata[bid + 2 * thid * gdim]; // load input into shared memory
temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim];
for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
temp[n - 1] = 0;
} // clear the last element
for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid + 2 * thid * gdim] =
temp[2 * thid + 1]; // write results to device memory
int second_ind = 2 * thid + 2;
if (second_ind == bdim * 2) {
g_odata[bid + 2 * thid * gdim + gdim] =
temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim];
} else {
g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2];
}
}
__global__ void make_anchor_mask_kernel(
const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y,
const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y,
int* dev_sparse_pillar_map, int* dev_anchor_mask, const float min_x_range,
const float min_y_range, const float pillar_x_size,
const float pillar_y_size, const int grid_x_size, const int grid_y_size,
const int num_inds_for_scan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int anchor_coor[NUM_2D_BOX_CORNERS_MACRO] = {0};
const int grid_x_size_1 = grid_x_size - 1; // grid_x_size - 1
const int grid_y_size_1 = grid_y_size - 1; // grid_y_size - 1
anchor_coor[0] =
floor((dev_box_anchors_min_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[1] =
floor((dev_box_anchors_min_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[2] =
floor((dev_box_anchors_max_x[tid] - min_x_range) / pillar_x_size);
anchor_coor[3] =
floor((dev_box_anchors_max_y[tid] - min_y_range) / pillar_y_size);
anchor_coor[0] = max(anchor_coor[0], 0);
anchor_coor[1] = max(anchor_coor[1], 0);
anchor_coor[2] = min(anchor_coor[2], grid_x_size_1);
anchor_coor[3] = min(anchor_coor[3], grid_y_size_1);
int right_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[2]];
int left_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[0]];
int left_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan +
anchor_coor[0]];
int right_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan +
anchor_coor[2]];
int area = right_top - left_top - right_bottom + left_bottom;
if (area > 1) {
dev_anchor_mask[tid] = 1;
} else {
dev_anchor_mask[tid] = 0;
}
}
AnchorMaskCuda::AnchorMaskCuda(const int num_threads,
const int num_inds_for_scan,
const int num_anchor,
const float min_x_range,
const float min_y_range,
const float pillar_x_size,
const float pillar_y_size,
const int grid_x_size,
const int grid_y_size)
: num_threads_(num_threads),
num_inds_for_scan_(num_inds_for_scan),
num_anchor_(num_anchor),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size) {}
void AnchorMaskCuda::DoAnchorMaskCuda(
int* dev_sparse_pillar_map, int* dev_cumsum_along_x,
int* dev_cumsum_along_y, const float* dev_box_anchors_min_x,
const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x,
const float* dev_box_anchors_max_y, int* dev_anchor_mask) {
scan_x<<<num_inds_for_scan_, num_inds_for_scan_ / 2,
num_inds_for_scan_ * sizeof(int)>>>(
dev_cumsum_along_x, dev_sparse_pillar_map, num_inds_for_scan_);
scan_y<<<num_inds_for_scan_, num_inds_for_scan_ / 2,
num_inds_for_scan_ * sizeof(int)>>>(
dev_cumsum_along_y, dev_cumsum_along_x, num_inds_for_scan_);
GPU_CHECK(cudaMemcpy(dev_sparse_pillar_map, dev_cumsum_along_y,
num_inds_for_scan_ * num_inds_for_scan_ * sizeof(int),
cudaMemcpyDeviceToDevice));
int num_blocks = DIVUP(num_anchor_, num_threads_);
make_anchor_mask_kernel<<<num_blocks, num_threads_>>>(
dev_box_anchors_min_x, dev_box_anchors_min_y, dev_box_anchors_max_x,
dev_box_anchors_max_y, dev_sparse_pillar_map, dev_anchor_mask,
min_x_range_, min_y_range_, pillar_x_size_, pillar_y_size_, grid_x_size_,
grid_y_size_, num_inds_for_scan_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
a71c25903cd86c865ca725d00d3404d852da4e56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_lapack.h>
#include <algorithm>
#ifdef AMGX_USE_MAGMA
#define ADD_ 1
#define HAVE_CUBLAS 1
#include <magma.h>
#endif
#include <amgx_cublas.h>
namespace amgx
{
#define lapackCheckError(status) \
{ \
if (status < 0) \
{ \
std::stringstream ss; \
ss << "Lapack error: argument number " \
<< -status << " had an illegal value."; \
FatalError(ss.str(), AMGX_ERR_INTERNAL); \
} \
else if (status > 0) \
FatalError("Lapack error: internal error.", \
AMGX_ERR_INTERNAL); \
} \
#define magmaCheckError(status) \
{ \
if (status < 0) \
{ \
std::stringstream ss; \
ss << "Magma error: argument number " \
<< -status << " had an illegal value."; \
FatalError(ss.str(), AMGX_ERR_INTERNAL); \
} \
else if (status > 0) \
FatalError("Magma error: internal error.", \
AMGX_ERR_INTERNAL); \
} \
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_lapack_enabled()
{
#ifndef AMGX_USE_LAPACK
FatalError("Error: LAPACK not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
FatalError("Error: LAPACK operation not implemented on host.", AMGX_ERR_CONFIGURATION);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
FatalError("Error: LAPACK operation not implemented on device.", AMGX_ERR_CONFIGURATION);
}
namespace
{
#ifdef AMGX_USE_LAPACK
struct _fcomplex { float re, im; };
typedef struct _fcomplex fcomplex;
struct _dcomplex { double re, im; };
typedef struct _dcomplex dcomplex;
extern "C"
int dgeev_(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info);
extern "C"
int sgeev_(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info);
extern "C"
int cgeev_(char *jobvl, char *jobvr, int *n, fcomplex *a,
int *lda, fcomplex *wr, fcomplex *wi, fcomplex *vl,
int *ldvl, fcomplex *vr, int *ldvr, fcomplex *work,
int *lwork, int *info);
extern "C"
int zgeev_(char *jobvl, char *jobvr, int *n, dcomplex *a,
int *lda, dcomplex *wr, dcomplex *wi, dcomplex *vl,
int *ldvl, dcomplex *vr, int *ldvr, dcomplex *work,
int *lwork, int *info);
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info)
{
return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info)
{
return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, hipComplex *a,
int *lda, hipComplex *wr, hipComplex *wi, hipComplex *vl,
int *ldvl, hipComplex *vr, int *ldvr, hipComplex *work,
int *lwork, int *info)
{
    return cgeev_(jobvl, jobvr, n, reinterpret_cast<fcomplex *>(a),
                  lda, reinterpret_cast<fcomplex *>(wr), reinterpret_cast<fcomplex *>(wi), reinterpret_cast<fcomplex *>(vl),
                  ldvl, reinterpret_cast<fcomplex *>(vr), ldvr, reinterpret_cast<fcomplex *>(work), lwork,
                  info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, hipDoubleComplex *a,
int *lda, hipDoubleComplex *wr, hipDoubleComplex *wi, hipDoubleComplex *vl,
int *ldvl, hipDoubleComplex *vr, int *ldvr, hipDoubleComplex *work,
int *lwork, int *info)
{
    return zgeev_(jobvl, jobvr, n, reinterpret_cast<dcomplex *>(a),
                  lda, reinterpret_cast<dcomplex *>(wr), reinterpret_cast<dcomplex *>(wi), reinterpret_cast<dcomplex *>(vl),
                  ldvl, reinterpret_cast<dcomplex *>(vr), ldvr, reinterpret_cast<dcomplex *>(work), lwork,
                  info);
}
template <typename T>
void lapack_geev(T *A, T *eigenvalues, int dim, int lda)
{
char job = 'N';
T *WI = new T[dim];
int ldv = 1;
T *vl = 0;
int work_size = 6 * dim;
T *work = new T[work_size];
int info;
lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI, vl, &ldv,
vl, &ldv, work, &work_size, &info);
lapackCheckError(info);
delete [] WI;
delete [] work;
}
template <typename T>
void lapack_geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr)
{
char jobvl = 'N';
char jobvr = 'V';
T *WI = new T[dim * dim];
int work_size = 6 * dim;
T *vl = 0;
int ldvl = 1;
T *work = new T[work_size];
int info;
lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI, vl, &ldvl,
eigenvectors, &ldvr, work, &work_size, &info);
lapackCheckError(info);
delete [] WI;
delete [] work;
}
#endif
} // end anonymous namespace
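// Workspace-query variant of the eigensolver call above; not part of the original
// source. lapack_geev() uses a fixed work_size of 6 * dim, which is large enough,
// but LAPACK's own convention is to ask the routine for its optimal workspace
// first by passing lwork = -1. A minimal sketch against the standard dgeev_
// symbol, shown only for the real double-precision case; the function name
// lapack_geev_query_sized is made up for this example.
#ifdef AMGX_USE_LAPACK
namespace
{
void lapack_geev_query_sized(double *A, double *eigenvalues, int dim, int lda)
{
    char job = 'N';
    double *WI = new double[dim];
    int ldv = 1;
    double *vl = 0;
    int info = 0;
    // Query call: lwork = -1 makes dgeev_ report the optimal workspace size.
    double work_query = 0;
    int lwork = -1;
    dgeev_(&job, &job, &dim, A, &lda, eigenvalues, WI, vl, &ldv,
           vl, &ldv, &work_query, &lwork, &info);
    lapackCheckError(info);
    // Solve call: allocate the reported workspace and compute the eigenvalues.
    lwork = static_cast<int>(work_query);
    double *work = new double[lwork];
    dgeev_(&job, &job, &dim, A, &lda, eigenvalues, WI, vl, &ldv,
           vl, &ldv, work, &lwork, &info);
    lapackCheckError(info);
    delete [] WI;
    delete [] work;
}
}
#endif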
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
// It is possible the matrix has an extra row (e.g. Arnoldi).
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
lapack_geev(A_ptr, eigenvalues.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A,
Vector<TConfig> &eigenvalues,
Vector<TConfig> &eigenvector)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
// It is possible the matrix has an extra row (e.g. Arnoldi).
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
lapack_geev(A_ptr, eigenvalues.raw(), eigenvector.raw(), dim, lda, eigenvector.get_lda());
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
not_implemented();
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dtrtri_(char *uplo, char *diag, int *n, double *
a, int *lda, int *info);
extern "C"
int strtri_(char *uplo, char *diag, int *n, float *
a, int *lda, int *info);
extern "C"
int ctrtri_(char *uplo, char *diag, int *n, fcomplex *
a, int *lda, int *info);
extern "C"
int ztrtri_(char *uplo, char *diag, int *n, dcomplex *
a, int *lda, int *info);
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, float *a,
int *lda, int *info)
{
return strtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, double *a,
int *lda, int *info)
{
return dtrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, fcomplex *a,
int *lda, int *info)
{
return ctrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, dcomplex *a,
int *lda, int *info)
{
return ztrtri_(uplo, diag, n, a, lda, info);
}
template <typename T>
void lapack_trtri(T *A, int dim, int lda)
{
char uplo = 'U';
char diag = 'N';
int info;
lapack_trtri_dispatch(&uplo, &diag, &dim, A, &lda, &info);
lapackCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
lapack_trtri(A.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, float *a,
int lda, int *info)
{
return magma_strtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, double *a,
int lda, int *info)
{
return magma_dtrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, hipComplex *a,
int lda, int *info)
{
    return magma_ctrtri_gpu(uplo, diag, n, reinterpret_cast<magmaFloatComplex *>(a), lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, hipDoubleComplex *a,
int lda, int *info)
{
    return magma_ztrtri_gpu(uplo, diag, n, reinterpret_cast<magmaDoubleComplex *>(a), lda, info);
}
template <typename T>
void magma_trtri(T *A, int dim, int lda)
{
magma_uplo_t uplo = MagmaUpper;
magma_diag_t diag = MagmaNonUnit;
int info;
magma_trtri_dispatch(uplo, diag, dim, A, lda, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
check_magma_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_trtri(A.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dsygv_(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info);
extern "C"
int ssygv_(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info);
extern "C"
int csygv_(int *itype, char *jobz, char *uplo, int *n,
fcomplex *a, int *lda, fcomplex *b, int *ldb,
fcomplex *w, fcomplex *work, int *lwork, int *info);
extern "C"
int zsygv_(int *itype, char *jobz, char *uplo, int *n,
dcomplex *a, int *lda, dcomplex *b, int *ldb,
dcomplex *w, dcomplex *work, int *lwork, int *info);
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info)
{
return dsygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info)
{
return ssygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
hipComplex *a, int *lda, hipComplex *b, int *ldb,
hipComplex *w, hipComplex *work, int *lwork, int *info)
{
return csygv_(itype, jobz, uplo, n,
                  reinterpret_cast<fcomplex *>(a), lda, reinterpret_cast<fcomplex *>(b), ldb,
                  reinterpret_cast<fcomplex *>(w), reinterpret_cast<fcomplex *>(work), lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
                         hipDoubleComplex *a, int *lda, hipDoubleComplex *b, int *ldb,
hipDoubleComplex *w, hipDoubleComplex *work, int *lwork, int *info)
{
return zsygv_(itype, jobz, uplo, n,
                  reinterpret_cast<dcomplex *>(a), lda, reinterpret_cast<dcomplex *>(b), ldb,
                  reinterpret_cast<dcomplex *>(w), reinterpret_cast<dcomplex *>(work), lwork, info);
}
template <typename T>
void lapack_sygv(T *gramA, T *gramB, T *eigenvector, int dim, int lda, T *work)
{
int itype = 1;
char jobz = 'V';
char uplo = 'U';
int ldb = lda;
int lwork = 1024;
int info = 0;
lapack_sygv_dispatch(&itype, &jobz, &uplo, &dim, gramA, &lda, gramB, &ldb, eigenvector, work, &lwork, &info);
lapackCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
Vector<TConfig> &eigenvalues, Vector<TConfig> &work)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
lapack_sygv(A.raw(), B.raw(), eigenvalues.raw(), dim, lda, work.raw());
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, float *A, int lda, int *info)
{
return magma_spotrf_gpu(uplo, n, A, lda, info);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, double *A, int lda, int *info)
{
return magma_dpotrf_gpu(uplo, n, A, lda, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, float *da,
int ldda, float *B, int lddb, int *info)
{
return magma_ssygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, double *da,
int ldda, double *B, int lddb, int *info)
{
return magma_dsygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, double *da, int ldda,
double *w, double *wa, int ldwa, double *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_dsyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, float *da, int ldda,
float *w, float *wa, int ldwa, float *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_ssyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
// This is a simple modification of the magma_?sygvd() source code
// from magma where the matrices are already on the device.
template <typename T>
magma_int_t magma_sygvd_gpu_impl(magma_int_t itype, magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n,
T *da, magma_int_t lda, T *db, magma_int_t ldb, T *w,
T *work, magma_int_t lwork, magma_int_t *iwork, magma_int_t liwork,
T *wa, magma_int_t *info)
{
magma_uplo_t uplo_[2] = {uplo, MagmaLower}; // {uplo, 0}
magma_vec_t jobz_[2] = {jobz, MagmaVec};//{jobz, 0};
T d_one = MAGMA_D_ONE;
magma_int_t ldda = n;
magma_int_t lddb = n;
static magma_int_t lower;
static char trans[1];
static magma_int_t wantz, lquery;
static magma_int_t lopt, lwmin, liopt, liwmin;
static hipStream_t stream;
magma_queue_create( &stream );
wantz = jobz_[0] == MagmaVec;
lower = uplo_[0] == MagmaLower;
lquery = lwork == -1 || liwork == -1;
*info = 0;
if (itype < 1 || itype > 3)
{
*info = -1;
}
else if (! (wantz || jobz_[0] == MagmaNoVec))
{
*info = -2;
}
else if (! (lower || uplo_[0] == MagmaUpper))
{
*info = -3;
}
else if (n < 0)
{
*info = -4;
}
else if (lda < max(1, n))
{
*info = -6;
}
else if (ldb < max(1, n))
{
*info = -8;
}
magma_int_t nb = magma_get_dsytrd_nb(n);
if (n < 1)
{
liwmin = 1;
lwmin = 1;
}
else if (wantz)
{
lwmin = 1 + 6 * n * nb + 2 * n * n;
liwmin = 5 * n + 3;
}
else
{
lwmin = 2 * n * nb + 1;
liwmin = 1;
}
lopt = lwmin;
liopt = liwmin;
work[ 0] = lopt;
iwork[0] = liopt;
if (lwork < lwmin && ! lquery)
{
*info = -11;
}
else if (liwork < liwmin && ! lquery)
{
*info = -13;
}
if (*info != 0)
{
magma_xerbla( __func__, -(*info) );
return MAGMA_ERR_ILLEGAL_VALUE;
}
else if (lquery)
{
return MAGMA_SUCCESS;
}
/* Quick return if possible */
if (n == 0)
{
return 0;
}
magma_potrf_gpu_dispatch(uplo_[0], n, db, lddb, info);
if (*info != 0)
{
*info = n + *info;
return 0;
}
/* Transform problem to standard eigenvalue problem and solve. */
magma_sygst_gpu_dispatch(itype, uplo_[0], n, da, ldda, db, lddb, info);
magma_syevd_gpu_dispatch(jobz_[0], uplo_[0], n, da, ldda, w, wa, lda,
work, lwork, iwork, liwork, info);
lopt = max( lopt, (magma_int_t) work[0]);
liopt = max(liopt, iwork[0]);
if (wantz && *info == 0)
{
/* Backtransform eigenvectors to the original problem. */
if (itype == 1 || itype == 2)
{
/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x;
backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
if (lower)
{
*(unsigned char *)trans = MagmaTrans;
}
else
{
*(unsigned char *)trans = MagmaNoTrans;
}
magma_trsm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
n, n, d_one, db, lddb, da, ldda);
}
else if (itype == 3)
{
/* For B*A*x=(lambda)*x;
backtransform eigenvectors: x = L*y or U'*y */
if (lower)
{
*(unsigned char *)trans = MagmaNoTrans;
}
else
{
*(unsigned char *)trans = MagmaTrans;
}
magma_trmm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
n, n, d_one, db, lddb, da, ldda);
}
}
magma_queue_sync( stream );
magma_queue_destroy( stream );
work[0] = (T) lopt;
iwork[0] = liopt;
return MAGMA_SUCCESS;
}
hipblasStatus_t cublas_trsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb)
{
return hipblasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
hipblasStatus_t cublas_trsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb)
{
return hipblasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
template <typename T>
void magma_sygvd_gpu(T *A, T *B, T *eigenvalues, int dim, int lda)
{
int itype = 1;
magma_vec_t jobz = MagmaVec;
magma_uplo_t uplo = MagmaUpper;
int N = dim;
int ldb = lda;
int nb = 32;
int lwork = 1 + 6 * N * nb + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
T *work = &s_work[0];
int liwork = 3 + 5 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int *iwork = &s_iwork[0];
static std::vector<T> s_wa;
s_wa.resize(lda * N);
T *wa = &s_wa[0];
int ldwa = N;
int info;
/*
magma_sygvd_gpu_impl(itype, jobz, uplo, N, A, lda, B, ldb, eigenvalues, work, lwork, iwork, liwork, wa, &info);
*/
magma_potrf_gpu_dispatch(uplo, N, B, lda, &info);
magmaCheckError(info);
magma_sygst_gpu_dispatch(itype, uplo, N, A, lda, B, ldb, &info);
magmaCheckError(info);
magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
magmaCheckError(info);
T one = 1;
hipblasHandle_t handle = Cublas::get_handle();
cublas_trsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, N, N, &one, B, ldb, A, lda);
}
#endif
}
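// Host-side residual check for the generalized eigensolve above; not part of the
// original source. magma_sygvd_gpu() solves A * x = lambda * B * x by Cholesky
// factoring B (potrf), reducing to a standard problem (sygst), diagonalizing it
// (syevd), and back-transforming the vectors (trsm). A minimal verification
// sketch under these assumptions: A0 and B0 are host copies of the original
// column-major matrices (the routine overwrites its device inputs), eigenvectors
// holds the back-transformed vectors column by column with leading dimension lda,
// and the return value is the worst squared relative residual
// ||A x - lambda B x||^2 / ||A x||^2 over all eigenpairs.
template <typename T>
T generalized_eig_residual_ref(const T *A0, const T *B0, const T *eigenvectors,
                               const T *eigenvalues, int dim, int lda)
{
    T worst = 0;
    for (int k = 0; k < dim; ++k)
    {
        const T *x = eigenvectors + k * lda;
        T num = 0, den = 0;
        for (int i = 0; i < dim; ++i)
        {
            T ax = 0, bx = 0;
            for (int j = 0; j < dim; ++j)
            {
                ax += A0[i + j * lda] * x[j];
                bx += B0[i + j * lda] * x[j];
            }
            T r = ax - eigenvalues[k] * bx;
            num += r * r;
            den += ax * ax;
        }
        T rel = (den > 0) ? num / den : num;
        if (rel > worst) { worst = rel; }
    }
    return worst;
}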
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
Vector<TConfig_h> &eigenvalues, Vector<TConfig> &work)
{
typedef typename Vector<TConfig>::value_type value_type;
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_sygvd_gpu(A.raw(), B.raw(), eigenvalues.raw(), dim, lda);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
template <typename T>
void magma_syevd_gpu(T *A, T *eigenvalues, int dim, int lda)
{
magma_vec_t jobz = MagmaVec;
magma_uplo_t uplo = MagmaUpper;
int N = dim;
int nb = 32;
int lwork = 1 + 6 * N * nb + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
T *work = &s_work[0];
int liwork = 3 + 5 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int *iwork = &s_iwork[0];
static std::vector<T> s_wa;
s_wa.resize(lda * N);
T *wa = &s_wa[0];
int ldwa = N;
int info;
magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
Vector<TConfig_h> &eigenvalues)
{
check_magma_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_syevd_gpu(A.raw(), eigenvalues.raw(), dim, lda);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
Vector<TConfig> &eigenvalues)
{
not_implemented();
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_stedx_dispatch(magma_range_t range, int n,
double vl, double vu,
int il, int iu,
double *d, double *e, double *z, int ldz,
double *work, int lwork, int *iwork, int liwork,
double *dwork, int *info)
{
return magma_dstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
int magma_stedx_dispatch(magma_range_t range, int n,
float vl, float vu,
int il, int iu,
float *d, float *e, float *z, int ldz,
float *work, int lwork, int *iwork, int liwork,
float *dwork, int *info)
{
return magma_sstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
template <typename T>
void magma_stedx(T *diagonal, T *subdiagonal, T *eigenvectors,
int lower, int upper, int dim, int ldz, T *dwork, int dwork_size)
{
magma_range_t range = MagmaRangeI;
int N = dim;
T vl = 0;
T vu = 0;
int il = lower;
int iu = upper;
int lwork = 1 + 4 * N + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
int liwork = 3 + 6 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int info;
magma_stedx_dispatch(range, N, vl, vu, il, iu, diagonal, subdiagonal, eigenvectors, ldz,
&s_work[0], lwork, &s_iwork[0], liwork, dwork, &info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::stedx(Vector<TConfig> &diagonal,
Vector<TConfig> &subdiagonal,
Vector<TConfig> &eigenvectors,
int dim,
Vector<TConfig_d> &dwork)
{
check_magma_enabled();
#ifdef AMGX_USE_MAGMA
magma_stedx(diagonal.raw(), subdiagonal.raw(), eigenvectors.raw(),
dim, dim, dim, eigenvectors.get_lda(),
dwork.raw(), dwork.size());
#endif
}
namespace
{
template <typename T>
void larf(int m, int n, T *v,
int incv, T *tau, T *c, int ldc,
T *work)
{
/* Table of constant values */
static T c_b4 = 1.;
static T c_b5 = 0.;
static int c1 = 1;
/* Form H * C */
/* w := C' * v */
Cublas::gemv(true, m, n, &c_b4, c, ldc,
v, incv, &c_b5, work, c1);
/* C := C - v * w' */
Cublas::ger(m, n, tau, v, incv, work, c1, c, ldc);
}
template <typename T>
__global__
void set1(T *a)
{
*a = 1.;
}
template <typename T>
__global__
void add_tau(T *a, T tau)
{
*a = 1 + tau;
}
template <typename T>
void gpu_orgqr(int m, int n, int k,
T *a, int lda, T *tau, T *work, int lwork)
{
int i1, i2;
for (int i = k - 1; i >= 0; --i)
{
/* Apply H(i) to A(i:m,i:n) from the left */
if (i < n - 1)
{
hipLaunchKernelGGL(( set1) , dim3(1), dim3(1), 0, 0, &a[i + i * lda]);
i1 = m - i;
i2 = n - i - 1;
larf(i1, i2, &a[i + i * lda], 1, &tau[i],
&a[i + (i + 1) * lda], lda, work);
}
if (i < m - 1)
{
i1 = m - i - 1;
Cublas::scal(i1, &tau[i], &a[i + 1 + i * lda], 1);
}
hipLaunchKernelGGL(( add_tau) , dim3(1), dim3(1), 0, 0, &a[i + i * lda], tau[i]);
/* Set A(1:i-1,i) to zero */
hipMemset(&a[i * lda], 0, sizeof(T) * i);
}
cudaCheckError();
}
template <typename T>
__device__ __host__
T lapy2_(T *a, T *b)
{
T va = *a;
T vb = *b;
return sqrt(va * va + vb * vb);
}
template <typename T>
__device__ __host__
T d_sign(T a, T b)
{
T x;
x = (a >= 0 ? a : - a);
return (b >= 0 ? x : -x);
}
template <typename T>
void compute_tau_host(T *alpha, T *norm,
T *tau, T *d1)
{
*d1 = lapy2_(alpha, norm);
T beta = -d_sign(*d1, *alpha);
// LAPACK: skipped part about scaling.
// Negated compared to LAPACK code, avoid negating value on device later.
*tau = -(beta - *alpha) / beta;
*d1 = 1. / (*alpha - beta);
*alpha = beta;
}
template <typename T>
void larfg(int n, T *alpha, T *x,
int incx, T *tau)
{
if (n <= 1)
{
*tau = 0.;
return;
}
int i1 = n - 1;
T xnorm;
Cublas::nrm2(i1, x, incx, &xnorm);
T h_alpha;
hipMemcpy(&h_alpha, alpha, sizeof(T), hipMemcpyDeviceToHost);
T d1;
compute_tau_host(&h_alpha, &xnorm, tau, &d1);
Cublas::scal(i1, d1, x, incx);
// Update the diagonal value on the device.
hipMemcpy(alpha, &h_alpha, sizeof(T), hipMemcpyHostToDevice);
}
template <typename T>
void gpu_geqrf(int m, int n, T *a, int lda,
T *tau, T *work)
{
int k = ::min(m, n);
T *aii;
hipMalloc(&aii, sizeof(T));
for (int i = 0; i < k; ++i)
{
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
int i2 = m - i;
/* Computing MIN */
int i3 = i + 1;
larfg(i2, &a[i + i * lda],
&a[::min(i3, m - 1) + i * lda],
1, &tau[i]);
if (i < n - 1)
{
/* Apply H(i) to A(i:m,i+1:n) from the left */
hipMemcpy(aii, &a[i + i * lda], sizeof(T), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( set1) , dim3(1), dim3(1), 0, 0, &a[i + i * lda]);
cudaCheckError();
i2 = m - i;
i3 = n - i - 1;
larf(i2, i3, &a[i + i * lda], 1,
&tau[i], &a[i + (i + 1) * lda], lda, work);
hipMemcpy(&a[i + i * lda], aii, sizeof(T), hipMemcpyDeviceToDevice);
}
}
hipFree(aii);
}
} // end anonymous namespace
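// CPU reference for the Householder application performed by larf() above; not
// part of the original source. A minimal sketch: applying H = I - tau * v * v^T
// to an m x n column-major matrix C is done exactly as on the device -- first
// w = C^T v, then the rank-1 update C -= tau * v * w^T. (The device path stores a
// pre-negated tau so it can feed it straight into ger; this reference uses the
// textbook sign, and the function name is made up for this example.)
template <typename T>
void apply_householder_ref(int m, int n, const T *v, T tau, T *C, int ldc)
{
    std::vector<T> w(n, T(0));
    for (int j = 0; j < n; ++j)           // w := C^T * v
    {
        for (int i = 0; i < m; ++i)
        {
            w[j] += C[i + j * ldc] * v[i];
        }
    }
    for (int j = 0; j < n; ++j)           // C := C - tau * v * w^T
    {
        for (int i = 0; i < m; ++i)
        {
            C[i + j * ldc] -= tau * v[i] * w[j];
        }
    }
}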
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_geqrf_dispatch(int m, int n, float *A, int lda,
float *tau, float *work, int *info)
{
return magma_sgeqrf_gpu(m, n, A, lda, tau, work, info);
}
int magma_geqrf_dispatch(int m, int n, double *A, int lda,
double *tau, double *work, int *info)
{
return magma_dgeqrf_gpu(m, n, A, lda, tau, work, info);
}
template <typename T>
void magma_geqrf(int m, int n, T *A, int lda,
T *tau, T *work)
{
int info;
magma_geqrf_dispatch(m, n, A, lda, tau, work, &info);
magmaCheckError(info);
}
int magma_orgqr_dispatch(int m, int n, int k, float *A, int lda,
float *tau, float *work, int lwork, int *info)
{
return magma_sorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
int magma_orgqr_dispatch(int m, int n, int k, double *A, int lda,
double *tau, double *work, int lwork, int *info)
{
return magma_dorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
template <typename T>
void magma_orgqr(int m, int n, int k, T *A, int lda,
T *tau, T *work, int lwork)
{
int info;
magma_orgqr_dispatch(m, n, k, A, lda, tau, work, lwork, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#else
gpu_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#else
gpu_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#endif
}
#define AMGX_CASE_LINE(CASE) \
template class Lapack<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
|
a71c25903cd86c865ca725d00d3404d852da4e56.cu
|
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_lapack.h>
#include <algorithm>
#ifdef AMGX_USE_MAGMA
#define ADD_ 1
#define HAVE_CUBLAS 1
#include <magma.h>
#endif
#include <amgx_cublas.h>
namespace amgx
{
#define lapackCheckError(status) \
{ \
if (status < 0) \
{ \
std::stringstream ss; \
ss << "Lapack error: argument number " \
<< -status << " had an illegal value."; \
FatalError(ss.str(), AMGX_ERR_INTERNAL); \
} \
else if (status > 0) \
FatalError("Lapack error: internal error.", \
AMGX_ERR_INTERNAL); \
}
#define magmaCheckError(status) \
{ \
if (status < 0) \
{ \
std::stringstream ss; \
ss << "Magma error: argument number " \
<< -status << " had an illegal value."; \
FatalError(ss.str(), AMGX_ERR_INTERNAL); \
} \
else if (status > 0) \
FatalError("Magma error: internal error.", \
AMGX_ERR_INTERNAL); \
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_lapack_enabled()
{
#ifndef AMGX_USE_LAPACK
FatalError("Error: LAPACK not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
FatalError("Error: LAPACK operation not implemented on host.", AMGX_ERR_CONFIGURATION);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
FatalError("Error: LAPACK operation not implemented on device.", AMGX_ERR_CONFIGURATION);
}
namespace
{
#ifdef AMGX_USE_LAPACK
struct _fcomplex { float re, im; };
typedef struct _fcomplex fcomplex;
struct _dcomplex { double re, im; };
typedef struct _dcomplex dcomplex;
extern "C"
int dgeev_(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info);
extern "C"
int sgeev_(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info);
extern "C"
int cgeev_(char *jobvl, char *jobvr, int *n, fcomplex *a,
int *lda, fcomplex *wr, fcomplex *wi, fcomplex *vl,
int *ldvl, fcomplex *vr, int *ldvr, fcomplex *work,
int *lwork, int *info);
extern "C"
int zgeev_(char *jobvl, char *jobvr, int *n, dcomplex *a,
int *lda, dcomplex *wr, dcomplex *wi, dcomplex *vl,
int *ldvl, dcomplex *vr, int *ldvr, dcomplex *work,
int *lwork, int *info);
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info)
{
return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info)
{
return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, cuComplex *a,
int *lda, cuComplex *wr, cuComplex *wi, cuComplex *vl,
int *ldvl, cuComplex *vr, int *ldvr, cuComplex *work,
int *lwork, int *info)
{
return cgeev_(jobvl, jobvr, n, reinterpret_cast<fcomplex *>(a),
lda, reinterpret_cast<fcomplex *>(wr), reinterpret_cast<fcomplex *>(wi), reinterpret_cast<fcomplex *>(vl),
ldvl, reinterpret_cast<fcomplex *>(vr), ldvr, reinterpret_cast<fcomplex *>(work), lwork,
info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, cuDoubleComplex *a,
int *lda, cuDoubleComplex *wr, cuDoubleComplex *wi, cuDoubleComplex *vl,
int *ldvl, cuDoubleComplex *vr, int *ldvr, cuDoubleComplex *work,
int *lwork, int *info)
{
return zgeev_(jobvl, jobvr, n, reinterpret_cast<dcomplex *>(a),
lda, reinterpret_cast<dcomplex *>(wr), reinterpret_cast<dcomplex *>(wi), reinterpret_cast<dcomplex *>(vl),
ldvl, reinterpret_cast<dcomplex *>(vr), ldvr, reinterpret_cast<dcomplex *>(work), lwork,
info);
}
template <typename T>
void lapack_geev(T *A, T *eigenvalues, int dim, int lda)
{
char job = 'N';
T *WI = new T[dim];
int ldv = 1;
T *vl = 0;
int work_size = 6 * dim;
T *work = new T[work_size];
int info;
lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI, vl, &ldv,
vl, &ldv, work, &work_size, &info);
lapackCheckError(info);
delete [] WI;
delete [] work;
}
template <typename T>
void lapack_geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr)
{
char jobvl = 'N';
char jobvr = 'V';
T *WI = new T[dim * dim];
int work_size = 6 * dim;
T *vl = 0;
int ldvl = 1;
T *work = new T[work_size];
int info;
lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI, vl, &ldvl,
eigenvectors, &ldvr, work, &work_size, &info);
lapackCheckError(info);
delete [] WI;
delete [] work;
}
#endif
} // end anonymous namespace
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
// It is possible the matrix has an extra row (e.g. Arnoldi).
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
lapack_geev(A_ptr, eigenvalues.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A,
Vector<TConfig> &eigenvalues,
Vector<TConfig> &eigenvector)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
// It is possible the matrix has an extra row (e.g. Arnoldi).
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
lapack_geev(A_ptr, eigenvalues.raw(), eigenvector.raw(), dim, lda, eigenvector.get_lda());
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
not_implemented();
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dtrtri_(char *uplo, char *diag, int *n, double *
a, int *lda, int *info);
extern "C"
int strtri_(char *uplo, char *diag, int *n, float *
a, int *lda, int *info);
extern "C"
int ctrtri_(char *uplo, char *diag, int *n, fcomplex *
a, int *lda, int *info);
extern "C"
int ztrtri_(char *uplo, char *diag, int *n, dcomplex *
a, int *lda, int *info);
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, float *a,
int *lda, int *info)
{
return strtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, double *a,
int *lda, int *info)
{
return dtrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, fcomplex *a,
int *lda, int *info)
{
return ctrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, dcomplex *a,
int *lda, int *info)
{
return ztrtri_(uplo, diag, n, a, lda, info);
}
template <typename T>
void lapack_trtri(T *A, int dim, int lda)
{
char uplo = 'U';
char diag = 'N';
int info;
lapack_trtri_dispatch(&uplo, &diag, &dim, A, &lda, &info);
lapackCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
lapack_trtri(A.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, float *a,
int lda, int *info)
{
return magma_strtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, double *a,
int lda, int *info)
{
return magma_dtrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, cuComplex *a,
int lda, int *info)
{
// Complex precisions must dispatch to the complex MAGMA routines, not the real ones.
return magma_ctrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, cuDoubleComplex *a,
int lda, int *info)
{
return magma_ztrtri_gpu(uplo, diag, n, a, lda, info);
}
}
template <typename T>
void magma_trtri(T *A, int dim, int lda)
{
magma_uplo_t uplo = MagmaUpper;
magma_diag_t diag = MagmaNonUnit;
int info;
magma_trtri_dispatch(uplo, diag, dim, A, lda, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
check_magma_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_trtri(A.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dsygv_(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info);
extern "C"
int ssygv_(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info);
extern "C"
int csygv_(int *itype, char *jobz, char *uplo, int *n,
fcomplex *a, int *lda, fcomplex *b, int *ldb,
fcomplex *w, fcomplex *work, int *lwork, int *info);
extern "C"
int zsygv_(int *itype, char *jobz, char *uplo, int *n,
dcomplex *a, int *lda, dcomplex *b, int *ldb,
dcomplex *w, dcomplex *work, int *lwork, int *info);
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info)
{
return dsygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info)
{
return ssygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
cuComplex *a, int *lda, cuComplex *b, int *ldb,
cuComplex *w, cuComplex *work, int *lwork, int *info)
{
return csygv_(itype, jobz, uplo, n,
reinterpret_cast<fcomplex *>(a), lda, reinterpret_cast<fcomplex *>(b), ldb,
reinterpret_cast<fcomplex *>(w), reinterpret_cast<fcomplex *>(work), lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
cuDoubleComplex *a, int *lda, cuDoubleComplex *b, int *ldb,
cuDoubleComplex *w, cuDoubleComplex *work, int *lwork, int *info)
{
return zsygv_(itype, jobz, uplo, n,
reinterpret_cast<dcomplex *>(a), lda, reinterpret_cast<dcomplex *>(b), ldb,
reinterpret_cast<dcomplex *>(w), reinterpret_cast<dcomplex *>(work), lwork, info);
}
template <typename T>
void lapack_sygv(T *gramA, T *gramB, T *eigenvector, int dim, int lda, T *work)
{
int itype = 1;
char jobz = 'V';
char uplo = 'U';
int ldb = lda;
int lwork = 1024;
int info = 0;
lapack_sygv_dispatch(&itype, &jobz, &uplo, &dim, gramA, &lda, gramB, &ldb, eigenvector, work, &lwork, &info);
lapackCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
Vector<TConfig> &eigenvalues, Vector<TConfig> &work)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
lapack_sygv(A.raw(), B.raw(), eigenvalues.raw(), dim, lda, work.raw());
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, float *A, int lda, int *info)
{
return magma_spotrf_gpu(uplo, n, A, lda, info);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, double *A, int lda, int *info)
{
return magma_dpotrf_gpu(uplo, n, A, lda, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, float *da,
int ldda, float *B, int lddb, int *info)
{
return magma_ssygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, double *da,
int ldda, double *B, int lddb, int *info)
{
return magma_dsygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, double *da, int ldda,
double *w, double *wa, int ldwa, double *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_dsyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, float *da, int ldda,
float *w, float *wa, int ldwa, float *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_ssyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
// This is a simple modification of the magma_?sygvd() source code
// from magma where the matrices are already on the device.
template <typename T>
magma_int_t magma_sygvd_gpu_impl(magma_int_t itype, magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n,
T *da, magma_int_t lda, T *db, magma_int_t ldb, T *w,
T *work, magma_int_t lwork, magma_int_t *iwork, magma_int_t liwork,
T *wa, magma_int_t *info)
{
magma_uplo_t uplo_[2] = {uplo, MagmaLower}; // {uplo, 0}
magma_vec_t jobz_[2] = {jobz, MagmaVec};//{jobz, 0};
T d_one = MAGMA_D_ONE;
magma_int_t ldda = n;
magma_int_t lddb = n;
static magma_int_t lower;
static char trans[1];
static magma_int_t wantz, lquery;
static magma_int_t lopt, lwmin, liopt, liwmin;
static cudaStream_t stream;
magma_queue_create( &stream );
wantz = jobz_[0] == MagmaVec;
lower = uplo_[0] == MagmaLower;
lquery = lwork == -1 || liwork == -1;
*info = 0;
if (itype < 1 || itype > 3)
{
*info = -1;
}
else if (! (wantz || jobz_[0] == MagmaNoVec))
{
*info = -2;
}
else if (! (lower || uplo_[0] == MagmaUpper))
{
*info = -3;
}
else if (n < 0)
{
*info = -4;
}
else if (lda < max(1, n))
{
*info = -6;
}
else if (ldb < max(1, n))
{
*info = -8;
}
magma_int_t nb = magma_get_dsytrd_nb(n);
if (n < 1)
{
liwmin = 1;
lwmin = 1;
}
else if (wantz)
{
lwmin = 1 + 6 * n * nb + 2 * n * n;
liwmin = 5 * n + 3;
}
else
{
lwmin = 2 * n * nb + 1;
liwmin = 1;
}
lopt = lwmin;
liopt = liwmin;
work[ 0] = lopt;
iwork[0] = liopt;
if (lwork < lwmin && ! lquery)
{
*info = -11;
}
else if (liwork < liwmin && ! lquery)
{
*info = -13;
}
if (*info != 0)
{
magma_xerbla( __func__, -(*info) );
return MAGMA_ERR_ILLEGAL_VALUE;
}
else if (lquery)
{
return MAGMA_SUCCESS;
}
/* Quick return if possible */
if (n == 0)
{
return 0;
}
magma_potrf_gpu_dispatch(uplo_[0], n, db, lddb, info);
if (*info != 0)
{
*info = n + *info;
return 0;
}
/* Transform problem to standard eigenvalue problem and solve. */
magma_sygst_gpu_dispatch(itype, uplo_[0], n, da, ldda, db, lddb, info);
magma_syevd_gpu_dispatch(jobz_[0], uplo_[0], n, da, ldda, w, wa, lda,
work, lwork, iwork, liwork, info);
lopt = max( lopt, (magma_int_t) work[0]);
liopt = max(liopt, iwork[0]);
if (wantz && *info == 0)
{
/* Backtransform eigenvectors to the original problem. */
if (itype == 1 || itype == 2)
{
/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x;
backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
if (lower)
{
*(unsigned char *)trans = MagmaTrans;
}
else
{
*(unsigned char *)trans = MagmaNoTrans;
}
magma_trsm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
n, n, d_one, db, lddb, da, ldda);
}
else if (itype == 3)
{
/* For B*A*x=(lambda)*x;
backtransform eigenvectors: x = L*y or U'*y */
if (lower)
{
*(unsigned char *)trans = MagmaNoTrans;
}
else
{
*(unsigned char *)trans = MagmaTrans;
}
magma_trmm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
n, n, d_one, db, lddb, da, ldda);
}
}
magma_queue_sync( stream );
magma_queue_destroy( stream );
work[0] = (T) lopt;
iwork[0] = liopt;
return MAGMA_SUCCESS;
}
cublasStatus_t cublas_trsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb)
{
return cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
cublasStatus_t cublas_trsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb)
{
return cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
template <typename T>
void magma_sygvd_gpu(T *A, T *B, T *eigenvalues, int dim, int lda)
{
int itype = 1;
magma_vec_t jobz = MagmaVec;
magma_uplo_t uplo = MagmaUpper;
int N = dim;
int ldb = lda;
int nb = 32;
int lwork = 1 + 6 * N * nb + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
T *work = &s_work[0];
int liwork = 3 + 5 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int *iwork = &s_iwork[0];
static std::vector<T> s_wa;
s_wa.resize(lda * N);
T *wa = &s_wa[0];
int ldwa = N;
int info;
/*
magma_sygvd_gpu_impl(itype, jobz, uplo, N, A, lda, B, ldb, eigenvalues, work, lwork, iwork, liwork, wa, &info);
*/
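// The generalized problem A*x = lambda*B*x (itype == 1, upper storage) is reduced
// to a standard one in three MAGMA calls and one CUBLAS solve below:
// 1. potrf: B = U^T * U (Cholesky factorization of B)
// 2. sygst: A <- inv(U^T) * A * inv(U) (reduction to standard form)
// 3. syevd: eigenvalues and eigenvectors Y of the transformed A
// 4. trsm: back-transform the eigenvectors, X = inv(U) * Y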
magma_potrf_gpu_dispatch(uplo, N, B, lda, &info);
magmaCheckError(info);
magma_sygst_gpu_dispatch(itype, uplo, N, A, lda, B, ldb, &info);
magmaCheckError(info);
magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
magmaCheckError(info);
T one = 1;
cublasHandle_t handle = Cublas::get_handle();
cublas_trsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, N, N, &one, B, ldb, A, lda);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
Vector<TConfig_h> &eigenvalues, Vector<TConfig> &work)
{
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_sygvd_gpu(A.raw(), B.raw(), eigenvalues.raw(), dim, lda);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
template <typename T>
void magma_syevd_gpu(T *A, T *eigenvalues, int dim, int lda)
{
magma_vec_t jobz = MagmaVec;
magma_uplo_t uplo = MagmaUpper;
int N = dim;
int nb = 32;
int lwork = 1 + 6 * N * nb + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
T *work = &s_work[0];
int liwork = 3 + 5 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int *iwork = &s_iwork[0];
static std::vector<T> s_wa;
s_wa.resize(lda * N);
T *wa = &s_wa[0];
int ldwa = N;
int info;
magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
Vector<TConfig_h> &eigenvalues)
{
check_magma_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_syevd_gpu(A.raw(), eigenvalues.raw(), dim, lda);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
Vector<TConfig> &eigenvalues)
{
not_implemented();
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_stedx_dispatch(magma_range_t range, int n,
double vl, double vu,
int il, int iu,
double *d, double *e, double *z, int ldz,
double *work, int lwork, int *iwork, int liwork,
double *dwork, int *info)
{
return magma_dstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
int magma_stedx_dispatch(magma_range_t range, int n,
float vl, float vu,
int il, int iu,
float *d, float *e, float *z, int ldz,
float *work, int lwork, int *iwork, int liwork,
float *dwork, int *info)
{
return magma_sstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
template <typename T>
void magma_stedx(T *diagonal, T *subdiagonal, T *eigenvectors,
int lower, int upper, int dim, int ldz, T *dwork, int dwork_size)
{
magma_range_t range = MagmaRangeI;
int N = dim;
T vl = 0;
T vu = 0;
int il = lower;
int iu = upper;
int lwork = 1 + 4 * N + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
int liwork = 3 + 6 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int info;
magma_stedx_dispatch(range, N, vl, vu, il, iu, diagonal, subdiagonal, eigenvectors, ldz,
&s_work[0], lwork, &s_iwork[0], liwork, dwork, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::stedx(Vector<TConfig> &diagonal,
Vector<TConfig> &subdiagonal,
Vector<TConfig> &eigenvectors,
int dim,
Vector<TConfig_d> &dwork)
{
check_magma_enabled();
#ifdef AMGX_USE_MAGMA
magma_stedx(diagonal.raw(), subdiagonal.raw(), eigenvectors.raw(),
dim, dim, dim, eigenvectors.get_lda(),
dwork.raw(), dwork.size());
#endif
}
namespace
{
// Applies an elementary Householder reflector to the m-by-n matrix C from the
// left (cf. LAPACK xLARF with side == 'L'). Note: tau is stored already negated
// by larfg()/compute_tau_host(), so the ger() update adds tau * v * w^T directly.
template <typename T>
void larf(int m, int n, T *v,
int incv, T *tau, T *c, int ldc,
T *work)
{
/* Table of constant values */
static T c_b4 = 1.;
static T c_b5 = 0.;
static int c1 = 1;
/* Form H * C */
/* w := C' * v */
Cublas::gemv(true, m, n, &c_b4, c, ldc,
v, incv, &c_b5, work, c1);
/* C := C - v * w' */
Cublas::ger(m, n, tau, v, incv, work, c1, c, ldc);
}
template <typename T>
__global__
void set1(T *a)
{
*a = 1.;
}
template <typename T>
__global__
void add_tau(T *a, T tau)
{
*a = 1 + tau;
}
// Forms the explicit Q factor from the Householder reflectors computed by
// gpu_geqrf() below (cf. LAPACK xORG2R), processing the reflectors in reverse
// order. Because tau is stored negated, the column is scaled by +tau and the
// diagonal entry is set to 1 + tau.
template <typename T>
void gpu_orgqr(int m, int n, int k,
T *a, int lda, T *tau, T *work, int lwork)
{
int i1, i2;
for (int i = k - 1; i >= 0; --i)
{
/* Apply H(i) to A(i:m,i:n) from the left */
if (i < n - 1)
{
set1 <<< 1, 1>>>(&a[i + i * lda]);
i1 = m - i;
i2 = n - i - 1;
larf(i1, i2, &a[i + i * lda], 1, &tau[i],
&a[i + (i + 1) * lda], lda, work);
}
if (i < m - 1)
{
i1 = m - i - 1;
Cublas::scal(i1, &tau[i], &a[i + 1 + i * lda], 1);
}
add_tau <<< 1, 1>>>(&a[i + i * lda], tau[i]);
/* Set A(1:i-1,i) to zero */
cudaMemset(&a[i * lda], 0, sizeof(T) * i);
}
cudaCheckError();
}
template <typename T>
__device__ __host__
T lapy2_(T *a, T *b)
{
T va = *a;
T vb = *b;
return sqrt(va * va + vb * vb);
}
template <typename T>
__device__ __host__
T d_sign(T a, T b)
{
T x;
x = (a >= 0 ? a : - a);
return (b >= 0 ? x : -x);
}
template <typename T>
void compute_tau_host(T *alpha, T *norm,
T *tau, T *d1)
{
*d1 = lapy2_(alpha, norm);
T beta = -d_sign(*d1, *alpha);
// LAPACK: skipped part about scaling.
// Negated compared to LAPACK code, avoid negating value on device later.
*tau = -(beta - *alpha) / beta;
*d1 = 1. / (*alpha - beta);
*alpha = beta;
}
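// Illustrative sketch only (never called; the name is ours, not LAPACK's): a
// host-side consistency check of the reflector quantities produced by
// compute_tau_host() above. On return *alpha holds beta with
// beta^2 == alpha^2 + norm^2, *tau holds -(beta - alpha)/beta (already negated
// for the device-side update) and *d1 holds the scaling 1/(alpha - beta).
template <typename T>
bool householder_values_consistent(T alpha, T norm, T tol)
{
T a = alpha;
T nrm = norm;
T tau;
T scale;
compute_tau_host(&a, &nrm, &tau, &scale);
// 'a' now holds beta; check the defining identity beta^2 = alpha^2 + norm^2.
T diff = a * a - (alpha * alpha + norm * norm);
if (diff < 0) { diff = -diff; }
return diff <= tol;
}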
// Generates an elementary Householder reflector for the vector (alpha, x), as
// in LAPACK xLARFG: alpha and x live on the device, tau is a host pointer, and
// x is scaled in place by 1/(alpha - beta).
template <typename T>
void larfg(int n, T *alpha, T *x,
int incx, T *tau)
{
if (n <= 1)
{
*tau = 0.;
return;
}
int i1 = n - 1;
T xnorm;
Cublas::nrm2(i1, x, incx, &xnorm);
T h_alpha;
cudaMemcpy(&h_alpha, alpha, sizeof(T), cudaMemcpyDeviceToHost);
T d1;
compute_tau_host(&h_alpha, &xnorm, tau, &d1);
Cublas::scal(i1, d1, x, incx);
// Update the diagonal value on the device.
cudaMemcpy(alpha, &h_alpha, sizeof(T), cudaMemcpyHostToDevice);
}
// Unblocked Householder QR factorization of the m-by-n column-major device
// matrix a (cf. LAPACK xGEQR2): on exit R is in the upper triangle and the
// reflectors below it. tau is a host array of length min(m, n); work is device
// scratch used by larf().
template <typename T>
void gpu_geqrf(int m, int n, T *a, int lda,
T *tau, T *work)
{
int k = std::min(m, n);
T *aii;
cudaMalloc(&aii, sizeof(T));
for (int i = 0; i < k; ++i)
{
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
int i2 = m - i;
/* Computing MIN */
int i3 = i + 1;
larfg(i2, &a[i + i * lda],
&a[std::min(i3, m - 1) + i * lda],
1, &tau[i]);
if (i < n - 1)
{
/* Apply H(i) to A(i:m,i+1:n) from the left */
cudaMemcpy(aii, &a[i + i * lda], sizeof(T), cudaMemcpyDeviceToDevice);
set1 <<< 1, 1>>>(&a[i + i * lda]);
cudaCheckError();
i2 = m - i;
i3 = n - i - 1;
larf(i2, i3, &a[i + i * lda], 1,
&tau[i], &a[i + (i + 1) * lda], lda, work);
cudaMemcpy(&a[i + i * lda], aii, sizeof(T), cudaMemcpyDeviceToDevice);
}
}
cudaFree(aii);
}
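// Illustrative sketch only (never instantiated; the name and argument layout are
// assumptions, matching how the class wrappers below call these helpers): how the
// two unblocked routines above combine into an explicit thin-QR factorization of
// a column-major m-by-n device matrix with m >= n. d_A is device memory with
// leading dimension lda, h_tau is a host array of at least min(m, n) entries,
// and d_work is device scratch of at least n entries for the larf() gemv.
template <typename T>
void gpu_thin_qr_sketch(int m, int n, T *d_A, int lda, T *h_tau, T *d_work)
{
gpu_geqrf(m, n, d_A, lda, h_tau, d_work); // R in the upper triangle, reflectors below it
gpu_orgqr(m, n, n, d_A, lda, h_tau, d_work, 1); // overwrite d_A with the m-by-n Q factor
}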
} // end anonymous namespace
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_geqrf_dispatch(int m, int n, float *A, int lda,
float *tau, float *work, int *info)
{
return magma_sgeqrf_gpu(m, n, A, lda, tau, work, info);
}
int magma_geqrf_dispatch(int m, int n, double *A, int lda,
double *tau, double *work, int *info)
{
return magma_dgeqrf_gpu(m, n, A, lda, tau, work, info);
}
template <typename T>
void magma_geqrf(int m, int n, T *A, int lda,
T *tau, T *work)
{
int info;
magma_geqrf_dispatch(m, n, A, lda, tau, work, &info);
magmaCheckError(info);
}
int magma_orgqr_dispatch(int m, int n, int k, float *A, int lda,
float *tau, float *work, int lwork, int *info)
{
return magma_sorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
int magma_orgqr_dispatch(int m, int n, int k, double *A, int lda,
double *tau, double *work, int lwork, int *info)
{
return magma_dorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
template <typename T>
void magma_orgqr(int m, int n, int k, T *A, int lda,
T *tau, T *work, int lwork)
{
int info;
magma_orgqr_dispatch(m, n, k, A, lda, tau, work, lwork, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#else
gpu_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#else
gpu_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#endif
}
#define AMGX_CASE_LINE(CASE) \
template class Lapack<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
|
29cbaa9cbac8e95d36d39858d97e24474e6b85e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* special helper function for computing centroids */
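// Note: each thread writes the weighted coordinates p*x and p*y of its pixel
// into the a and b arrays (with c holding the weights p); the centroid itself
// is presumably obtained elsewhere by reducing these arrays, i.e.
// (sum(p*x)/sum(p), sum(p*y)/sum(p)).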
#define KERNEL_FUNC_QUALIFIER __global__
#define CENT_KERNEL( typ ) \
\
KERNEL_FUNC_QUALIFIER void typ##_slow_cent_helper \
( /*std_type *x_array, dim3 inc1, std_type *y_array, dim3 inc2, \
std_type *input, dim3 inc3, dim3 len*/ DECLARE_KERN_ARGS_SLEN_3 ) \
\
{ \
dim3 index; \
uint32_t offset1, offset2, offset3; \
std_type p; \
\
index.x = blockIdx.x * blockDim.x + threadIdx.x; \
index.y = blockIdx.y * blockDim.y + threadIdx.y; \
\
offset1 = index.y * inc1.x + index.x; \
offset2 = index.y * inc2.x + index.x; \
offset3 = index.y * inc3.x + index.x; \
\
p = *(/*input*/ c + offset3); \
*(/*x_array*/a+offset1) = p * index.x; \
*(/*y_array*/b+offset2) = p * index.y; \
}
#define CK( c ) CENT_KERNEL( c )
CK( type_code )
|
29cbaa9cbac8e95d36d39858d97e24474e6b85e5.cu
|
/* special helper function for computing centroids */
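// Note: each thread writes the weighted coordinates p*x and p*y of its pixel
// into the a and b arrays (with c holding the weights p); the centroid itself
// is presumably obtained elsewhere by reducing these arrays, i.e.
// (sum(p*x)/sum(p), sum(p*y)/sum(p)).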
#define KERNEL_FUNC_QUALIFIER __global__
#define CENT_KERNEL( typ ) \
\
KERNEL_FUNC_QUALIFIER void typ##_slow_cent_helper \
( /*std_type *x_array, dim3 inc1, std_type *y_array, dim3 inc2, \
std_type *input, dim3 inc3, dim3 len*/ DECLARE_KERN_ARGS_SLEN_3 ) \
\
{ \
dim3 index; \
uint32_t offset1, offset2, offset3; \
std_type p; \
\
index.x = blockIdx.x * blockDim.x + threadIdx.x; \
index.y = blockIdx.y * blockDim.y + threadIdx.y; \
\
offset1 = index.y * inc1.x + index.x; \
offset2 = index.y * inc2.x + index.x; \
offset3 = index.y * inc3.x + index.x; \
\
p = *(/*input*/ c + offset3); \
*(/*x_array*/a+offset1) = p * index.x; \
*(/*y_array*/b+offset2) = p * index.y; \
}
#define CK( c ) CENT_KERNEL( c )
CK( type_code )
|
e887deda2f287d6d03e92e43460a5d32a1720e86.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
*
* Date 11 june 2009
* ====
*
* Authors Vincent Garcia
* ======= Eric Debreuve
* Michel Barlaud
*
* Description Given a reference point set and a query point set, the program returns
* =========== the distance between each query point and its k-th nearest neighbor in
* the reference point set. Only the distance is provided. The computation
* is performed using the API NVIDIA CUDA.
*
* Paper Fast k nearest neighbor search using GPU
* =====
*
* BibTeX @INPROCEEDINGS{2008_garcia_cvgpu,
* ====== author = {V. Garcia and E. Debreuve and M. Barlaud},
* title = {Fast k nearest neighbor search using GPU},
* booktitle = {CVPR Workshop on Computer Vision on GPU},
* year = {2008},
* address = {Anchorage, Alaska, USA},
* month = {June}
* }
*
*/
// If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0.
#define MATLAB_CODE 1
// Includes
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "rocblas.h"
#if MATLAB_CODE == 1
#include "mex.h"
#else
#include <time.h>
#endif
// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES 262144
#define MAX_TEXTURE_WIDTH_IN_BYTES 65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES 32768
#define MAX_PART_OF_FREE_MEMORY_USED 0.9
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
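// Note on the method used below: squared distances are assembled from the expansion
// ||q - r||^2 = ||q||^2 + ||r||^2 - 2 * q.r : cuComputeNorm provides the squared
// norms of the reference and query points, the SGEMM call provides -2 * Q * R^T
// (one entry per query/reference pair), cuAddRNorm adds ||r||^2, and, after the
// partial sort, cuAddQNormAndSqrt adds ||q||^2 and takes the square root on the
// row holding the k-th smallest value only.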
/**
* Given a matrix of size width*height, compute the square norm of each column.
*
* @param mat : the matrix
* @param width : the number of columns for a column major storage matrix
* @param pitch : the pitch in number of columns
* @param height : the number of rows for a column major storage matrix
* @param norm : the vector containing the norm of the matrix
*/
__global__ void cuComputeNorm(float *mat, int width, int pitch, int height, float *norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
float val, sum=0;
int i;
for (i=0;i<height;i++){
val = mat[i*pitch+xIndex];
sum += val*val;
}
norm[xIndex] = sum;
}
}
/**
* Given the distance matrix of size width*height, adds the column vector
* of size 1*height to each column of the matrix.
*
* @param dist : the matrix
* @param width : the number of columns for a column major storage matrix
* @param pitch : the pitch in number of columns
* @param height : the number of rows for a column major storage matrix
* @param vec : the vector to be added
*/
__global__ void cuAddRNorm(float *dist, int width, int pitch, int height, float *vec){
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int xIndex = blockIdx.x * blockDim.x + tx;
unsigned int yIndex = blockIdx.y * blockDim.y + ty;
__shared__ float shared_vec[16];
if (tx==0 && yIndex<height)
shared_vec[ty]=vec[yIndex];
__syncthreads();
if (xIndex<width && yIndex<height)
dist[yIndex*pitch+xIndex]+=shared_vec[ty];
}
/**
* Given two row vectors with width column, adds the two vectors and compute
* the square root of the sum. The result is stored in the first vector.
*
* @param vec1 : the first vector
* @param vec2 : the second vector
* @param width : the number of columns for a column major storage matrix
*/
__global__ void cuAddQNormAndSqrt(float *vec1, float *vec2, int width){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
vec1[xIndex] = sqrt(vec1[xIndex]+vec2[xIndex]);
}
}
/**
* Gathers the k smallest distances of each column of the distance matrix into its top rows.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param height height of the distance matrix
* @param k number of smallest distance to consider
*/
__global__ void cuInsertionSort(float *dist, int width, int pitch, int height, int k){
// Variables
int l,i,j;
float *p;
float v, max_value;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift and max value
p = dist+xIndex;
max_value = *p;
// Part 1 : sort the k first elements
for (l=pitch;l<k*pitch;l+=pitch){
v = *(p+l);
if (v<max_value){
i=0; while (i<l && *(p+i)<=v) i+=pitch;
for (j=l;j>i;j-=pitch)
*(p+j) = *(p+j-pitch);
*(p+i) = v;
}
max_value = *(p+l);
}
// Part 2 : insert element in the k-th first lines
for (l=k*pitch;l<height*pitch;l+=pitch){
v = *(p+l);
if (v<max_value){
i=0; while (i<k*pitch && *(p+i)<=v) i+=pitch;
for (j=(k-1)*pitch;j>i;j-=pitch)
*(p+j) = *(p+j-pitch);
*(p+i) = v;
max_value = *(p+(k-1)*pitch);
}
}
}
}
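// After cuInsertionSort, the first k entries of every column are sorted in
// increasing order, so row k-1 holds, for each query, the k-th smallest value
// (adding ||q||^2 and taking the square root afterwards turns it into the k-th
// nearest-neighbor distance); this is the only row copied back to the host below.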
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message returned during the memory allocation.
*
* @param error error value returned by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(hipError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
#if MATLAB_CODE == 1
mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION");
#endif
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distance to the k-th nearest neighbor for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
* @param k number of neighbor to consider
* @param dist_host distances to k-th nearest neighbor ; pointer to linear matrix
*
*/
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host){
unsigned int size_of_float = sizeof(float);
// Variables
float *dist_dev;
float *query_dev;
float *ref_dev;
float *query_norm;
float *ref_norm;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
unsigned int memory_total;
unsigned int memory_free;
hipError_t result;
// CUDA Initialisation
hipInit(0);
hipblasInit();
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
hipCtx_t cuContext;
hipDevice_t cuDevice=0;
hipCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
hipCtxDetach (cuContext);
// Determine maximum number of query that can be treated
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width * (height+1) ) / ( size_of_float * (height + ref_width + 1) );
max_nb_query_traited = min( query_width, (max_nb_query_traited / 16) * 16 );
// Allocation of global memory for query points, ||query||, and for 2.R^T.Q
result = hipMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, (height + ref_width + 1));
if (result){
printErrorMessage(result, max_nb_query_traited * size_of_float * ( height + ref_width + 1 ) );
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
query_norm = query_dev + height * query_pitch;
dist_dev = query_norm + query_pitch;
// Allocation of global memory for reference points and ||query||
result = hipMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height+1);
if (result){
printErrorMessage(result, ref_width * size_of_float * ( height+1 ));
hipFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes / size_of_float;
ref_norm = ref_dev + height * ref_pitch;
// Memory copy of ref_host in ref_dev
result = hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, hipMemcpyHostToDevice);
// Computation of reference square norm
dim3 G_ref_norm(ref_width/256, 1, 1);
dim3 T_ref_norm(256, 1, 1);
if (ref_width%256 != 0) G_ref_norm.x += 1;
hipLaunchKernelGGL(( cuComputeNorm), dim3(G_ref_norm),dim3(T_ref_norm), 0, 0, ref_dev, ref_width, ref_pitch, height, ref_norm);
// Main loop: split queries to fit in GPU memory
for (int i=0;i<query_width;i+=max_nb_query_traited){
// Number of query points actually used
actual_nb_query_width = min(max_nb_query_traited, query_width-i);
// Memory copy of query_host in query_dev
hipMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, hipMemcpyHostToDevice);
// Computation of Q square norm
dim3 G_query_norm(actual_nb_query_width/256, 1, 1);
dim3 T_query_norm(256, 1, 1);
if (actual_nb_query_width%256 != 0) G_query_norm.x += 1;
hipLaunchKernelGGL(( cuComputeNorm), dim3(G_query_norm),dim3(T_query_norm), 0, 0, query_dev, actual_nb_query_width, query_pitch, height, query_norm);
// Computation of Q*transpose(R)
hipblasSgemm('n', 't', (int)query_pitch, (int)ref_pitch, height, (float)-2.0, query_dev, query_pitch, ref_dev, ref_pitch, (float)0.0, dist_dev, query_pitch);
// Add R norm to distances
dim3 grid(actual_nb_query_width/16, ref_width/16, 1);
dim3 thread(16, 16, 1);
if (actual_nb_query_width%16 != 0) grid.x += 1;
if (ref_width%16 != 0) grid.y += 1;
hipLaunchKernelGGL(( cuAddRNorm), dim3(grid),dim3(thread), 0, 0, dist_dev, actual_nb_query_width, query_pitch, ref_width,ref_norm);
// Sort each column
hipLaunchKernelGGL(( cuInsertionSort), dim3(G_query_norm),dim3(T_query_norm), 0, 0, dist_dev,actual_nb_query_width,query_pitch,ref_width,k);
// Add Q norm and compute Sqrt ONLY ON ROW K-1
hipLaunchKernelGGL(( cuAddQNormAndSqrt), dim3(G_query_norm),dim3(T_query_norm), 0, 0, dist_dev+(k-1)*query_pitch, query_norm, actual_nb_query_width);
// Memory copy
hipMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev+(k-1)*query_pitch, query_pitch_in_bytes, actual_nb_query_width*size_of_float, 1, hipMemcpyDeviceToHost);
}
// Free memory
hipFree(ref_dev);
hipFree(query_dev);
// CUBLAS shutdown
hipblasShutdown();
}
//-----------------------------------------------------------------------------------------------//
// MATLAB INTERFACES & C EXAMPLE //
//-----------------------------------------------------------------------------------------------//
#if MATLAB_CODE == 1
/**
* Interface to use CUDA code in Matlab (gateway routine).
*
* @param nlhs Number of expected mxArrays (Left Hand Side)
* @param plhs Array of pointers to expected outputs
* @param nrhs Number of inputs (Right Hand Side)
* @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction .
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// Variables
float* ref;
int ref_width;
int ref_height;
float* query;
int query_width;
int query_height;
float* dist;
int k;
// Reference points
ref = (float *) mxGetData(prhs[0]);
ref_width = mxGetM(prhs[0]);
ref_height = mxGetN(prhs[0]);
// Query points
query = (float *) mxGetData(prhs[1]);
query_width = mxGetM(prhs[1]);
query_height = mxGetN(prhs[1]);
// Number of neighbors to consider
k = (int)mxGetScalar(prhs[2]);
// Verification of the reference point and query point sizes
if (ref_height!=query_height)
mexErrMsgTxt("Data must have the same dimension");
if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)");
if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Query number is too large for CUDA (Max=65536)");
// Allocation of dist array
dist = (float *) mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width,1,mxSINGLE_CLASS,mxREAL));
// Call KNN CUDA
knn(ref, ref_width, query, query_width, ref_height, k, dist);
}
#else // C code
/**
* Example of use of kNN search CUDA.
*/
int main(void){
// Variables and parameters
float* ref; // Pointer to reference point array
float* query; // Pointer to query point array
float* dist; // Pointer to distance array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points, max=8192
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int i;
// Memory allocation
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * sizeof(float));
// Init
srand(time(NULL));
for (i=0 ; i<ref_nb * dim ; i++) ref[i] = (float)rand() / (float)RAND_MAX;
for (i=0 ; i<query_nb * dim ; i++) query[i] = (float)rand() / (float)RAND_MAX;
// Variables for duration evaluation
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsed_time;
// Display informations
printf("Number of reference points : %6d\n", ref_nb );
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim );
printf("Number of neighbors to consider : %4d\n", k );
printf("Processing kNN search :" );
// Call kNN search CUDA
hipEventRecord(start, 0);
for (i=0; i<iterations; i++)
knn(ref, ref_nb, query, query_nb, dim, k, dist);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time/1000, iterations, elapsed_time/(iterations*1000));
// Destroy cuda event object and free memory
hipEventDestroy(start);
hipEventDestroy(stop);
free(dist);
free(query);
free(ref);
}
#endif
|
e887deda2f287d6d03e92e43460a5d32a1720e86.cu
|
/**
*
* Date 11 june 2009
* ====
*
* Authors Vincent Garcia
* ======= Eric Debreuve
* Michel Barlaud
*
* Description Given a reference point set and a query point set, the program returns
* =========== the distance between each query point and its k-th nearest neighbor in
* the reference point set. Only the distance is provided. The computation
* is performed using the API NVIDIA CUDA.
*
* Paper Fast k nearest neighbor search using GPU
* =====
*
* BibTeX @INPROCEEDINGS{2008_garcia_cvgpu,
* ====== author = {V. Garcia and E. Debreuve and M. Barlaud},
* title = {Fast k nearest neighbor search using GPU},
* booktitle = {CVPR Workshop on Computer Vision on GPU},
* year = {2008},
* address = {Anchorage, Alaska, USA},
* month = {June}
* }
*
*/
// If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0.
#define MATLAB_CODE 1
// Includes
#include <stdio.h>
#include <math.h>
#include "cuda.h"
#include "cublas.h"
#if MATLAB_CODE == 1
#include "mex.h"
#else
#include <time.h>
#endif
// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES 262144
#define MAX_TEXTURE_WIDTH_IN_BYTES 65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES 32768
#define MAX_PART_OF_FREE_MEMORY_USED 0.9
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Given a matrix of size width*height, compute the square norm of each column.
*
* @param mat : the matrix
* @param width : the number of columns for a column major storage matrix
* @param pitch : the pitch in number of columns
* @param height : the number of rows for a column major storage matrix
* @param norm : the vector containing the norm of the matrix
*/
__global__ void cuComputeNorm(float *mat, int width, int pitch, int height, float *norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
float val, sum=0;
int i;
for (i=0;i<height;i++){
val = mat[i*pitch+xIndex];
sum += val*val;
}
norm[xIndex] = sum;
}
}
/**
* Given the distance matrix of size width*height, adds the column vector
* of size 1*height to each column of the matrix.
*
* @param dist : the matrix
* @param width : the number of columns for a column major storage matrix
* @param pitch : the pitch in number of columns
* @param height : the number of rows for a column major storage matrix
* @param vec : the vector to be added
*/
__global__ void cuAddRNorm(float *dist, int width, int pitch, int height, float *vec){
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int xIndex = blockIdx.x * blockDim.x + tx;
unsigned int yIndex = blockIdx.y * blockDim.y + ty;
__shared__ float shared_vec[16];
if (tx==0 && yIndex<height)
shared_vec[ty]=vec[yIndex];
__syncthreads();
if (xIndex<width && yIndex<height)
dist[yIndex*pitch+xIndex]+=shared_vec[ty];
}
/**
* Given two row vectors with width column, adds the two vectors and compute
* the square root of the sum. The result is stored in the first vector.
*
* @param vec1 : the first vector
* @param vec2 : the second vector
* @param width : the number of columns for a column major storage matrix
*/
__global__ void cuAddQNormAndSqrt(float *vec1, float *vec2, int width){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
vec1[xIndex] = sqrt(vec1[xIndex]+vec2[xIndex]);
}
}
/**
* Gathers the k smallest distances of each column of the distance matrix into its top rows.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param height height of the distance matrix
* @param k number of smallest distance to consider
*/
__global__ void cuInsertionSort(float *dist, int width, int pitch, int height, int k){
// Variables
int l,i,j;
float *p;
float v, max_value;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift and max value
p = dist+xIndex;
max_value = *p;
// Part 1 : sort the k first elements
for (l=pitch;l<k*pitch;l+=pitch){
v = *(p+l);
if (v<max_value){
i=0; while (i<l && *(p+i)<=v) i+=pitch;
for (j=l;j>i;j-=pitch)
*(p+j) = *(p+j-pitch);
*(p+i) = v;
}
max_value = *(p+l);
}
// Part 2 : insert element in the k-th first lines
for (l=k*pitch;l<height*pitch;l+=pitch){
v = *(p+l);
if (v<max_value){
i=0; while (i<k*pitch && *(p+i)<=v) i+=pitch;
for (j=(k-1)*pitch;j>i;j-=pitch)
*(p+j) = *(p+j-pitch);
*(p+i) = v;
max_value = *(p+(k-1)*pitch);
}
}
}
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message returned during the memory allocation.
*
* @param error error value returned by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(cudaError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
#if MATLAB_CODE == 1
mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION");
#endif
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distance to the k-th nearest neighbor for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
* @param k number of neighbor to consider
* @param dist_host distances to k-th nearest neighbor ; pointer to linear matrix
*
*/
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host){
unsigned int size_of_float = sizeof(float);
// Variables
float *dist_dev;
float *query_dev;
float *ref_dev;
float *query_norm;
float *ref_norm;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
unsigned int memory_total;
unsigned int memory_free;
cudaError_t result;
// CUDA Initialisation
cuInit(0);
cublasInit();
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
CUcontext cuContext;
CUdevice cuDevice=0;
cuCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
cuCtxDetach (cuContext);
// Determine maximum number of query that can be treated
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width * (height+1) ) / ( size_of_float * (height + ref_width + 1) );
max_nb_query_traited = min( query_width, (max_nb_query_traited / 16) * 16 );
// Allocation of global memory for query points, ||query||, and for 2.R^T.Q
result = cudaMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, (height + ref_width + 1));
if (result){
printErrorMessage(result, max_nb_query_traited * size_of_float * ( height + ref_width + 1 ) );
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
query_norm = query_dev + height * query_pitch;
dist_dev = query_norm + query_pitch;
// Allocation of global memory for reference points and ||query||
result = cudaMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height+1);
if (result){
printErrorMessage(result, ref_width * size_of_float * ( height+1 ));
cudaFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes / size_of_float;
ref_norm = ref_dev + height * ref_pitch;
// Memory copy of ref_host in ref_dev
result = cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, cudaMemcpyHostToDevice);
// Computation of reference square norm
dim3 G_ref_norm(ref_width/256, 1, 1);
dim3 T_ref_norm(256, 1, 1);
if (ref_width%256 != 0) G_ref_norm.x += 1;
cuComputeNorm<<<G_ref_norm,T_ref_norm>>>(ref_dev, ref_width, ref_pitch, height, ref_norm);
// Main loop: split queries to fit in GPU memory
for (int i=0;i<query_width;i+=max_nb_query_traited){
// Number of query points actually used
actual_nb_query_width = min(max_nb_query_traited, query_width-i);
// Memory copy of query_host in query_dev
cudaMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, cudaMemcpyHostToDevice);
// Computation of Q square norm
dim3 G_query_norm(actual_nb_query_width/256, 1, 1);
dim3 T_query_norm(256, 1, 1);
if (actual_nb_query_width%256 != 0) G_query_norm.x += 1;
cuComputeNorm<<<G_query_norm,T_query_norm>>>(query_dev, actual_nb_query_width, query_pitch, height, query_norm);
// Computation of Q*transpose(R)
cublasSgemm('n', 't', (int)query_pitch, (int)ref_pitch, height, (float)-2.0, query_dev, query_pitch, ref_dev, ref_pitch, (float)0.0, dist_dev, query_pitch);
// Add R norm to distances
dim3 grid(actual_nb_query_width/16, ref_width/16, 1);
dim3 thread(16, 16, 1);
if (actual_nb_query_width%16 != 0) grid.x += 1;
if (ref_width%16 != 0) grid.y += 1;
cuAddRNorm<<<grid,thread>>>(dist_dev, actual_nb_query_width, query_pitch, ref_width,ref_norm);
// Sort each column
cuInsertionSort<<<G_query_norm,T_query_norm>>>(dist_dev,actual_nb_query_width,query_pitch,ref_width,k);
// Add Q norm and compute Sqrt ONLY ON ROW K-1
cuAddQNormAndSqrt<<<G_query_norm,T_query_norm>>>( dist_dev+(k-1)*query_pitch, query_norm, actual_nb_query_width);
// Copy the k-th nearest neighbor distances (row k-1) back to the host
cudaMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev+(k-1)*query_pitch, query_pitch_in_bytes, actual_nb_query_width*size_of_float, 1, cudaMemcpyDeviceToHost);
}
// Free memory
cudaFree(ref_dev);
cudaFree(query_dev);
// CUBLAS shutdown
cublasShutdown();
}
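/**
 * Minimal CPU reference sketch (not part of the original code): it spells out
 * the ||q||^2 - 2*q.r + ||r||^2 decomposition that the GPU path above realises
 * with cuComputeNorm, cublasSgemm and cuAddRNorm, and returns the distance to
 * the k-th nearest reference for each query. It assumes the same
 * dimension-major layout as knn() (ref[d*ref_width+i] is coordinate d of
 * reference point i) and that <math.h>/<stdlib.h> are available; the function
 * name is illustrative only.
 */
void knn_cpu_reference(const float* ref, int ref_width, const float* query, int query_width, int height, int k, float* dist){
    float* d2 = (float*) malloc(ref_width * sizeof(float));
    for (int j=0; j<query_width; j++){
        // Squared distance from query j to every reference point
        for (int i=0; i<ref_width; i++){
            float q_norm = 0.0f, r_norm = 0.0f, dot = 0.0f;
            for (int d=0; d<height; d++){
                float q = query[d*query_width + j];
                float r = ref[d*ref_width + i];
                q_norm += q*q;
                r_norm += r*r;
                dot    += q*r;
            }
            d2[i] = q_norm - 2.0f*dot + r_norm;
        }
        // Partial selection sort: only the k smallest squared distances matter
        for (int a=0; a<k && a<ref_width; a++){
            int min_idx = a;
            for (int b=a+1; b<ref_width; b++)
                if (d2[b] < d2[min_idx]) min_idx = b;
            float tmp = d2[a]; d2[a] = d2[min_idx]; d2[min_idx] = tmp;
        }
        dist[j] = sqrtf(d2[k-1]);
    }
    free(d2);
}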
//-----------------------------------------------------------------------------------------------//
// MATLAB INTERFACES & C EXAMPLE //
//-----------------------------------------------------------------------------------------------//
#if MATLAB_CODE == 1
/**
* Interface to use CUDA code in Matlab (gateway routine).
*
* @param nlhs Number of expected mxArrays (Left Hand Side)
* @param plhs Array of pointers to expected outputs
* @param nrhs Number of inputs (Right Hand Side)
 * @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction.
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// Variables
float* ref;
int ref_width;
int ref_height;
float* query;
int query_width;
int query_height;
float* dist;
int k;
// Reference points
ref = (float *) mxGetData(prhs[0]);
ref_width = mxGetM(prhs[0]);
ref_height = mxGetN(prhs[0]);
// Query points
query = (float *) mxGetData(prhs[1]);
query_width = mxGetM(prhs[1]);
query_height = mxGetN(prhs[1]);
// Number of neighbors to consider
k = (int)mxGetScalar(prhs[2]);
// Verification of the reference point and query point sizes
if (ref_height!=query_height)
mexErrMsgTxt("Data must have the same dimension");
if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)");
if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Query number is too large for CUDA (Max=65536)");
// Allocation of dist array
dist = (float *) mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width,1,mxSINGLE_CLASS,mxREAL));
// Call KNN CUDA
knn(ref, ref_width, query, query_width, ref_height, k, dist);
}
#else // C code
/**
* Example of use of kNN search CUDA.
*/
int main(void){
// Variables and parameters
float* ref; // Pointer to reference point array
float* query; // Pointer to query point array
float* dist; // Pointer to distance array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points, max=8192
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int i;
// Memory allocation
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * sizeof(float));
// Init
srand(time(NULL));
for (i=0 ; i<ref_nb * dim ; i++) ref[i] = (float)rand() / (float)RAND_MAX;
for (i=0 ; i<query_nb * dim ; i++) query[i] = (float)rand() / (float)RAND_MAX;
// Variables for duration evaluation
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsed_time;
// Display informations
printf("Number of reference points : %6d\n", ref_nb );
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim );
printf("Number of neighbors to consider : %4d\n", k );
printf("Processing kNN search :" );
// Call kNN search CUDA
cudaEventRecord(start, 0);
for (i=0; i<iterations; i++)
knn(ref, ref_nb, query, query_nb, dim, k, dist);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time/1000, iterations, elapsed_time/(iterations*1000));
// Destroy cuda event object and free memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(dist);
free(query);
free(ref);
}
#endif
|
d24f6ce65d3f157b5aa3c1bd96ac510c80da3d0f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/driver.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
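// Minimal usage sketch (not part of the original source): the fields declared
// in GPUHistMakerTrainParam above are filled from key/value Args through
// dmlc's UpdateAllowUnknown, exactly as GPUHistMakerSpecialised::Configure
// does further below. The guard macro and the parameter values are
// illustrative only.
#if defined(GPU_HIST_PARAM_USAGE_SKETCH)
inline GPUHistMakerTrainParam MakeExampleHistMakerParam() {
  GPUHistMakerTrainParam hist_param;
  Args args{{"single_precision_histogram", "true"},
            {"debug_synchronize", "false"}};
  hist_param.UpdateAllowUnknown(args);  // unknown keys are returned, not fatal
  return hist_param;
}
#endif  // GPU_HIST_PARAM_USAGE_SKETCH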
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
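// Minimal usage sketch (not part of the original source) for DeviceHistogram:
// allocate a node's histogram, hand its span to a device kernel, then reset.
// The node index, gradient type and guard macro are illustrative only.
#if defined(DEVICE_HISTOGRAM_USAGE_SKETCH)
inline void DeviceHistogramUsageSketch(int device_id, int n_bins) {
  DeviceHistogram<GradientPairPrecise> hist;
  hist.Init(device_id, n_bins);
  hist.AllocateHistogram(/*nidx=*/0);      // grows or recycles device storage
  auto d_hist = hist.GetNodeHistogram(0);  // common::Span over device memory
  dh::LaunchN(device_id, d_hist.size(), [=] __device__(size_t idx) {
    d_hist[idx] += GradientPairPrecise{1.0, 1.0};  // e.g. accumulate gradients
  });
  hist.Reset();  // zero the storage and forget the node-to-offset map
}
#endif  // DEVICE_HISTOGRAM_USAGE_SKETCH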
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = 0;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx),
node_value_constraints[nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSingleSplit(dh::ToSpan(splits_out), inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result.front();
}
void EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree,
common::Span<ExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx),
node_value_constraints[left_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx),
node_value_constraints[right_nidx],
dh::ToSpan(monotone_constraints)};
auto d_splits_out = dh::ToSpan(splits_out);
EvaluateSplits(d_splits_out, left, right);
dh::TemporaryArray<ExpandEntry> entries(2);
auto d_entries = entries.data().get();
dh::LaunchN(device_id, 1, [=] __device__(size_t idx) {
d_entries[0] =
ExpandEntry(left_nidx, candidate.depth + 1, d_splits_out[0]);
d_entries[1] =
ExpandEntry(right_nidx, candidate.depth + 1, d_splits_out[1]);
});
dh::safe_cuda(hipMemcpyAsync(
pinned_candidates_out.data(), entries.data().get(),
sizeof(ExpandEntry) * entries.size(), hipMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
node_value_constraints.resize(tree.GetNodes().size());
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(
param, parent_sum);
auto left_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.left_sum) *
param.learning_rate;
auto right_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.right_sum) *
param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), candidate.split.left_sum,
candidate.split.right_sum,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
ExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
return ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
Driver driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<ExpandEntry>(expand_set.size() * 2, ExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = ExpandEntry();
new_candidates[i * 2 + 1] = ExpandEntry();
}
}
dh::safe_cuda(hipDeviceSynchronize());
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed in args can be empty, if we simply purge the old maker without
// preserving parameters then we can't do Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
d24f6ce65d3f157b5aa3c1bd96ac510c80da3d0f.cu
|
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/driver.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = 0;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx),
node_value_constraints[nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSingleSplit(dh::ToSpan(splits_out), inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result.front();
}
void EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree,
common::Span<ExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx),
node_value_constraints[left_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx),
node_value_constraints[right_nidx],
dh::ToSpan(monotone_constraints)};
auto d_splits_out = dh::ToSpan(splits_out);
EvaluateSplits(d_splits_out, left, right);
dh::TemporaryArray<ExpandEntry> entries(2);
auto d_entries = entries.data().get();
dh::LaunchN(device_id, 1, [=] __device__(size_t idx) {
d_entries[0] =
ExpandEntry(left_nidx, candidate.depth + 1, d_splits_out[0]);
d_entries[1] =
ExpandEntry(right_nidx, candidate.depth + 1, d_splits_out[1]);
});
dh::safe_cuda(cudaMemcpyAsync(
pinned_candidates_out.data(), entries.data().get(),
sizeof(ExpandEntry) * entries.size(), cudaMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
node_value_constraints.resize(tree.GetNodes().size());
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(
param, parent_sum);
auto left_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.left_sum) *
param.learning_rate;
auto right_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.right_sum) *
param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), candidate.split.left_sum,
candidate.split.right_sum,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
ExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
return ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
Driver driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<ExpandEntry>(expand_set.size() * 2, ExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = ExpandEntry();
new_candidates[i * 2 + 1] = ExpandEntry();
}
}
dh::safe_cuda(cudaDeviceSynchronize());
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed in args can be empty, if we simply purge the old maker without
// preserving parameters then we can't do Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
77751583b552d398a4c0e1570f3204473884a751.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#define imin(a,b) (a<b?a:b)
int n, m;
double result;
double * a;
double * dev_a;
void init (int argc, char* argv[]){
assert(argc == 3);
n = atoi(argv[1]);
m = atoi(argv[2]);
result = 0.0;
a = (double *)malloc(n*m*sizeof(double));
for (int i=0; i<n; i++) {
for (int j=0; j<m; j++)
a[i*m+j] = i*2.0 + j*1.0;
}
}
__global__ void kernel(double *dev_a, int n, int m){
  int col = threadIdx.x + blockIdx.x * blockDim.x;
  int row = threadIdx.y + blockIdx.y * blockDim.y;
  // guard against the extra threads launched when n or m is not a multiple of the block size
  if (row < n && col < m)
    dev_a[m*row+col] = dev_a[m*row+col] * dev_a[m*row+col];
  //dev_a[m*row+col] = 0.0;
}
int main (int argc, char* argv[]){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
init(argc, argv);
/*
for(int i = 0; i< n; i++){
printf("\n");
for(int j = 0; j<m; j++){
printf("%f ",a[i*m+j]);
}
} */
dim3 dimBlock(16,16);
// the kernel takes its column index from x and its row index from y, so the
// grid must span the m columns in x and the n rows in y
int dimx = (int) ceil((double)m/dimBlock.x);
int dimy = (int) ceil((double)n/dimBlock.y);
// printf("dimx: %d, dimy: %d\n", dimx, dimy);
dim3 dimGrid(dimx,dimy);
int size = n*m*sizeof(double);
hipMalloc((void**)&dev_a, size);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_a, n,m);
hipError_t err = hipGetLastError();
if(err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipMemcpy( a, dev_a, size, hipMemcpyDeviceToHost);
//printf("done gpu stuff\n");
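  // Host-side reduction: each a[i*m+j] now holds the square of the original
  // entry, so sqrt(temp) below is the 2-norm of column j of the original
  // matrix and `total` accumulates the sum of those column norms.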
double total = 0.0;
for(int j = 0; j<m; j++){
double temp = 0.0;
for(int i = 0; i<n; i++){
temp+= a[i*m+j];
}
total+= sqrt(temp);
}
hipEventRecord(stop);
hipEventSynchronize(stop);  // make sure the stop event has completed before reading it
float secs = 0;
hipEventElapsedTime(&secs, start, stop);  // elapsed time is reported in milliseconds
secs = secs / 1000;  // convert to seconds
printf("%f\n", total);
#ifdef TIME
printf("Time: %.2f\n", secs);
#endif
free(a);
hipFree(dev_a);
}
|
77751583b552d398a4c0e1570f3204473884a751.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#define imin(a,b) (a<b?a:b)
int n, m;
double result;
double * a;
double * dev_a;
void init (int argc, char* argv[]){
assert(argc == 3);
n = atoi(argv[1]);
m = atoi(argv[2]);
result = 0.0;
a = (double *)malloc(n*m*sizeof(double));
for (int i=0; i<n; i++) {
for (int j=0; j<m; j++)
a[i*m+j] = i*2.0 + j*1.0;
}
}
__global__ void kernel(double *dev_a, int n, int m){
  int col = threadIdx.x + blockIdx.x * blockDim.x;
  int row = threadIdx.y + blockIdx.y * blockDim.y;
  // guard against the extra threads launched when n or m is not a multiple of the block size
  if (row < n && col < m)
    dev_a[m*row+col] = dev_a[m*row+col] * dev_a[m*row+col];
  //dev_a[m*row+col] = 0.0;
}
int main (int argc, char* argv[]){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
init(argc, argv);
/*
for(int i = 0; i< n; i++){
printf("\n");
for(int j = 0; j<m; j++){
printf("%f ",a[i*m+j]);
}
} */
dim3 dimBlock(16,16);
// the kernel takes its column index from x and its row index from y, so the
// grid must span the m columns in x and the n rows in y
int dimx = (int) ceil((double)m/dimBlock.x);
int dimy = (int) ceil((double)n/dimBlock.y);
// printf("dimx: %d, dimy: %d\n", dimx, dimy);
dim3 dimGrid(dimx,dimy);
int size = n*m*sizeof(double);
cudaMalloc((void**)&dev_a, size);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
kernel<<<dimGrid,dimBlock>>>(dev_a, n,m);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaMemcpy( a, dev_a, size, cudaMemcpyDeviceToHost);
//printf("done gpu stuff\n");
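  // Host-side reduction: each a[i*m+j] now holds the square of the original
  // entry, so sqrt(temp) below is the 2-norm of column j of the original
  // matrix and `total` accumulates the sum of those column norms.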
double total = 0.0;
for(int j = 0; j<m; j++){
double temp = 0.0;
for(int i = 0; i<n; i++){
temp+= a[i*m+j];
}
total+= sqrt(temp);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);  // make sure the stop event has completed before reading it
float secs = 0;
cudaEventElapsedTime(&secs, start, stop);  // elapsed time is reported in milliseconds
secs = secs / 1000;  // convert to seconds
printf("%f\n", total);
#ifdef TIME
printf("Time: %.2f\n", secs);
#endif
free(a);
cudaFree(dev_a);
}
|
b336ba161995b9bf815adbf915bab86157eddc9a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This sample is an implementation of a simple line-of-sight algorithm:
// Given a height map and a ray originating at some observation point,
// it computes all the points along the ray that are visible from the
// observation point.
// It is based on the description made in "Guy E. Blelloch. Vector models
// for data-parallel computing. MIT Press, 1990" and uses open source CUDA
// Thrust Library
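// Approach used below: compute the viewing angle of every point along the ray,
// run an inclusive max-scan over those angles, and mark a point visible iff its
// own angle is not below the running maximum over all points closer to the
// observer (see computeVisibilities_kernel).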
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_math.h>
// includes, library
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, types
// Boolean
typedef unsigned char Bool;
enum
{
False = 0,
True = 1
};
// 2D height field
struct HeightField
{
int width;
float *height;
};
// Ray
struct Ray
{
float3 origin;
float2 dir;
int length;
float oneOverLength;
};
////////////////////////////////////////////////////////////////////////////////
// declaration, variables
// Height field texture reference
texture<float, 2, hipReadModeElementType> g_HeightFieldTex;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
__global__ void computeAngles_kernel(const Ray, float *);
__global__ void computeVisibilities_kernel(const float *, const float *, int, Bool *);
void lineOfSight_gold(const HeightField, const Ray, Bool *);
__device__ __host__ float2 getLocation(const Ray, int);
__device__ __host__ float getAngle(const Ray, float2, float);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
int res = runTest(argc, argv);
if (res != 1)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a line-of-sight test for CUDA
////////////////////////////////////////////////////////////////////////////////
int runTest(int argc, char **argv)
{
////////////////////////////////////////////////////////////////////////////
// Device initialization
printf("[%s] - Starting...\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
////////////////////////////////////////////////////////////////////////////
// Timer
// Create
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Number of iterations to get accurate timing
uint numIterations = 100;
////////////////////////////////////////////////////////////////////////////
// Height field
HeightField heightField;
// Allocate in host memory
int2 dim = make_int2(10000, 100);
heightField.width = dim.x;
thrust::host_vector<float> height(dim.x * dim.y);
heightField.height = (float *)&height[0];
//
// Fill in with an arbitrary sine surface
for (int x = 0; x < dim.x; ++x)
for (int y = 0; y < dim.y; ++y)
{
float amp = 0.1f * (x + y);
float period = 2.0f + amp;
*(heightField.height + dim.x * y + x) =
amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f);
}
// Allocate CUDA array in device memory
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *heightFieldArray;
checkCudaErrors(hipMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y));
// Initialize device memory
checkCudaErrors(hipMemcpyToArray(heightFieldArray, 0, 0, heightField.height,
dim.x * dim.y * sizeof(float), hipMemcpyHostToDevice));
// Set texture parameters
g_HeightFieldTex.addressMode[0] = hipAddressModeClamp;
g_HeightFieldTex.addressMode[1] = hipAddressModeClamp;
g_HeightFieldTex.filterMode = hipFilterModePoint;
g_HeightFieldTex.normalized = 0;
// Bind CUDA array to texture reference
checkCudaErrors(hipBindTextureToArray(g_HeightFieldTex, heightFieldArray,
channelDesc));
////////////////////////////////////////////////////////////////////////////
// Ray (starts at origin and traverses the height field diagonally)
Ray ray;
ray.origin = make_float3(0, 0, 2.0f);
int2 dir = make_int2(dim.x - 1, dim.y - 1);
ray.dir = make_float2((float)dir.x, (float)dir.y);
ray.length = max(abs(dir.x), abs(dir.y));
ray.oneOverLength = 1.0f / ray.length;
////////////////////////////////////////////////////////////////////////////
// View angles
// Allocate view angles for each point along the ray
thrust::device_vector<float> d_angles(ray.length);
// Allocate result of max-scan operation on the array of view angles
thrust::device_vector<float> d_scannedAngles(ray.length);
////////////////////////////////////////////////////////////////////////////
// Visibility results
// Allocate visibility results for each point along the ray
thrust::device_vector<Bool> d_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilitiesRef(ray.length);
////////////////////////////////////////////////////////////////////////////
// Reference solution
lineOfSight_gold(heightField, ray, (Bool *)&h_visibilitiesRef[0]);
////////////////////////////////////////////////////////////////////////////
// Device solution
// Execution configuration
//dim3 block(256);
//dim3 block(128);
//dim3 block(64);
//dim3 block(512);
dim3 block(1024);
dim3 grid((uint)ceil(ray.length / (double)block.x));
// Compute device solution
printf("Line of sight\n");
sdkStartTimer(&timer);
for (uint i = 0; i < numIterations; ++i)
{
// Compute view angle for each point along the ray
hipLaunchKernelGGL(( computeAngles_kernel), dim3(grid), dim3(block), 0, 0, ray, thrust::raw_pointer_cast(&d_angles[0]));
getLastCudaError("Kernel execution failed");
// Perform a max-scan operation on the array of view angles
thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum<float>());
getLastCudaError("Kernel execution failed");
// Compute visibility results based on the array of view angles
// and its scanned version
hipLaunchKernelGGL(( computeVisibilities_kernel), dim3(grid), dim3(block), 0, 0, thrust::raw_pointer_cast(&d_angles[0]),
thrust::raw_pointer_cast(&d_scannedAngles[0]),
ray.length,
thrust::raw_pointer_cast(&d_visibilities[0]));
getLastCudaError("Kernel execution failed");
}
hipDeviceSynchronize();
sdkStopTimer(&timer);
getLastCudaError("Kernel execution failed");
// Copy visibility results back to the host
thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin());
// Compare device visibility results against reference results
bool res = compareData(thrust::raw_pointer_cast(&h_visibilitiesRef[0]),
thrust::raw_pointer_cast(&h_visibilities[0]), ray.length, 0.0f, 0.0f);
printf("Average time: %f ms\n\n", sdkGetTimerValue(&timer) / numIterations);
sdkResetTimer(&timer);
// Cleanup memory
checkCudaErrors(hipFreeArray(heightFieldArray));
return res;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute view angles for each point along the ray
//! @param ray ray
//! @param angles view angles
////////////////////////////////////////////////////////////////////////////////
__global__ void computeAngles_kernel(const Ray ray, float *angles)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < ray.length)
{
float2 location = getLocation(ray, i + 1);
float height = tex2D(g_HeightFieldTex, location.x, location.y);
float angle = getAngle(ray, location, height);
angles[i] = angle;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute visibility for each point along the ray
//! @param angles view angles
//! @param scannedAngles max-scanned view angles
//! @param numAngles number of view angles
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
__global__ void computeVisibilities_kernel(const float *angles,
const float *scannedAngles,
int numAngles,
Bool *visibilities)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numAngles)
{
visibilities[i] = scannedAngles[i] <= angles[i];
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set
//! @param heightField height field
//! @param ray ray
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
void lineOfSight_gold(const HeightField heightField, const Ray ray,
Bool *visibilities)
{
float angleMax = asinf(-1.0f);
for (int i = 0; i < ray.length; ++i)
{
float2 location = getLocation(ray, i + 1);
float height = *(heightField.height
+ heightField.width * (int)floorf(location.y)
+ (int)floorf(location.x));
float angle = getAngle(ray, location, height);
if (angle > angleMax)
{
angleMax = angle;
visibilities[i] = True;
}
else
{
visibilities[i] = False;
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the 2D coordinates of the point located at i steps from the origin
//! of the ray
//! @param ray ray
//! @param i integer offset along the ray
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float2 getLocation(const Ray ray, int i)
{
float step = i * ray.oneOverLength;
return make_float2(ray.origin.x, ray.origin.y) + ray.dir * step;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the angle of view between a 3D point and the origin of the ray
//! @param ray ray
//! @param location 2D coordinates of the input point
//! @param height height of the input point
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float getAngle(const Ray ray, float2 location, float height)
{
float2 dir = location - make_float2(ray.origin.x, ray.origin.y);
return atanf((height - ray.origin.z) / length(dir));
}
|
b336ba161995b9bf815adbf915bab86157eddc9a.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This sample is an implementation of a simple line-of-sight algorithm:
// Given a height map and a ray originating at some observation point,
// it computes all the points along the ray that are visible from the
// observation point.
// It is based on the description made in "Guy E. Blelloch. Vector models
// for data-parallel computing. MIT Press, 1990" and uses open source CUDA
// Thrust Library
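// Approach used below: compute the viewing angle of every point along the ray,
// run an inclusive max-scan over those angles, and mark a point visible iff its
// own angle is not below the running maximum over all points closer to the
// observer (see computeVisibilities_kernel).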
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_math.h>
// includes, library
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, types
// Boolean
typedef unsigned char Bool;
enum
{
False = 0,
True = 1
};
// 2D height field
struct HeightField
{
int width;
float *height;
};
// Ray
struct Ray
{
float3 origin;
float2 dir;
int length;
float oneOverLength;
};
////////////////////////////////////////////////////////////////////////////////
// declaration, variables
// Height field texture reference
texture<float, 2, cudaReadModeElementType> g_HeightFieldTex;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
__global__ void computeAngles_kernel(const Ray, float *);
__global__ void computeVisibilities_kernel(const float *, const float *, int, Bool *);
void lineOfSight_gold(const HeightField, const Ray, Bool *);
__device__ __host__ float2 getLocation(const Ray, int);
__device__ __host__ float getAngle(const Ray, float2, float);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
int res = runTest(argc, argv);
if (res != 1)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a line-of-sight test for CUDA
////////////////////////////////////////////////////////////////////////////////
int runTest(int argc, char **argv)
{
////////////////////////////////////////////////////////////////////////////
// Device initialization
printf("[%s] - Starting...\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
////////////////////////////////////////////////////////////////////////////
// Timer
// Create
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Number of iterations to get accurate timing
uint numIterations = 100;
////////////////////////////////////////////////////////////////////////////
// Height field
HeightField heightField;
// Allocate in host memory
int2 dim = make_int2(10000, 100);
heightField.width = dim.x;
thrust::host_vector<float> height(dim.x * dim.y);
heightField.height = (float *)&height[0];
//
// Fill in with an arbitrary sine surface
for (int x = 0; x < dim.x; ++x)
for (int y = 0; y < dim.y; ++y)
{
float amp = 0.1f * (x + y);
float period = 2.0f + amp;
*(heightField.height + dim.x * y + x) =
amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f);
}
// Allocate CUDA array in device memory
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *heightFieldArray;
checkCudaErrors(cudaMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y));
// Initialize device memory
checkCudaErrors(cudaMemcpyToArray(heightFieldArray, 0, 0, heightField.height,
dim.x * dim.y * sizeof(float), cudaMemcpyHostToDevice));
// Set texture parameters
g_HeightFieldTex.addressMode[0] = cudaAddressModeClamp;
g_HeightFieldTex.addressMode[1] = cudaAddressModeClamp;
g_HeightFieldTex.filterMode = cudaFilterModePoint;
g_HeightFieldTex.normalized = 0;
// Bind CUDA array to texture reference
checkCudaErrors(cudaBindTextureToArray(g_HeightFieldTex, heightFieldArray,
channelDesc));
////////////////////////////////////////////////////////////////////////////
// Ray (starts at origin and traverses the height field diagonally)
Ray ray;
ray.origin = make_float3(0, 0, 2.0f);
int2 dir = make_int2(dim.x - 1, dim.y - 1);
ray.dir = make_float2((float)dir.x, (float)dir.y);
ray.length = max(abs(dir.x), abs(dir.y));
ray.oneOverLength = 1.0f / ray.length;
////////////////////////////////////////////////////////////////////////////
// View angles
// Allocate view angles for each point along the ray
thrust::device_vector<float> d_angles(ray.length);
// Allocate result of max-scan operation on the array of view angles
thrust::device_vector<float> d_scannedAngles(ray.length);
////////////////////////////////////////////////////////////////////////////
// Visibility results
// Allocate visibility results for each point along the ray
thrust::device_vector<Bool> d_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilitiesRef(ray.length);
////////////////////////////////////////////////////////////////////////////
// Reference solution
lineOfSight_gold(heightField, ray, (Bool *)&h_visibilitiesRef[0]);
////////////////////////////////////////////////////////////////////////////
// Device solution
// Execution configuration
//dim3 block(256);
//dim3 block(128);
//dim3 block(64);
//dim3 block(512);
dim3 block(1024);
dim3 grid((uint)ceil(ray.length / (double)block.x));
// Compute device solution
printf("Line of sight\n");
sdkStartTimer(&timer);
for (uint i = 0; i < numIterations; ++i)
{
// Compute view angle for each point along the ray
computeAngles_kernel<<<grid, block>>>(ray, thrust::raw_pointer_cast(&d_angles[0]));
getLastCudaError("Kernel execution failed");
// Perform a max-scan operation on the array of view angles
thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum<float>());
getLastCudaError("Kernel execution failed");
// Compute visibility results based on the array of view angles
// and its scanned version
computeVisibilities_kernel<<<grid, block>>>(thrust::raw_pointer_cast(&d_angles[0]),
thrust::raw_pointer_cast(&d_scannedAngles[0]),
ray.length,
thrust::raw_pointer_cast(&d_visibilities[0]));
getLastCudaError("Kernel execution failed");
}
cudaDeviceSynchronize();
sdkStopTimer(&timer);
getLastCudaError("Kernel execution failed");
// Copy visibility results back to the host
thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin());
// Compare device visibility results against reference results
bool res = compareData(thrust::raw_pointer_cast(&h_visibilitiesRef[0]),
thrust::raw_pointer_cast(&h_visibilities[0]), ray.length, 0.0f, 0.0f);
printf("Average time: %f ms\n\n", sdkGetTimerValue(&timer) / numIterations);
sdkResetTimer(&timer);
// Cleanup memory
checkCudaErrors(cudaFreeArray(heightFieldArray));
return res;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute view angles for each point along the ray
//! @param ray ray
//! @param angles view angles
////////////////////////////////////////////////////////////////////////////////
__global__ void computeAngles_kernel(const Ray ray, float *angles)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < ray.length)
{
float2 location = getLocation(ray, i + 1);
float height = tex2D(g_HeightFieldTex, location.x, location.y);
float angle = getAngle(ray, location, height);
angles[i] = angle;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute visibility for each point along the ray
//! @param angles view angles
//! @param scannedAngles max-scanned view angles
//! @param numAngles number of view angles
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
__global__ void computeVisibilities_kernel(const float *angles,
const float *scannedAngles,
int numAngles,
Bool *visibilities)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numAngles)
{
visibilities[i] = scannedAngles[i] <= angles[i];
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set
//! @param heightField height field
//! @param ray ray
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
void lineOfSight_gold(const HeightField heightField, const Ray ray,
Bool *visibilities)
{
float angleMax = asinf(-1.0f);
for (int i = 0; i < ray.length; ++i)
{
float2 location = getLocation(ray, i + 1);
float height = *(heightField.height
+ heightField.width * (int)floorf(location.y)
+ (int)floorf(location.x));
float angle = getAngle(ray, location, height);
if (angle > angleMax)
{
angleMax = angle;
visibilities[i] = True;
}
else
{
visibilities[i] = False;
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the 2D coordinates of the point located at i steps from the origin
//! of the ray
//! @param ray ray
//! @param i integer offset along the ray
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float2 getLocation(const Ray ray, int i)
{
float step = i * ray.oneOverLength;
return make_float2(ray.origin.x, ray.origin.y) + ray.dir * step;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the angle of view between a 3D point and the origin of the ray
//! @param ray ray
//! @param location 2D coordinates of the input point
//! @param height height of the input point
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float getAngle(const Ray ray, float2 location, float height)
{
float2 dir = location - make_float2(ray.origin.x, ray.origin.y);
return atanf((height - ray.origin.z) / length(dir));
}
|
9335b6055d8bf1fb78f3fbc0861ade4325c57b4e.hip
|
// !!! This is a file automatically generated by hipify!!!
// Low level matrix multiplication on GPU using CUDA with CURAND and CUBLAS
// C(m,n) = A(m,k) * B(k,n)
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <ctime>
#include <hipblas.h> // the code below uses the hipBLAS API (hipblasSgemm, hipblasCreate, ...)
#include <hiprand/hiprand.h>
#include <cmath>
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
// Randomization helpers
// adapted from https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/rocm-3.0/clients/include/rocblas_init.hpp#L42
void fill_sin(float *A, size_t nr_rows_A, size_t nr_cols_A){
for(size_t i = 0; i < nr_rows_A; ++i)
for(size_t j = 0; j < nr_cols_A; ++j)
A[i + j * nr_rows_A] = sin(float(i + j * nr_rows_A));
}
void fill_cos(float *A, size_t nr_rows_A, size_t nr_cols_A){
for(size_t i = 0; i < nr_rows_A; ++i)
for(size_t j = 0; j < nr_cols_A; ++j)
A[i + j * nr_rows_A] = cos(float(i + j * nr_rows_A));
}
// hipBLAS API errors (the CUBLAS_API_H_ guard from the CUDA version is dropped
// here: the hip headers never define it, yet _cudaGetErrorEnum is used below)
static const char *_cudaGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
//Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(0); \
} \
}
void setup_nvlink(){
int numGPUs = 4;
int i = 0;
for (int j = 0; j < numGPUs; j++) {
int access = 0;
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
printf("Enabling %d to %d\n", i, j);
hipSetDevice(i);
cudaCheckError();
hipDeviceEnablePeerAccess(j, 0);
cudaCheckError();
hipSetDevice(j);
cudaCheckError();
hipDeviceEnablePeerAccess(i, 0);
cudaCheckError();
hipSetDevice(i);
cudaCheckError();
}
fflush(stdout);
}
}
// Multiply the arrays A and B on GPU and save the result in C
// C(m,n) = A(m,k) * B(k,n)
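// hipBLAS uses the BLAS column-major convention, so with no transposes the
// leading dimensions below are lda = m (rows of A), ldb = k (rows of B) and
// ldc = m (rows of C); with alpha = 1 and beta = 0 the call computes C = A * B.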
void gpu_blas_mmul( hipblasHandle_t handle, const float *A, const float *B, float *C, const int m, const int k, const int n) {
int lda=m,ldb=k,ldc=m;
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
// Do the actual multiplication
hipblasStatus_t err = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
if (err != HIPBLAS_STATUS_SUCCESS)
std::cout << "Error: " << _cudaGetErrorEnum(err) << std::endl;
}
//Print matrix A(nr_rows_A, nr_cols_A) stored in column-major format
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
std::cout << A[j * nr_rows_A + i] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char* argv[]) {
if (argc != 4){
std::cout << "USAGE: " << argv[0] <<" <size> <inner-reps> nlinks" <<std::endl ;
exit(-1);
}
int size = atoi(argv[1]);
int reps = atoi(argv[2]);
int nlinks = atoi(argv[3]);
setup_nvlink();
hipStream_t computeStream;
hipError_t result;
result = hipStreamCreateWithFlags(&computeStream, hipStreamNonBlocking);
hipStream_t copyStream, copyStream2;
hipStream_t copyStream3, copyStream4;
result = hipStreamCreateWithFlags(&copyStream, hipStreamNonBlocking);
result = hipStreamCreateWithFlags(&copyStream2, hipStreamNonBlocking);
result = hipStreamCreateWithFlags(&copyStream3, hipStreamNonBlocking);
result = hipStreamCreateWithFlags(&copyStream4, hipStreamNonBlocking);
// Allocate the src on CPU
long SIZE = 512*1024*1024;
// int* src = (int*) malloc(SIZE * sizeof(int));
int* src;
int *dest_h;
hipHostMalloc((void**) &src, SIZE * sizeof(int));
hipHostMalloc((void**) &dest_h, SIZE * sizeof(int));
for (int i = 0; i < SIZE ; ++i) {
src[i] = sin(i);
dest_h[i] = 1;
}
hipSetDevice(0);
// Allocate DST on gpu
int* dst;
hipMalloc(&dst, SIZE*sizeof(int));
// Allocate buffers on all GPUs:
hipSetDevice(1);
int* src_1;
hipMalloc(&src_1, SIZE*sizeof(int));
hipMemcpy((void*)src_1, (void*)src, sizeof(int)*SIZE , hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipSetDevice(2);
int* src_2;
hipMalloc(&src_2, SIZE*sizeof(int));
hipMemcpy((void*)src_2, (void*)src, sizeof(int)*SIZE , hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipSetDevice(3);
int* src_3;
hipMalloc(&src_3, SIZE*sizeof(int));
hipMemcpy((void*)src_3, (void*)src, sizeof(int)*SIZE , hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipSetDevice(0);
hipDeviceSynchronize();
// Allocate 3 arrays on CPU
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
// for simplicity we are going to use square arrays
nr_rows_A = nr_cols_A = nr_rows_B = nr_cols_B = nr_rows_C = nr_cols_C = size;
float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float));
float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float));
float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float));
// Allocate 3 arrays on GPU
float *d_A, *d_B, *d_C;
hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float));
hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float));
hipMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float));
// If you already have useful values in A and B you can copy them in GPU:
// hipMemcpy(d_A,h_A,nr_rows_A * nr_cols_A * sizeof(float),hipMemcpyHostToDevice);
// hipMemcpy(d_B,h_B,nr_rows_B * nr_cols_B * sizeof(float),hipMemcpyHostToDevice);
// Fill the arrays A and B on GPU with random numbers
// GPU_fill_rand(d_A, nr_rows_A, nr_cols_A);
// GPU_fill_rand(d_B, nr_rows_B, nr_cols_B);
fill_sin(h_A, nr_rows_A, nr_cols_A);
fill_cos(h_B, nr_rows_B, nr_cols_B);
// Optionally we can copy the data back on CPU and print the arrays
hipMemcpyAsync(d_A,h_A,nr_rows_A * nr_cols_A * sizeof(float),hipMemcpyHostToDevice, computeStream);
hipMemcpyAsync(d_B,h_B,nr_rows_B * nr_cols_B * sizeof(float),hipMemcpyHostToDevice, computeStream);
// Optionally we can copy the data back on CPU and print the arrays
// hipMemcpyAsync(h_A,d_A,nr_rows_A * nr_cols_A * sizeof(float),hipMemcpyDeviceToHost, computeStream);
// hipMemcpyAsync(h_B,d_B,nr_rows_B * nr_cols_B * sizeof(float),hipMemcpyDeviceToHost, computeStream);
std::cout << "A =" << std::endl;
// print_matrix(h_A, nr_rows_A, nr_cols_A);
std::cout << "B =" << std::endl;
// print_matrix(h_B, nr_rows_B, nr_cols_B);
hipMemcpyAsync((void*)dst, (void*)src, sizeof(int)*SIZE , hipMemcpyHostToDevice, copyStream);
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSetStream(handle, computeStream);
hipDeviceSynchronize();
for (int j = 0 ; j < 100; j++){
// Takes about 5 minutes
gpu_blas_mmul(handle, d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_cols_B);
for (int i=0; i< reps; i++){
// each stable copy takes about 162 milliseconds
// hipMemcpyAsync((void*)src, (void*)dst, sizeof(int) * SIZE, hipMemcpyDeviceToHost, copyStream);
// hipMemcpyAsync((void*)dest_h, (void*)dst, sizeof(int) * SIZE, hipMemcpyDeviceToHost, copyStream2);
if (nlinks >=1)
hipMemcpyAsync((void*)src_1, (void*)dst, sizeof(int)*SIZE , hipMemcpyDeviceToDevice, copyStream2);
if (nlinks >= 2)
hipMemcpyAsync((void*)src_2, (void*)dst, sizeof(int)*SIZE , hipMemcpyDeviceToDevice, copyStream3);
if (nlinks >= 3)
hipMemcpyAsync((void*)src_3, (void*)dst, sizeof(int)*SIZE , hipMemcpyDeviceToDevice, copyStream4);
}
// Wait for this iteration's asynchronous copies to finish
hipStreamSynchronize(copyStream);
hipStreamSynchronize(copyStream2);
hipStreamSynchronize(copyStream3);
hipStreamSynchronize(copyStream4);
}
hipStreamSynchronize(computeStream);
hipStreamSynchronize(copyStream);
hipStreamSynchronize(copyStream2);
hipStreamSynchronize(copyStream3);
hipStreamSynchronize(copyStream4);
// Destroy the handle
hipblasDestroy(handle);
// Copy (and print) the result on host memory
hipMemcpyAsync(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost, computeStream);
std::cout << "C =" << std::endl;
// print_matrix(h_C, nr_rows_C, nr_cols_C);
//Free GPU memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipHostFree(src);
hipHostFree(dest_h);
hipFree(dst);
hipSetDevice(1);
hipFree(src_1);
hipSetDevice(2);
hipFree(src_2);
hipSetDevice(3);
hipFree(src_3);
result = hipStreamDestroy(computeStream);
result = hipStreamDestroy(copyStream);
result = hipStreamDestroy(copyStream2);
result = hipStreamDestroy(copyStream3);
result = hipStreamDestroy(copyStream4);
// Free CPU memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
9335b6055d8bf1fb78f3fbc0861ade4325c57b4e.cu
|
// Low level matrix multiplication on GPU using CUDA with CURAND and CUBLAS
// C(m,n) = A(m,k) * B(k,n)
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <ctime>
#include <cublas_v2.h>
#include <curand.h>
#include <cmath>
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
// Randomization helpers
// adapted from https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/rocm-3.0/clients/include/rocblas_init.hpp#L42
void fill_sin(float *A, size_t nr_rows_A, size_t nr_cols_A){
for(size_t i = 0; i < nr_rows_A; ++i)
for(size_t j = 0; j < nr_cols_A; ++j)
A[i + j * nr_rows_A] = sin(float(i + j * nr_rows_A));
}
void fill_cos(float *A, size_t nr_rows_A, size_t nr_cols_A){
for(size_t i = 0; i < nr_rows_A; ++i)
for(size_t j = 0; j < nr_cols_A; ++j)
A[i + j * nr_rows_A] = cos(float(i + j * nr_rows_A));
}
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
//Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(0); \
} \
}
void setup_nvlink(){
int numGPUs = 4;
int i = 0;
for (int j = 0; j < numGPUs; j++) {
int access = 0;
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
printf("Enabling %d to %d\n", i, j);
cudaSetDevice(i);
cudaCheckError();
cudaDeviceEnablePeerAccess(j, 0);
cudaCheckError();
cudaSetDevice(j);
cudaCheckError();
cudaDeviceEnablePeerAccess(i, 0);
cudaCheckError();
cudaSetDevice(i);
cudaCheckError();
}
fflush(stdout);
}
}
// Multiply the arrays A and B on GPU and save the result in C
// C(m,n) = A(m,k) * B(k,n)
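// cuBLAS uses the BLAS column-major convention, so with no transposes the
// leading dimensions below are lda = m (rows of A), ldb = k (rows of B) and
// ldc = m (rows of C); with alpha = 1 and beta = 0 the call computes C = A * B.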
void gpu_blas_mmul( cublasHandle_t handle, const float *A, const float *B, float *C, const int m, const int k, const int n) {
int lda=m,ldb=k,ldc=m;
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
// Do the actual multiplication
cublasStatus_t err = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
if (err != CUBLAS_STATUS_SUCCESS)
std::cout << "Error: " << _cudaGetErrorEnum(err) << std::endl;
}
//Print matrix A(nr_rows_A, nr_cols_A) stored in column-major format
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
std::cout << A[j * nr_rows_A + i] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char* argv[]) {
if (argc != 4){
std::cout << "USAGE: " << argv[0] <<" <size> <inner-reps> nlinks" <<std::endl ;
exit(-1);
}
int size = atoi(argv[1]);
int reps = atoi(argv[2]);
int nlinks = atoi(argv[3]);
setup_nvlink();
cudaStream_t computeStream;
cudaError_t result;
result = cudaStreamCreateWithFlags(&computeStream, cudaStreamNonBlocking);
cudaStream_t copyStream, copyStream2;
cudaStream_t copyStream3, copyStream4;
result = cudaStreamCreateWithFlags(&copyStream, cudaStreamNonBlocking);
result = cudaStreamCreateWithFlags(&copyStream2, cudaStreamNonBlocking);
result = cudaStreamCreateWithFlags(&copyStream3, cudaStreamNonBlocking);
result = cudaStreamCreateWithFlags(&copyStream4, cudaStreamNonBlocking);
// Allocate the src on CPU
long SIZE = 512*1024*1024;
// int* src = (int*) malloc(SIZE * sizeof(int));
int* src;
int *dest_h;
cudaMallocHost((void**) &src, SIZE * sizeof(int));
cudaMallocHost((void**) &dest_h, SIZE * sizeof(int));
for (int i = 0; i < SIZE ; ++i) {
src[i] = sin(i);
dest_h[i] = 1;
}
cudaSetDevice(0);
// Allocate DST on gpu
int* dst;
cudaMalloc(&dst, SIZE*sizeof(int));
// Allocate buffers on all GPUs:
cudaSetDevice(1);
int* src_1;
cudaMalloc(&src_1, SIZE*sizeof(int));
cudaMemcpy((void*)src_1, (void*)src, sizeof(int)*SIZE , cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaSetDevice(2);
int* src_2;
cudaMalloc(&src_2, SIZE*sizeof(int));
cudaMemcpy((void*)src_2, (void*)src, sizeof(int)*SIZE , cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaSetDevice(3);
int* src_3;
cudaMalloc(&src_3, SIZE*sizeof(int));
cudaMemcpy((void*)src_3, (void*)src, sizeof(int)*SIZE , cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaSetDevice(0);
cudaDeviceSynchronize();
// Allocate 3 arrays on CPU
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
// for simplicity we are going to use square arrays
nr_rows_A = nr_cols_A = nr_rows_B = nr_cols_B = nr_rows_C = nr_cols_C = size;
float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float));
float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float));
float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float));
// Allocate 3 arrays on GPU
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float));
cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float));
cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float));
// If you already have useful values in A and B you can copy them in GPU:
// cudaMemcpy(d_A,h_A,nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyHostToDevice);
// cudaMemcpy(d_B,h_B,nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyHostToDevice);
// Fill the arrays A and B on GPU with random numbers
// GPU_fill_rand(d_A, nr_rows_A, nr_cols_A);
// GPU_fill_rand(d_B, nr_rows_B, nr_cols_B);
fill_sin(h_A, nr_rows_A, nr_cols_A);
fill_cos(h_B, nr_rows_B, nr_cols_B);
// Optionally we can copy the data back on CPU and print the arrays
cudaMemcpyAsync(d_A,h_A,nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyHostToDevice, computeStream);
cudaMemcpyAsync(d_B,h_B,nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyHostToDevice, computeStream);
// Optionally we can copy the data back on CPU and print the arrays
// cudaMemcpyAsync(h_A,d_A,nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyDeviceToHost, computeStream);
// cudaMemcpyAsync(h_B,d_B,nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyDeviceToHost, computeStream);
std::cout << "A =" << std::endl;
// print_matrix(h_A, nr_rows_A, nr_cols_A);
std::cout << "B =" << std::endl;
// print_matrix(h_B, nr_rows_B, nr_cols_B);
cudaMemcpyAsync((void*)dst, (void*)src, sizeof(int)*SIZE , cudaMemcpyHostToDevice, copyStream);
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
cublasSetStream(handle, computeStream);
cudaDeviceSynchronize();
for (int j = 0 ; j < 100; j++){
// Takes about 5 minutes
gpu_blas_mmul(handle, d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_cols_B);
for (int i=0; i< reps; i++){
// each stable copy takes about 162 milliseconds
// cudaMemcpyAsync((void*)src, (void*)dst, sizeof(int) * SIZE, cudaMemcpyDeviceToHost, copyStream);
// cudaMemcpyAsync((void*)dest_h, (void*)dst, sizeof(int) * SIZE, cudaMemcpyDeviceToHost, copyStream2);
if (nlinks >=1)
cudaMemcpyAsync((void*)src_1, (void*)dst, sizeof(int)*SIZE , cudaMemcpyDeviceToDevice, copyStream2);
if (nlinks >= 2)
cudaMemcpyAsync((void*)src_2, (void*)dst, sizeof(int)*SIZE , cudaMemcpyDeviceToDevice, copyStream3);
if (nlinks >= 3)
cudaMemcpyAsync((void*)src_3, (void*)dst, sizeof(int)*SIZE , cudaMemcpyDeviceToDevice, copyStream4);
}
// Wait for this iteration's asynchronous copies to finish
cudaStreamSynchronize(copyStream);
cudaStreamSynchronize(copyStream2);
cudaStreamSynchronize(copyStream3);
cudaStreamSynchronize(copyStream4);
}
cudaStreamSynchronize(computeStream);
cudaStreamSynchronize(copyStream);
cudaStreamSynchronize(copyStream2);
cudaStreamSynchronize(copyStream3);
cudaStreamSynchronize(copyStream4);
// Destroy the handle
cublasDestroy(handle);
// Copy (and print) the result on host memory
cudaMemcpyAsync(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost, computeStream);
std::cout << "C =" << std::endl;
// print_matrix(h_C, nr_rows_C, nr_cols_C);
//Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFreeHost(src);
cudaFreeHost(dest_h);
cudaFree(dst);
cudaSetDevice(1);
cudaFree(src_1);
cudaSetDevice(2);
cudaFree(src_2);
cudaSetDevice(3);
cudaFree(src_3);
result = cudaStreamDestroy(computeStream);
result = cudaStreamDestroy(copyStream);
result = cudaStreamDestroy(copyStream2);
result = cudaStreamDestroy(copyStream3);
result = cudaStreamDestroy(copyStream4);
// Free CPU memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
09fd511074b9dbc571f9f2bb5d899d8d2f486d3c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include <vector>
using namespace std;
#if !defined density
#define density 0.0005
#endif
#if !defined mass
#define mass 0.01
#endif
#if !defined cutoff
#define cutoff 0.01
#endif
#if !defined min_r
#define min_r (cutoff/100)
#endif
#if !defined dt
#define dt 0.0005
#endif
#define NUM_THREADS 256
extern double size;
//
// benchmarking program
//
// calculate particle's bin number
__device__ int calculateBinNum(particle_t &p, int binsPerSide)
{
return ( floor(p.x/cutoff) + binsPerSide*floor(p.y/cutoff) );
}
__device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor)
{
double dx = neighbor.x - particle.x;
double dy = neighbor.y - particle.y;
double r2 = dx * dx + dy * dy;
if( r2 > cutoff*cutoff )
return;
//r2 = fmax( r2, min_r*min_r );
r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r;
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle.ax += coef * dx;
particle.ay += coef * dy;
}
__global__ void setupParticleBin(particle_t * particles, int n, particle_t * bins, int numbins, int binsPerSide, int* binSizes) {
// clear bins at each time step
for (int m = 0; m < numbins; m++) {
binSizes[m] = 0;
}
// place particles in bins
for (int i = 0; i < n; i++) {
int binNumber = calculateBinNum(particles[i],binsPerSide);
int indexInBin = binSizes[binNumber];
bins[binNumber*n + indexInBin] = particles[i];
binSizes[binNumber] ++;
}
}
__global__ void compute_forces_gpu(particle_t * particles, int n, particle_t * bins, int numbins, int binsPerSide, int* binSizes)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
particles[tid].ax = particles[tid].ay = 0;
// find current particle's bin, handle boundaries
int cbin = calculateBinNum( particles[tid], binsPerSide );
int lowi = -1, highi = 1, lowj = -1, highj = 1;
if (cbin < binsPerSide) lowj = 0;
if (cbin % binsPerSide == 0) lowi = 0;
if (cbin % binsPerSide == (binsPerSide-1)) highi = 0;
if (cbin >= binsPerSide*(binsPerSide-1)) highj = 0;
// apply nearby forces
for (int i = lowi; i <= highi; i++) {
for (int j = lowj; j <= highj; j++)
{
int nbin = cbin + i + binsPerSide*j;
for (int indexInBin = 0; indexInBin < binSizes[nbin]; indexInBin++) {
apply_force_gpu(particles[tid], bins[nbin*n + indexInBin]);
}
//for (int k = 0; k < bins[nbin].size(); k++ ) {
//apply_force( local[p], *bins[nbin][k], &dmin, &davg, &navg);
// apply_force_gpu(particles[tid], particles[j]);
//}
}
}
//for(int j = 0 ; j < n ; j++) {
// apply_force_gpu(particles[tid], particles[j]);
//}
}
__global__ void move_gpu (particle_t * particles, int n, double size)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
particle_t * p = &particles[tid];
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
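  // (this is the semi-implicit / symplectic Euler update: the velocity is
  // advanced first and the updated velocity is then used to move the position)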
p->vx += p->ax * dt;
p->vy += p->ay * dt;
p->x += p->vx * dt;
p->y += p->vy * dt;
//
// bounce from walls
//
while( p->x < 0 || p->x > size )
{
p->x = p->x < 0 ? -(p->x) : 2*size-p->x;
p->vx = -(p->vx);
}
while( p->y < 0 || p->y > size )
{
p->y = p->y < 0 ? -(p->y) : 2*size-p->y;
p->vy = -(p->vy);
}
}
int main( int argc, char **argv )
{
// This takes a few seconds to initialize the runtime
hipDeviceSynchronize();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the number of particles\n" );
printf( "-o <filename> to specify the output file name\n" );
printf( "-s <filename> to specify the summary output file name\n" );
return 0;
}
int n = read_int( argc, argv, "-n", 1000 );
char *savename = read_string( argc, argv, "-o", NULL );
char *sumname = read_string( argc, argv, "-s", NULL );
FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
FILE *fsum = sumname ? fopen(sumname,"a") : NULL;
particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
// create spatial bins (of size cutoff by cutoff)
double size = sqrt( density*n );
int binsPerSide = ceil(size/cutoff);
int numbins = binsPerSide*binsPerSide;
particle_t* bins = (particle_t *) malloc(n * sizeof(particle_t) * numbins);
int* binSizes = (int *) malloc(numbins * sizeof(int));
// GPU particle data structure
particle_t * d_particles;
hipMalloc((void **) &d_particles, n * sizeof(particle_t));
particle_t * d_bins;
hipMalloc((void **) &d_bins, n * sizeof(particle_t) * numbins);
int * d_binSizes;
hipMalloc((void **) &d_binSizes, sizeof(int) * numbins);
set_size( n );
init_particles( n, particles );
hipDeviceSynchronize();
double copy_time = read_timer( );
// Copy the particles to the GPU
hipMemcpy(d_particles, particles, n * sizeof(particle_t), hipMemcpyHostToDevice);
hipDeviceSynchronize();
copy_time = read_timer( ) - copy_time;
//
// simulate a number of time steps
//
hipDeviceSynchronize();
double simulation_time = read_timer( );
for( int step = 0; step < NSTEPS; step++ )
{
//
// compute forces
//
hipLaunchKernelGGL(( setupParticleBin) , dim3(1), dim3(1) , 0, 0, d_particles, n, d_bins, numbins, binsPerSide, d_binSizes);
int blks = (n + NUM_THREADS - 1) / NUM_THREADS;
hipLaunchKernelGGL(( compute_forces_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n, d_bins, numbins, binsPerSide, d_binSizes);
//
// move particles
//
hipLaunchKernelGGL(( move_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n, size);
//
// save if necessary
//
if( fsave && (step%SAVEFREQ) == 0 ) {
// Copy the particles back to the CPU
hipMemcpy(particles, d_particles, n * sizeof(particle_t), hipMemcpyDeviceToHost);
save( fsave, n, particles);
}
}
hipDeviceSynchronize();
simulation_time = read_timer( ) - simulation_time;
printf( "CPU-GPU copy time = %g seconds\n", copy_time);
printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
if (fsum)
fprintf(fsum,"%d %lf \n",n,simulation_time);
if (fsum)
fclose( fsum );
free( particles );
free( bins );
free( binSizes );
hipFree(d_particles);
hipFree(d_bins);
hipFree(d_binSizes);
if( fsave )
fclose( fsave );
return 0;
}
|
09fd511074b9dbc571f9f2bb5d899d8d2f486d3c.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <cuda.h>
#include "common.h"
#include <vector>
using namespace std;
#if !defined density
#define density 0.0005
#endif
#if !defined mass
#define mass 0.01
#endif
#if !defined cutoff
#define cutoff 0.01
#endif
#if !defined min_r
#define min_r (cutoff/100)
#endif
#if !defined dt
#define dt 0.0005
#endif
#define NUM_THREADS 256
extern double size;
//
// benchmarking program
//
// calculate particle's bin number
__device__ int calculateBinNum(particle_t &p, int binsPerSide)
{
return ( floor(p.x/cutoff) + binsPerSide*floor(p.y/cutoff) );
}
__device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor)
{
double dx = neighbor.x - particle.x;
double dy = neighbor.y - particle.y;
double r2 = dx * dx + dy * dy;
if( r2 > cutoff*cutoff )
return;
//r2 = fmax( r2, min_r*min_r );
r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r;
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle.ax += coef * dx;
particle.ay += coef * dy;
}
__global__ void setupParticleBin(particle_t * particles, int n, particle_t * bins, int numbins, int binsPerSide, int* binSizes) {
// clear bins at each time step
for (int m = 0; m < numbins; m++) {
binSizes[m] = 0;
}
// place particles in bins
for (int i = 0; i < n; i++) {
int binNumber = calculateBinNum(particles[i],binsPerSide);
int indexInBin = binSizes[binNumber];
bins[binNumber*n + indexInBin] = particles[i];
binSizes[binNumber] ++;
}
}
__global__ void compute_forces_gpu(particle_t * particles, int n, particle_t * bins, int numbins, int binsPerSide, int* binSizes)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
particles[tid].ax = particles[tid].ay = 0;
// find current particle's bin, handle boundaries
int cbin = calculateBinNum( particles[tid], binsPerSide );
int lowi = -1, highi = 1, lowj = -1, highj = 1;
if (cbin < binsPerSide) lowj = 0;
if (cbin % binsPerSide == 0) lowi = 0;
if (cbin % binsPerSide == (binsPerSide-1)) highi = 0;
if (cbin >= binsPerSide*(binsPerSide-1)) highj = 0;
// apply nearby forces
for (int i = lowi; i <= highi; i++) {
for (int j = lowj; j <= highj; j++)
{
int nbin = cbin + i + binsPerSide*j;
for (int indexInBin = 0; indexInBin < binSizes[nbin]; indexInBin++) {
apply_force_gpu(particles[tid], bins[nbin*n + indexInBin]);
}
//for (int k = 0; k < bins[nbin].size(); k++ ) {
//apply_force( local[p], *bins[nbin][k], &dmin, &davg, &navg);
// apply_force_gpu(particles[tid], particles[j]);
//}
}
}
//for(int j = 0 ; j < n ; j++) {
// apply_force_gpu(particles[tid], particles[j]);
//}
}
__global__ void move_gpu (particle_t * particles, int n, double size)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
particle_t * p = &particles[tid];
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
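  // (this is the semi-implicit / symplectic Euler update: the velocity is
  // advanced first and the updated velocity is then used to move the position)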
p->vx += p->ax * dt;
p->vy += p->ay * dt;
p->x += p->vx * dt;
p->y += p->vy * dt;
//
// bounce from walls
//
while( p->x < 0 || p->x > size )
{
p->x = p->x < 0 ? -(p->x) : 2*size-p->x;
p->vx = -(p->vx);
}
while( p->y < 0 || p->y > size )
{
p->y = p->y < 0 ? -(p->y) : 2*size-p->y;
p->vy = -(p->vy);
}
}
int main( int argc, char **argv )
{
// This takes a few seconds to initialize the runtime
cudaThreadSynchronize();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the number of particles\n" );
printf( "-o <filename> to specify the output file name\n" );
printf( "-s <filename> to specify the summary output file name\n" );
return 0;
}
int n = read_int( argc, argv, "-n", 1000 );
char *savename = read_string( argc, argv, "-o", NULL );
char *sumname = read_string( argc, argv, "-s", NULL );
FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
FILE *fsum = sumname ? fopen(sumname,"a") : NULL;
particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
// create spatial bins (of size cutoff by cutoff)
double size = sqrt( density*n );
int binsPerSide = ceil(size/cutoff);
int numbins = binsPerSide*binsPerSide;
particle_t* bins = (particle_t *) malloc(n * sizeof(particle_t) * numbins);
int* binSizes = (int *) malloc(numbins * sizeof(int));
// GPU particle data structure
particle_t * d_particles;
cudaMalloc((void **) &d_particles, n * sizeof(particle_t));
particle_t * d_bins;
cudaMalloc((void **) &d_bins, n * sizeof(particle_t) * numbins);
int * d_binSizes;
cudaMalloc((void **) &d_binSizes, sizeof(int) * numbins);
set_size( n );
init_particles( n, particles );
cudaThreadSynchronize();
double copy_time = read_timer( );
// Copy the particles to the GPU
cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
copy_time = read_timer( ) - copy_time;
//
// simulate a number of time steps
//
cudaThreadSynchronize();
double simulation_time = read_timer( );
for( int step = 0; step < NSTEPS; step++ )
{
//
// compute forces
//
setupParticleBin <<< 1, 1 >>> (d_particles, n, d_bins, numbins, binsPerSide, d_binSizes);
int blks = (n + NUM_THREADS - 1) / NUM_THREADS;
compute_forces_gpu <<< blks, NUM_THREADS >>> (d_particles, n, d_bins, numbins, binsPerSide, d_binSizes);
//
// move particles
//
move_gpu <<< blks, NUM_THREADS >>> (d_particles, n, size);
//
// save if necessary
//
if( fsave && (step%SAVEFREQ) == 0 ) {
// Copy the particles back to the CPU
cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
save( fsave, n, particles);
}
}
cudaThreadSynchronize();
simulation_time = read_timer( ) - simulation_time;
printf( "CPU-GPU copy time = %g seconds\n", copy_time);
printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
if (fsum)
fprintf(fsum,"%d %lf \n",n,simulation_time);
if (fsum)
fclose( fsum );
    free( particles );
    free( bins );
    free( binSizes );
    cudaFree(d_particles);
    cudaFree(d_bins);
    cudaFree(d_binSizes);
if( fsave )
fclose( fsave );
return 0;
}
|
2ee71f209c66261b927ce351b5e19aeef02d7ff1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../include/reduction.h"
#include "../../include/utilities.h"
/**
* Performs an optimized reduction step to find Iup, Ilow, Bup and Blow
* @param d_ytraindata device pointer to the array of binary labels
* @param d_atraindata device pointer to the array of alphas
* @param d_fdata device pointer to the array of fs
* @param h_bup host pointer to the local bup values
* @param h_blow host pointer to the local blow values
* @param h_Iup host pointer to the local Iup values
* @param h_Ilow host pointer to the local Ilow values
* @param d_bup device pointer to the local bup values
* @param d_blow device pointer to the local blow values
* @param d_Iup device pointer to the local Iup values
* @param d_Ilow device pointer to the local Ilow values
* @param h_bup_global host pointer to the global bup values
* @param h_blow_global host pointer to the global blow values
* @param h_Iup_global host pointer to the global Iup values
* @param h_Ilow_global host pointer to the global Ilow values
* @param d_bup_global device pointer to the global bup values
* @param d_blow_global device pointer to the global blow values
* @param d_Iup_global device pointer to the global Iup values
* @param d_Ilow_global device pointer to the global Ilow values
* @param h_done host pointer to the array with the status of each binary task
 * @param d_done device pointer to the array with the status of each binary task
 * @param d_active device pointer to the array with active binary tasks
 * @param numThreads number of threads per block
 * @param numBlocksRed number of blocks in the reduction
* @param ntraining number of training samples in the training set
* @param ntasks number of binary tasks to be solved
* @param activeTasks number of active tasks
* @param d_C device pointer to the array of regularization parameters
*/
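/*
 * Minimal usage sketch (illustrative only; the surrounding setup is an
 * assumption, not part of this file). numThreads must be one of the
 * power-of-two block sizes handled by the switch below (1..512), and the
 * kernel is launched on a numBlocksRed x activeTasks grid, with one partial
 * bup/blow/Iup/Ilow result presumably written per block:
 *
 *   int numThreads   = 128;
 *   int numBlocksRed = (ntraining + numThreads - 1) / numThreads;
 *   reductionstep(d_ytraindata, d_atraindata, d_fdata,
 *                 h_bup, h_blow, h_Iup, h_Ilow,
 *                 d_bup, d_blow, d_Iup, d_Ilow,
 *                 h_bup_global, h_blow_global, h_Iup_global, h_Ilow_global,
 *                 d_bup_global, d_blow_global, d_Iup_global, d_Ilow_global,
 *                 h_done, d_done, d_active,
 *                 numThreads, numBlocksRed, ntraining, ntasks, activeTasks, d_C);
 */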
void reductionstep( int* d_ytraindata,
float* d_atraindata,
float* d_fdata,
float* h_bup,
float* h_blow,
int* h_Iup,
int* h_Ilow,
float* d_bup,
float* d_blow,
int* d_Iup,
int* d_Ilow,
float* h_bup_global,
float* h_blow_global,
int* h_Iup_global,
int* h_Ilow_global,
float* d_bup_global,
float* d_blow_global,
int* d_Iup_global,
int* d_Ilow_global,
int* h_done,
int* d_done,
int* d_active,
int numThreads,
int numBlocksRed,
int ntraining,
int ntasks,
int activeTasks,
float* d_C)
{
int smemSize = 0;
bool isNtrainingPow2=isPow2(ntraining);
dim3 dimBlockActiveReduction(numThreads, 1, 1);
dim3 dimGridActiveReduction(numBlocksRed, activeTasks, 1);
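    // The switch below is a common reduction-dispatch pattern: the block size
    // (and whether ntraining is a power of two) are compile-time template
    // parameters of the reduction kernel, so each instantiation can unroll its
    // inner loop for that specific block size (1..512 threads).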
if(isNtrainingPow2)
{
switch (numThreads)
{
case 512:
hipLaunchKernelGGL(( reduction <512,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 256:
hipLaunchKernelGGL(( reduction <256,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 128:
hipLaunchKernelGGL(( reduction <128,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 64:
hipLaunchKernelGGL(( reduction <64,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 32:
hipLaunchKernelGGL(( reduction <32,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 16:
hipLaunchKernelGGL(( reduction <16,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 8:
hipLaunchKernelGGL(( reduction <8,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 4:
hipLaunchKernelGGL(( reduction <4,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 2:
hipLaunchKernelGGL(( reduction <2,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 1:
hipLaunchKernelGGL(( reduction <1,true>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
}
}
else
{
switch (numThreads)
{
case 512:
hipLaunchKernelGGL(( reduction <512,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup,d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 256:
hipLaunchKernelGGL(( reduction <256,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup,d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 128:
hipLaunchKernelGGL(( reduction <128,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup,d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 64:
hipLaunchKernelGGL(( reduction <64,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 32:
hipLaunchKernelGGL(( reduction <32,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 16:
hipLaunchKernelGGL(( reduction <16,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 8:
hipLaunchKernelGGL(( reduction <8,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 4:
hipLaunchKernelGGL(( reduction <4,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done, d_active,ntraining,ntasks,activeTasks,d_C); break;
case 2:
hipLaunchKernelGGL(( reduction <2,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done, d_active,ntraining,ntasks,activeTasks,d_C); break;
case 1:
hipLaunchKernelGGL(( reduction <1,false>), dim3(dimGridActiveReduction), dim3(dimBlockActiveReduction), smemSize , 0, d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
}
}
hipDeviceSynchronize();
hipError_t error= hipGetLastError();
if(error)
{
printf("Errors Reduction!, %s,\n", hipGetErrorString(error));
getchar();
}
//globalparamsparallel(d_bup, d_blow, d_Iup,d_Ilow,h_bup_global,h_blow_global,h_Iup_global,h_Ilow_global, d_bup_global,d_blow_global, d_Iup_global,d_Ilow_global, d_done, d_active,ntasks,numBlocksRed,activeTasks);
globalparamsserial(h_bup,h_blow,h_Iup,h_Ilow,d_bup,d_blow, d_Iup,d_Ilow,h_bup_global,h_blow_global,h_Iup_global,h_Ilow_global,d_bup_global, d_blow_global,d_Iup_global,d_Ilow_global,h_done, d_done,ntasks,numBlocksRed);
}
|
2ee71f209c66261b927ce351b5e19aeef02d7ff1.cu
|
#include "../../include/reduction.h"
#include "../../include/utilities.h"
/**
* Performs an optimized reduction step to find Iup, Ilow, Bup and Blow
* @param d_ytraindata device pointer to the array of binary labels
* @param d_atraindata device pointer to the array of alphas
* @param d_fdata device pointer to the array of fs
* @param h_bup host pointer to the local bup values
* @param h_blow host pointer to the local blow values
* @param h_Iup host pointer to the local Iup values
* @param h_Ilow host pointer to the local Ilow values
* @param d_bup device pointer to the local bup values
* @param d_blow device pointer to the local blow values
* @param d_Iup device pointer to the local Iup values
* @param d_Ilow device pointer to the local Ilow values
* @param h_bup_global host pointer to the global bup values
* @param h_blow_global host pointer to the global blow values
* @param h_Iup_global host pointer to the global Iup values
* @param h_Ilow_global host pointer to the global Ilow values
* @param d_bup_global device pointer to the global bup values
* @param d_blow_global device pointer to the global blow values
* @param d_Iup_global device pointer to the global Iup values
* @param d_Ilow_global device pointer to the global Ilow values
* @param h_done host pointer to the array with the status of each binary task
 * @param d_done device pointer to the array with the status of each binary task
 * @param d_active device pointer to the array with active binary tasks
 * @param numThreads number of threads per block
 * @param numBlocksRed number of blocks in the reduction
* @param ntraining number of training samples in the training set
* @param ntasks number of binary tasks to be solved
* @param activeTasks number of active tasks
* @param d_C device pointer to the array of regularization parameters
*/
void reductionstep( int* d_ytraindata,
float* d_atraindata,
float* d_fdata,
float* h_bup,
float* h_blow,
int* h_Iup,
int* h_Ilow,
float* d_bup,
float* d_blow,
int* d_Iup,
int* d_Ilow,
float* h_bup_global,
float* h_blow_global,
int* h_Iup_global,
int* h_Ilow_global,
float* d_bup_global,
float* d_blow_global,
int* d_Iup_global,
int* d_Ilow_global,
int* h_done,
int* d_done,
int* d_active,
int numThreads,
int numBlocksRed,
int ntraining,
int ntasks,
int activeTasks,
float* d_C)
{
int smemSize = 0;
bool isNtrainingPow2=isPow2(ntraining);
dim3 dimBlockActiveReduction(numThreads, 1, 1);
dim3 dimGridActiveReduction(numBlocksRed, activeTasks, 1);
if(isNtrainingPow2)
{
switch (numThreads)
{
case 512:
reduction <512,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 256:
reduction <256,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 128:
reduction <128,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 64:
reduction <64,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 32:
reduction <32,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 16:
reduction <16,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 8:
reduction <8,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 4:
reduction <4,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 2:
reduction <2,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 1:
reduction <1,true><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
}
}
else
{
switch (numThreads)
{
case 512:
reduction <512,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup,d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 256:
reduction <256,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup,d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 128:
reduction <128,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup,d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 64:
reduction <64,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 32:
reduction <32,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 16:
reduction <16,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata,d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 8:
reduction <8,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
case 4:
reduction <4,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done, d_active,ntraining,ntasks,activeTasks,d_C); break;
case 2:
reduction <2,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done, d_active,ntraining,ntasks,activeTasks,d_C); break;
case 1:
reduction <1,false><<< dimGridActiveReduction, dimBlockActiveReduction, smemSize >>>(d_ytraindata,d_atraindata, d_fdata,d_bup,d_blow,d_Iup, d_Ilow, d_done,d_active,ntraining,ntasks,activeTasks,d_C); break;
}
}
cudaThreadSynchronize();
cudaError_t error= cudaGetLastError();
if(error)
{
printf("Errors Reduction!, %s,\n", cudaGetErrorString(error));
getchar();
}
//globalparamsparallel(d_bup, d_blow, d_Iup,d_Ilow,h_bup_global,h_blow_global,h_Iup_global,h_Ilow_global, d_bup_global,d_blow_global, d_Iup_global,d_Ilow_global, d_done, d_active,ntasks,numBlocksRed,activeTasks);
globalparamsserial(h_bup,h_blow,h_Iup,h_Ilow,d_bup,d_blow, d_Iup,d_Ilow,h_bup_global,h_blow_global,h_Iup_global,h_Ilow_global,d_bup_global, d_blow_global,d_Iup_global,d_Ilow_global,h_done, d_done,ntasks,numBlocksRed);
}
|
4bfa792ba7f9f313d2962f8da3a6a4e461c1f3ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <vector>
#include <cmath>
#include "../include/layer_kernels.cuh"
using namespace std;
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
 * maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
* top5Probs: (1, numCases) (*out)
*
* target: (1, numCases)
*
*/
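/*
 * Reading of the top-k (setSize) credit computed below, for reference: with
 * numBiggerProbs labels strictly above the true label's probability and
 * numEqualsProbs tied with it, slotsLeft = setSize - numBiggerProbs positions
 * remain in the top-k set. The sample therefore scores 0 if it cannot fit,
 * 1 if all ties fit, and slotsLeft/numEqualsProbs (the expected credit under
 * a random tie-break) otherwise.
 */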
__global__ void kMultiSoftmaxCost(float* probs, float* labels, float* maxProbs,
float* labelLogProbs, float* correctProbs, float* top5Probs,
const int numCases, const int numOut, const int setSize) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
const int label = int(labels[tx]);
const float maxp = maxProbs[tx];
const float labelp = probs[label * numCases + tx];
labelLogProbs[tx] = __logf(labelp);
int numBiggerProbs = 0, numEqualsProbs = 0;
for (int i = 0; i < numOut; ++i) {
numBiggerProbs += probs[i * numCases + tx] > labelp;
numEqualsProbs += probs[i * numCases + tx] == labelp;
}
const int slotsLeft = setSize - numBiggerProbs;
top5Probs[tx] = slotsLeft <= 0.0f ? 0.0f : (numEqualsProbs <= slotsLeft ? 1.0f : float(slotsLeft) / numEqualsProbs);
correctProbs[tx] = labelp != maxp ? 0.0f : 1.0f / float(numEqualsProbs);
}
}
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
* top5Probs: (1, numCases) (*out)
*
 * target: (1, numCases) == log(y_l[labels,:])
*/
void computeMultiSoftmaxCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& maxProbs, NVMatrix& labelLogProbs_out,
NVMatrix& correctProbs_out, NVMatrix& top5Probs_out, int setSize) {
int numCases = probs.getNumCols();
int numOut = probs.getNumRows();
assert(labels.getNumElements() == numCases);
assert(!labels.isTrans());
assert(!probs.isTrans());
assert(labels.isContiguous());
assert(probs.isContiguous());
// NVMatrix& maxProbs = probs.max(0);
labelLogProbs_out.resize(1, numCases);
correctProbs_out.resize(1, numCases);
top5Probs_out.resize(1, numCases);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
hipStream_t stream = NVMatrix::getDefaultStream();
hipFuncSetCacheConfig(kMultiSoftmaxCost, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kMultiSoftmaxCost), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), maxProbs.getDevData(),
labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), top5Probs_out.getDevData(),
numCases, numOut, setSize);
getLastCudaError("kMultiSoftmaxCost: Kernel execution failed");
// hipDeviceSynchronize();
}
/*
* E = sum(p_l * log(y_l))
* probs: (numOut, numCases)
* labels: (numOut, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases)
*/
__global__ void kCrossEntCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs,
const int numCases, const int numOut) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
probs += tx;
labels += tx;
maxProbs += tx;
labelLogProbs += tx;
correctProbs += tx;
const float maxp = maxProbs[0];
/*
* Compute the probability of guessing the correct case if you take the most-probable label.
*
* This is done like this:
*
* - If the most probable label is not equal to the true label, then the probability is zero.
* - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum).
*
* This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned
* maximum probability. But it's a safety measure to prevent over-estimating your accuracy.
* Though it could never happen in reality. Well it could. But it wouldn't. Cool?
*/
float crossEnt = 0.0f;
int numMax = 0;
bool correctLabel = false;
for (int i = 0; i < numOut; i++) {
const float label_prob = labels[i * numCases];
const float model_prob = probs[i * numCases];
numMax += model_prob == maxp;
crossEnt += label_prob * safelog(model_prob);
correctLabel |= model_prob == maxp && label_prob > 0.0f;
}
labelLogProbs[0] = crossEnt;
if (!correctLabel) {
correctProbs[0] = 0.0f;
} else {
correctProbs[0] = 1.0f / float(numMax);
}
}
}
/*
* E = sum(p_l * log(y_l))
* y_l: (numOut, numCases)
* labels: (numOut, numCases)
*
* dE_dy_l: (numOut, numCases)
*/
template <bool add>
__global__ void kCrossEntGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const float label_prob = labels[tidx];
const float model_prob = y_l[tidx];
const float v = gradCoeff * __fdividef(label_prob, model_prob);
if (add) {
dE_dy_l[tidx] += v;
} else {
dE_dy_l[tidx] = v;
}
}
}
/*
* E = sum(p_l * log(y_l))
* y_l: (numOut, numCases)
* labels: (numOut, numCases)
*
* dE_dx_l: (numOut, numCases)
*/
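/*
 * Derivation note: combining dE/dy_i = p_i / y_i (from E = sum(p_l*log(y_l)))
 * with the softmax Jacobian gives dE/dx_i = p_i - y_i * sum_j p_j, which
 * reduces to p_i - y_i when the label distribution sums to one. That is why
 * the kernel below applies gradCoeff * (label_prob - model_prob) directly.
 */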
template <bool add>
__global__ void kCrossEntSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const float model_prob = y_l[tidx];
const float label_prob = labels[tidx];
float v = gradCoeff * (label_prob - model_prob);
if (add) {
dE_dx_l[tidx] += v;
} else {
dE_dx_l[tidx] = v;
}
}
}
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases)
*/
__global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs,
const int numCases, const int numOut) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
const int label = int(labels[tx]);
const float maxp = maxProbs[tx];
const float labelp = probs[label * numCases + tx];
labelLogProbs[tx] = __logf(labelp);
/*
* Compute the probability of guessing the correct case if you take the most-probable label.
*
* This is done like this:
*
* - If the most probable label is not equal to the true label, then the probability is zero.
* - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum).
*
* This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned
* maximum probability. But it's a safety measure to prevent over-estimating your accuracy.
* Though it could never happen in reality. Well it could. But it wouldn't. Cool?
*/
if (labelp != maxp) {
correctProbs[tx] = 0;
} else {
int numMax = 0;
for (int i = 0; i < numOut; i++) {
numMax += probs[i * numCases + tx] == maxp;
}
correctProbs[tx] = 1.0f / float(numMax);
}
}
}
/*
* E = -log(y_t)
* y_l: (numOut, numCases)
* labels: (1, numCases)
*
* dE_dy_l: (numOut, numCases)
*/
template <bool add>
__global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const int label = int(labels[tx]);
float v = gradCoeff * (label == ty);
v = __fdividef(v, y_l[tidx]);
if (add) {
dE_dy_l[tidx] += v;
} else {
dE_dy_l[tidx] = v;
}
}
}
/*
* E = -log(y_t)
* y_l: (numOut, numCases)
* labels: (1, numCases)
*
* dE_dx_l: (numOut, numCases)
*/
template <bool add>
__global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const int label = int(labels[tx]);
float v = gradCoeff * ((label == ty) - y_l[tidx]);
if (add) {
dE_dx_l[tidx] += v;
} else {
dE_dx_l[tidx] = v;
}
}
}
/*
* dE_dy_l: (numOut, numCases)
* y_l: (numOut, numCases)
*
* dE_dx_l: (numOut, numCases)
*/
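/*
 * For reference, the kernel below applies the softmax Jacobian
 *   dE_dx_i = y_i * sum_j dE_dy_j * (delta_ij - y_j)
 * and then blends it into the target as scaleTarget*dE_dx_i + scaleGrad*v
 * (or just scaleGrad*v when not accumulating).
 */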
template <bool add>
__global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut, const float scaleTarget, const float scaleGrad) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
float v = 0;
for (int j = 0; j < numOut; j++) {
v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]);
}
v *= y_l[tidx];
if (add) {
dE_dx_l[tidx] = scaleTarget * dE_dx_l[tidx] + scaleGrad * v;
} else {
dE_dx_l[tidx] = scaleGrad * v;
}
}
}
template <int B_X, bool add>
__global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target,
const int numElements) {
for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) {
if (add) {
target[i] += actGrad[i] * (output[i] == input[i]);
} else {
target[i] = actGrad[i] * (output[i] == input[i]);
}
}
}
/*
* E = hinge_loss(disH(x1,x3) - disH(x1,x2))
* x1: (numCases, numHids)
* x2: (numCases, numHids)
* x3: (numCases, numHids)
* rankCost: (numCases, 1) (*out)
*/
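/*
 * Note on the distance term below: assuming binary codes with entries in
 * {0,1}, 2*x - 1 maps them to {-1,+1}, and the sum over numHids of
 * 0.5*((2*x1-1)*(2*x3-1) - (2*x1-1)*(2*x2-1)) equals
 * disH(x1,x2) - disH(x1,x3). Adding the margin and clamping at zero gives the
 * hinge loss, weighted per case by x4.
 */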
__global__ void kLocRankCost(float* x1, float* x2, float* x3, float* x4, float* rankCost, const int numCases, const int numHids) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
float tempCost = 0;
float margin = 5.0;
if (tx < numCases) {
rankCost[tx] = 0;
for (int j = 0; j < numHids; j++) {
tempCost += 0.5 * ((2 * x1[j * numCases + tx] - 1) * (2 * x3[j * numCases + tx] - 1)- (2 * x1[j * numCases + tx] - 1) * (2 * x2[j * numCases + tx] - 1));
}
tempCost = tempCost + margin;
if (tempCost > 0) {
rankCost[tx] = x4[tx] * tempCost;
}
}
}
/*
* E = hinge_loss(disH(x1,x3) - disH(x1,x2)) + penalty_balance
* x1, x2, x3: (numCases, numHids)
* dE_dx_1, dE_dx_2, dE_dx_3: (numCases, numHids)
* rankCost: (numCases, 1)
*/
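/*
 * The penalty_balance term above corresponds to the trailing update below:
 * a contribution proportional to the per-unit mean of the (2*x1 - 1) codes,
 * scaled by rho and the per-case weight x4, apparently intended to keep the
 * learned binary codes balanced.
 */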
__global__ void kLocRankGrad(float* rankCost, float* x1, float* x2, float* x3, float* x4, float* dE_dx_1, float* dE_dx_2, float* dE_dx_3, const int numCases, const int numHids, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
const float rho = 1.0;
float mean_x1 = 0;
if (ty < numHids && tx < numCases) {
dE_dx_1[tidx] = 0;
dE_dx_2[tidx] = 0;
dE_dx_3[tidx] = 0;
for (int i = 0; i < numCases; i++) {
mean_x1 += (2 * x1[ty * numCases + i] - 1);
}
mean_x1 /= float(numCases);
if (rankCost[tx] > 0) {
dE_dx_1[tidx] = x4[tx] * 2 * gradCoeff * (x2[tidx] - x3[tidx]);
dE_dx_2[tidx] = x4[tx] * gradCoeff * (2 * x1[tidx] - 1);
dE_dx_3[tidx] = x4[tx] * (-1) * gradCoeff * (2 * x1[tidx] - 1);
}
dE_dx_1[tidx] = dE_dx_1[tidx] - x4[tx] * rho * 2 * mean_x1 / float(numCases);
}
}
void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) {
assert(actGrad.isContiguous());
assert(output.isContiguous());
assert(input.isContiguous());
assert(actGrad.isSameDims(input));
assert(actGrad.isSameDims(output));
dim3 blocks(DIVUP(actGrad.getNumElements(), 128));
dim3 threads(128);
hipStream_t stream = NVMatrix::getDefaultStream();
if (add) {
assert(actGrad.isSameDims(target));
hipFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kEltwiseMaxGrad<128, true>), dim3(blocks), dim3(threads), 0, stream, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements());
} else {
target.resize(actGrad);
hipFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kEltwiseMaxGrad<128, false>), dim3(blocks), dim3(threads), 0, stream, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements());
}
getLastCudaError("computeEltwiseMaxGrad: Kernel execution failed");
}
/*
* E = sum_i{-p_i*log(y_i)}
* probs: (numOut, numCases)
* labels: (numOut, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases)
*/
void computeCrossEntCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) {
int numCases = probs.getNumCols();
int numOut = probs.getNumRows();
assert(labels.isSameDims(probs));
assert(!labels.isTrans());
assert(!probs.isTrans());
assert(labels.isContiguous());
assert(probs.isContiguous());
NVMatrix& maxProbs = probs.max(0);
labelLogProbs_out.resize(1, numCases);
correctProbs_out.resize(1, numCases);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
hipStream_t stream = NVMatrix::getDefaultStream();
hipFuncSetCacheConfig(kCrossEntCost, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCrossEntCost), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), maxProbs.getDevData(),
labelLogProbs_out.getDevData(), correctProbs_out.getDevData(),
numCases, numOut);
getLastCudaError("kCrossEntCost: Kernel execution failed");
delete &maxProbs;
}
void computeCrossEntGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.isSameDims(probs));
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(!labels.isTrans());
assert(!probs.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
hipStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
hipLaunchKernelGGL(( kCrossEntGrad<false>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
hipLaunchKernelGGL(( kCrossEntGrad<true>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("kCrossEntGrad: Kernel execution failed");
}
void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, float scaleTarget, float scaleGrad) {
int numCases = acts.getLeadingDim();
int numOut = acts.getFollowingDim();
assert(acts.isSameDims(actsGrad));
assert(acts.isContiguous());
assert(actsGrad.isContiguous());
assert(target.isContiguous());
assert(acts.isTrans());
assert(actsGrad.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
hipStream_t stream = NVMatrix::getDefaultStream();
if (scaleTarget == 0) {
target.resize(acts);
hipLaunchKernelGGL(( kSoftmaxGrad<false>), dim3(blocks), dim3(threads), 0, stream, actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut, scaleTarget, scaleGrad);
} else {
hipLaunchKernelGGL(( kSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, stream, actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut, scaleTarget, scaleGrad);
}
getLastCudaError("computeSoftmaxGrad: Kernel execution failed");
}
void computeCrossEntSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.getLeadingDim() == probs.getLeadingDim() && labels.getFollowingDim() == probs.getFollowingDim());
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(probs.isTrans());
assert(!labels.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
hipStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
hipFuncSetCacheConfig(kCrossEntSoftmaxGrad<false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCrossEntSoftmaxGrad<false>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
hipFuncSetCacheConfig(kCrossEntSoftmaxGrad<true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCrossEntSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("kCrossEntSoftmaxGrad: Kernel execution failed");
}
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
 * target: (1, numCases) == log(y_l[labels,:])
*/
void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& maxProbs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) {
int numCases = probs.getNumCols();
int numOut = probs.getNumRows();
assert(labels.getNumElements() == numCases);
assert(!labels.isTrans());
assert(!probs.isTrans());
assert(labels.isContiguous());
assert(probs.isContiguous());
labelLogProbs_out.resize(1, numCases);
correctProbs_out.resize(1, numCases);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
hipStream_t stream = NVMatrix::getDefaultStream();
hipFuncSetCacheConfig(kLogregCost, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLogregCost), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), maxProbs.getDevData(),
labelLogProbs_out.getDevData(), correctProbs_out.getDevData(),
numCases, numOut);
getLastCudaError("computeLogregCost: Kernel execution failed");
}
void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.getNumElements() == numCases);
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(!labels.isTrans());
assert(!probs.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
hipStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
hipLaunchKernelGGL(( kLogregCostGrad<false>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
hipLaunchKernelGGL(( kLogregCostGrad<true>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("computeLogregGrad: Kernel execution failed");
}
void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.getNumElements() == numCases);
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(probs.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
hipStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
hipLaunchKernelGGL(( kLogregSoftmaxGrad<false>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
hipLaunchKernelGGL(( kLogregSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, stream, probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("computeLogregSoftmaxGrad: Kernel execution failed");
}
void computeLocRankCost(NVMatrix& x1, NVMatrix& x2, NVMatrix& x3, NVMatrix& x4, NVMatrix& rankCost_out) {
int numCases = x1.getLeadingDim();
int numHids = x1.getFollowingDim();
assert(x2.getNumRows() == numCases);
assert(x3.getNumRows() == numCases);
assert(x4.getNumRows() == numCases);
assert(x1.isTrans());
assert(x4.isTrans());
rankCost_out.resize(numCases, 1);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
hipFuncSetCacheConfig(kLocRankCost, hipFuncCachePreferL1);
hipStream_t stream = NVMatrix::getDefaultStream();
hipLaunchKernelGGL(( kLocRankCost), dim3(blocks), dim3(threads), 0, stream, x1.getDevData(), x2.getDevData(), x3.getDevData(), x4.getDevData(), rankCost_out.getDevData(), numCases, numHids);
getLastCudaError("computeLocRankCost: Kernel execution failed");
// hipDeviceSynchronize();
}
void computeLocRankGrad(NVMatrix& rankCost, NVMatrix& x1, NVMatrix& x2, NVMatrix& x3, NVMatrix& x4, NVMatrix& target1, NVMatrix& target2, NVMatrix& target3, float coeff) {
int numCases = x1.getLeadingDim();
int numHids = x1.getFollowingDim();
assert(x2.getNumRows() == numCases);
assert(x3.getNumRows() == numCases);
assert(x4.getNumRows() == numCases);
assert(x1.isTrans());
assert(x4.isTrans());
assert(x1.isContiguous());
assert(x2.isContiguous());
assert(x3.isContiguous());
assert(x4.isContiguous());
assert(target1.isContiguous());
assert(target2.isContiguous());
assert(target3.isContiguous());
target1.resize(numCases, numHids);
target2.resize(numCases, numHids);
target3.resize(numCases, numHids);
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numHids, LOGREG_GRAD_THREADS_Y));
hipStream_t stream = NVMatrix::getDefaultStream();
hipLaunchKernelGGL(( kLocRankGrad), dim3(blocks), dim3(threads), 0, stream, rankCost.getDevData(), x1.getDevData(), x2.getDevData(), x3.getDevData(), x4.getDevData(), target1.getDevData(), target2.getDevData(), target3.getDevData(), numCases, numHids, coeff);
getLastCudaError("computeLocRankGrad: Kernel execution failed");
}
|
4bfa792ba7f9f313d2962f8da3a6a4e461c1f3ef.cu
|
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <vector>
#include <cmath>
#include "../include/layer_kernels.cuh"
using namespace std;
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
 * maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
* top5Probs: (1, numCases) (*out)
*
* target: (1, numCases)
*
*/
__global__ void kMultiSoftmaxCost(float* probs, float* labels, float* maxProbs,
float* labelLogProbs, float* correctProbs, float* top5Probs,
const int numCases, const int numOut, const int setSize) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
const int label = int(labels[tx]);
const float maxp = maxProbs[tx];
const float labelp = probs[label * numCases + tx];
labelLogProbs[tx] = __logf(labelp);
int numBiggerProbs = 0, numEqualsProbs = 0;
for (int i = 0; i < numOut; ++i) {
numBiggerProbs += probs[i * numCases + tx] > labelp;
numEqualsProbs += probs[i * numCases + tx] == labelp;
}
const int slotsLeft = setSize - numBiggerProbs;
top5Probs[tx] = slotsLeft <= 0.0f ? 0.0f : (numEqualsProbs <= slotsLeft ? 1.0f : float(slotsLeft) / numEqualsProbs);
correctProbs[tx] = labelp != maxp ? 0.0f : 1.0f / float(numEqualsProbs);
}
}
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
* top5Probs: (1, numCases) (*out)
*
 * target: (1, numCases) == log(y_l[labels,:])
*/
void computeMultiSoftmaxCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& maxProbs, NVMatrix& labelLogProbs_out,
NVMatrix& correctProbs_out, NVMatrix& top5Probs_out, int setSize) {
int numCases = probs.getNumCols();
int numOut = probs.getNumRows();
assert(labels.getNumElements() == numCases);
assert(!labels.isTrans());
assert(!probs.isTrans());
assert(labels.isContiguous());
assert(probs.isContiguous());
// NVMatrix& maxProbs = probs.max(0);
labelLogProbs_out.resize(1, numCases);
correctProbs_out.resize(1, numCases);
top5Probs_out.resize(1, numCases);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
cudaStream_t stream = NVMatrix::getDefaultStream();
cudaFuncSetCacheConfig(kMultiSoftmaxCost, cudaFuncCachePreferL1);
kMultiSoftmaxCost<<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(),
labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), top5Probs_out.getDevData(),
numCases, numOut, setSize);
getLastCudaError("kMultiSoftmaxCost: Kernel execution failed");
// cudaThreadSynchronize();
}
/*
* E = sum(p_l * log(y_l))
* probs: (numOut, numCases)
* labels: (numOut, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases)
*/
__global__ void kCrossEntCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs,
const int numCases, const int numOut) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
probs += tx;
labels += tx;
maxProbs += tx;
labelLogProbs += tx;
correctProbs += tx;
const float maxp = maxProbs[0];
/*
* Compute the probability of guessing the correct case if you take the most-probable label.
*
* This is done like this:
*
* - If the most probable label is not equal to the true label, then the probability is zero.
* - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum).
*
* This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned
* maximum probability. But it's a safety measure to prevent over-estimating your accuracy.
* Though it could never happen in reality. Well it could. But it wouldn't. Cool?
*/
float crossEnt = 0.0f;
int numMax = 0;
bool correctLabel = false;
for (int i = 0; i < numOut; i++) {
const float label_prob = labels[i * numCases];
const float model_prob = probs[i * numCases];
numMax += model_prob == maxp;
crossEnt += label_prob * safelog(model_prob);
correctLabel |= model_prob == maxp && label_prob > 0.0f;
}
labelLogProbs[0] = crossEnt;
if (!correctLabel) {
correctProbs[0] = 0.0f;
} else {
correctProbs[0] = 1.0f / float(numMax);
}
}
}
/*
* E = sum(p_l * log(y_l))
* y_l: (numOut, numCases)
* labels: (numOut, numCases)
*
* dE_dy_l: (numOut, numCases)
*/
template <bool add>
__global__ void kCrossEntGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const float label_prob = labels[tidx];
const float model_prob = y_l[tidx];
const float v = gradCoeff * __fdividef(label_prob, model_prob);
if (add) {
dE_dy_l[tidx] += v;
} else {
dE_dy_l[tidx] = v;
}
}
}
/*
* E = sum(p_l * log(y_l))
* y_l: (numOut, numCases)
* labels: (numOut, numCases)
*
* dE_dx_l: (numOut, numCases)
*/
template <bool add>
__global__ void kCrossEntSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const float model_prob = y_l[tidx];
const float label_prob = labels[tidx];
float v = gradCoeff * (label_prob - model_prob);
if (add) {
dE_dx_l[tidx] += v;
} else {
dE_dx_l[tidx] = v;
}
}
}
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases)
*/
__global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs,
const int numCases, const int numOut) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
const int label = int(labels[tx]);
const float maxp = maxProbs[tx];
const float labelp = probs[label * numCases + tx];
labelLogProbs[tx] = __logf(labelp);
/*
* Compute the probability of guessing the correct case if you take the most-probable label.
*
* This is done like this:
*
* - If the most probable label is not equal to the true label, then the probability is zero.
* - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum).
*
* This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned
* maximum probability. But it's a safety measure to prevent over-estimating your accuracy.
* Though it could never happen in reality. Well it could. But it wouldn't. Cool?
*/
if (labelp != maxp) {
correctProbs[tx] = 0;
} else {
int numMax = 0;
for (int i = 0; i < numOut; i++) {
numMax += probs[i * numCases + tx] == maxp;
}
correctProbs[tx] = 1.0f / float(numMax);
}
}
}
/*
* E = -log(y_t)
* y_l: (numOut, numCases)
* labels: (1, numCases)
*
* dE_dy_l: (numOut, numCases)
*/
template <bool add>
__global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const int label = int(labels[tx]);
float v = gradCoeff * (label == ty);
v = __fdividef(v, y_l[tidx]);
if (add) {
dE_dy_l[tidx] += v;
} else {
dE_dy_l[tidx] = v;
}
}
}
/*
* E = -log(y_t)
* y_l: (numOut, numCases)
* labels: (1, numCases)
*
* dE_dx_l: (numOut, numCases)
*/
template <bool add>
__global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases,
const int numOut, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
const int label = int(labels[tx]);
float v = gradCoeff * ((label == ty) - y_l[tidx]);
if (add) {
dE_dx_l[tidx] += v;
} else {
dE_dx_l[tidx] = v;
}
}
}
/*
* dE_dy_l: (numOut, numCases)
* y_l: (numOut, numCases)
*
* dE_dx_l: (numOut, numCases)
*/
template <bool add>
__global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut, const float scaleTarget, const float scaleGrad) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
float v = 0;
for (int j = 0; j < numOut; j++) {
v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]);
}
v *= y_l[tidx];
if (add) {
dE_dx_l[tidx] = scaleTarget * dE_dx_l[tidx] + scaleGrad * v;
} else {
dE_dx_l[tidx] = scaleGrad * v;
}
}
}
template <int B_X, bool add>
__global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target,
const int numElements) {
for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) {
if (add) {
target[i] += actGrad[i] * (output[i] == input[i]);
} else {
target[i] = actGrad[i] * (output[i] == input[i]);
}
}
}
/*
* E = hinge_loss(disH(x1,x3) - disH(x1,x2))
* x1: (numCases, numHids)
* x2: (numCases, numHids)
* x3: (numCases, numHids)
* rankCost: (numCases, 1) (*out)
*/
__global__ void kLocRankCost(float* x1, float* x2, float* x3, float* x4, float* rankCost, const int numCases, const int numHids) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
float tempCost = 0;
float margin = 5.0;
if (tx < numCases) {
rankCost[tx] = 0;
for (int j = 0; j < numHids; j++) {
tempCost += 0.5 * ((2 * x1[j * numCases + tx] - 1) * (2 * x3[j * numCases + tx] - 1)- (2 * x1[j * numCases + tx] - 1) * (2 * x2[j * numCases + tx] - 1));
}
tempCost = tempCost + margin;
if (tempCost > 0) {
rankCost[tx] = x4[tx] * tempCost;
}
}
}
/*
* E = hinge_loss(disH(x1,x3) - disH(x1,x2)) + penalty_balance
* x1, x2, x3: (numCases, numHids)
* dE_dx_1, dE_dx_2, dE_dx_3: (numCases, numHids)
* rankCost: (numCases, 1)
*/
__global__ void kLocRankGrad(float* rankCost, float* x1, float* x2, float* x3, float* x4, float* dE_dx_1, float* dE_dx_2, float* dE_dx_3, const int numCases, const int numHids, const float gradCoeff) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
const float rho = 1.0;
float mean_x1 = 0;
if (ty < numHids && tx < numCases) {
dE_dx_1[tidx] = 0;
dE_dx_2[tidx] = 0;
dE_dx_3[tidx] = 0;
for (int i = 0; i < numCases; i++) {
mean_x1 += (2 * x1[ty * numCases + i] - 1);
}
mean_x1 /= float(numCases);
if (rankCost[tx] > 0) {
dE_dx_1[tidx] = x4[tx] * 2 * gradCoeff * (x2[tidx] - x3[tidx]);
dE_dx_2[tidx] = x4[tx] * gradCoeff * (2 * x1[tidx] - 1);
dE_dx_3[tidx] = x4[tx] * (-1) * gradCoeff * (2 * x1[tidx] - 1);
}
dE_dx_1[tidx] = dE_dx_1[tidx] - x4[tx] * rho * 2 * mean_x1 / float(numCases);
}
}
void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) {
assert(actGrad.isContiguous());
assert(output.isContiguous());
assert(input.isContiguous());
assert(actGrad.isSameDims(input));
assert(actGrad.isSameDims(output));
dim3 blocks(DIVUP(actGrad.getNumElements(), 128));
dim3 threads(128);
cudaStream_t stream = NVMatrix::getDefaultStream();
if (add) {
assert(actGrad.isSameDims(target));
cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, cudaFuncCachePreferL1);
kEltwiseMaxGrad<128, true><<<blocks, threads, 0, stream>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements());
} else {
target.resize(actGrad);
cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, cudaFuncCachePreferL1);
kEltwiseMaxGrad<128, false><<<blocks, threads, 0, stream>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements());
}
getLastCudaError("computeEltwiseMaxGrad: Kernel execution failed");
}
/*
* E = sum_i{-p_i*log(y_i)}
* probs: (numOut, numCases)
* labels: (numOut, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases)
*/
void computeCrossEntCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) {
int numCases = probs.getNumCols();
int numOut = probs.getNumRows();
assert(labels.isSameDims(probs));
assert(!labels.isTrans());
assert(!probs.isTrans());
assert(labels.isContiguous());
assert(probs.isContiguous());
NVMatrix& maxProbs = probs.max(0);
labelLogProbs_out.resize(1, numCases);
correctProbs_out.resize(1, numCases);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
cudaStream_t stream = NVMatrix::getDefaultStream();
cudaFuncSetCacheConfig(kCrossEntCost, cudaFuncCachePreferL1);
kCrossEntCost<<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(),
labelLogProbs_out.getDevData(), correctProbs_out.getDevData(),
numCases, numOut);
getLastCudaError("kCrossEntCost: Kernel execution failed");
delete &maxProbs;
}
void computeCrossEntGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.isSameDims(probs));
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(!labels.isTrans());
assert(!probs.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
cudaStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
kCrossEntGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
kCrossEntGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("kCrossEntGrad: Kernel execution failed");
}
void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, float scaleTarget, float scaleGrad) {
int numCases = acts.getLeadingDim();
int numOut = acts.getFollowingDim();
assert(acts.isSameDims(actsGrad));
assert(acts.isContiguous());
assert(actsGrad.isContiguous());
assert(target.isContiguous());
assert(acts.isTrans());
assert(actsGrad.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
cudaStream_t stream = NVMatrix::getDefaultStream();
if (scaleTarget == 0) {
target.resize(acts);
kSoftmaxGrad<false><<<blocks, threads, 0, stream>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut, scaleTarget, scaleGrad);
} else {
kSoftmaxGrad<true><<<blocks, threads, 0, stream>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut, scaleTarget, scaleGrad);
}
getLastCudaError("computeSoftmaxGrad: Kernel execution failed");
}
void computeCrossEntSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.getLeadingDim() == probs.getLeadingDim() && labels.getFollowingDim() == probs.getFollowingDim());
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(probs.isTrans());
assert(!labels.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
cudaStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
cudaFuncSetCacheConfig(kCrossEntSoftmaxGrad<false>, cudaFuncCachePreferL1);
kCrossEntSoftmaxGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
cudaFuncSetCacheConfig(kCrossEntSoftmaxGrad<true>, cudaFuncCachePreferL1);
kCrossEntSoftmaxGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("kCrossEntSoftmaxGrad: Kernel execution failed");
}
/*
* E = -log(y_t)
* probs: (numOut, numCases)
* labels: (1, numCases)
* maxProbs: (1, numCases)
* labelLogProbs: (1, numCases) (*out)
* correctProbs: (1, numCases) (*out)
*
* target: (1, numCases) == log(y_l[labels,:])
*/
void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& maxProbs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) {
int numCases = probs.getNumCols();
int numOut = probs.getNumRows();
assert(labels.getNumElements() == numCases);
assert(!labels.isTrans());
assert(!probs.isTrans());
assert(labels.isContiguous());
assert(probs.isContiguous());
labelLogProbs_out.resize(1, numCases);
correctProbs_out.resize(1, numCases);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
cudaStream_t stream = NVMatrix::getDefaultStream();
cudaFuncSetCacheConfig(kLogregCost, cudaFuncCachePreferL1);
kLogregCost<<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(),
labelLogProbs_out.getDevData(), correctProbs_out.getDevData(),
numCases, numOut);
getLastCudaError("computeLogregCost: Kernel execution failed");
}
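/*
 * Worked example of the cost above (a sketch; the exact tie-breaking is
 * defined by kLogregCost itself, which lives elsewhere in this file): for one
 * case with probs column (0.7, 0.2, 0.1) and label 0, labelLogProbs =
 * log(0.7) ~= -0.357, so E = -log(y_t) ~= 0.357. correctProbs records whether
 * the labeled class attains maxProbs (here 0.7 is the max, so the case counts
 * as correct).
 */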
void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.getNumElements() == numCases);
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(!labels.isTrans());
assert(!probs.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
cudaStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
kLogregCostGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
kLogregCostGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("computeLogregGrad: Kernel execution failed");
}
void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) {
int numCases = probs.getLeadingDim();
int numOut = probs.getFollowingDim();
assert(labels.getNumElements() == numCases);
assert(probs.isContiguous());
assert(target.isContiguous());
assert(labels.isContiguous());
assert(probs.isTrans());
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
cudaStream_t stream = NVMatrix::getDefaultStream();
if (!add) {
target.resize(probs);
kLogregSoftmaxGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
} else {
kLogregSoftmaxGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(),
numCases, numOut, coeff);
}
getLastCudaError("computeLogregSoftmaxGrad: Kernel execution failed");
}
void computeLocRankCost(NVMatrix& x1, NVMatrix& x2, NVMatrix& x3, NVMatrix& x4, NVMatrix& rankCost_out) {
int numCases = x1.getLeadingDim();
int numHids = x1.getFollowingDim();
assert(x2.getNumRows() == numCases);
assert(x3.getNumRows() == numCases);
assert(x4.getNumRows() == numCases);
assert(x1.isTrans());
assert(x4.isTrans());
rankCost_out.resize(numCases, 1);
dim3 threads(LOGREG_ERR_THREADS_X, 1);
dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1);
cudaFuncSetCacheConfig(kLocRankCost, cudaFuncCachePreferL1);
cudaStream_t stream = NVMatrix::getDefaultStream();
kLocRankCost<<<blocks, threads, 0, stream>>>(x1.getDevData(), x2.getDevData(), x3.getDevData(), x4.getDevData(), rankCost_out.getDevData(), numCases, numHids);
getLastCudaError("computeLocRankCost: Kernel execution failed");
// cudaThreadSynchronize();
}
void computeLocRankGrad(NVMatrix& rankCost, NVMatrix& x1, NVMatrix& x2, NVMatrix& x3, NVMatrix& x4, NVMatrix& target1, NVMatrix& target2, NVMatrix& target3, float coeff) {
int numCases = x1.getLeadingDim();
int numHids = x1.getFollowingDim();
assert(x2.getNumRows() == numCases);
assert(x3.getNumRows() == numCases);
assert(x4.getNumRows() == numCases);
assert(x1.isTrans());
assert(x4.isTrans());
assert(x1.isContiguous());
assert(x2.isContiguous());
assert(x3.isContiguous());
assert(x4.isContiguous());
assert(target1.isContiguous());
assert(target2.isContiguous());
assert(target3.isContiguous());
target1.resize(numCases, numHids);
target2.resize(numCases, numHids);
target3.resize(numCases, numHids);
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numHids, LOGREG_GRAD_THREADS_Y));
cudaStream_t stream = NVMatrix::getDefaultStream();
kLocRankGrad<<<blocks, threads, 0, stream>>>(rankCost.getDevData(), x1.getDevData(), x2.getDevData(), x3.getDevData(), x4.getDevData(), target1.getDevData(), target2.getDevData(), target3.getDevData(), numCases, numHids, coeff);
getLastCudaError("computeLocRankGrad: Kernel execution failed");
}
|
afede91f58a662f1ffc76a0dd60c3f3dd6397936.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <future>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "math.h"
#include "sift.h"
#include "matrix_helper.h"
#include "cuda_timer.h"
#include "spdlog/spdlog.h"
namespace cudautils {
template<typename T>
struct greater_tol : public thrust::binary_function<T, T, bool>
{
// tolerance to compare doubles (15 decimal places)
__host__ __device__ bool operator()(const T &lhs, const T &rhs) const {
if (fabs(lhs - rhs) <= .000000000000001)
return false;
return lhs > rhs;
}
};
struct is_negative {
__host__ __device__ bool operator() (const long long a) const {
return a < 0;
}
};
struct isnan_test {
__host__ __device__ bool operator() (const float a) const {
return isnan(a);
}
};
// row major order index into the descriptor vector
// note the descriptor vector length is determined by
// sift_params.IndexSize ^ 3 * sift_params.nFaces
// this is why i, j, and k are dimensions of stride sift_params.IndexSize
__forceinline__ __device__
int bin_sub2ind_row(int i, int j, int k, uint16_t l, const cudautils::SiftParams sift_params) {
return (int) l + sift_params.nFaces * (k + j * sift_params.IndexSize + i
* pow(sift_params.IndexSize, 2));
}
// column major order index into the descriptor vector
// note the descriptor vector length is determined by
// sift_params.IndexSize ^ 3 * sift_params.nFaces
// this is why i, j, and k are dimensions of stride sift_params.IndexSize
__forceinline__ __device__
int bin_sub2ind(int i, int j, int k, uint16_t l, const cudautils::SiftParams sift_params) {
return (int) i + j * sift_params.IndexSize + k * pow(sift_params.IndexSize, 2)
+ l * pow(sift_params.IndexSize, 3);
}
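// Worked example, assuming the defaults noted elsewhere in this file
// (descriptor_len = 640 and nFaces = 80, which together imply IndexSize = 2):
// bin_sub2ind(1, 0, 1, 3) = 1 + 0*2 + 1*4 + 3*8 = 29, i.e. face 3 of the
// spatial bin (i, j, k) = (1, 0, 1).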
// column major order index into the descriptor vector
// note the descriptor vector length is determined by
// sift_params.IndexSize ^ 3 * sift_params.nFaces
// this is why i, j, and k are dimensions of stride sift_params.IndexSize
__global__
void bin_sub2ind_wrap(int i, int j, int k, uint16_t l, const cudautils::SiftParams sift_params, int* ind) {
*ind = bin_sub2ind(i, j, k, l, sift_params);
return ;
}
__forceinline__ __device__
void place_in_index(double* index, double mag, int i, int j, int k,
double* yy, uint16_t* ix, long long idx, const cudautils::SiftParams sift_params) {
double tmpsum = 0.0;
int bin_index;
if (sift_params.Smooth_Flag) {
for (int tessel=0; tessel < sift_params.Tessel_thresh; tessel++) {
tmpsum += pow(yy[tessel], sift_params.Smooth_Var);
}
// Add three nearest tesselation faces
for (int ii=0; ii < sift_params.Tessel_thresh; ii++) {
bin_index = bin_sub2ind(i, j, k, ix[ii], sift_params);
#ifdef DEBUG_NUMERICAL
printf("i%d j%d k%d ix[ii]%d bin_index%d yy[ii]%.54f, index+=%.54f, idx%lld\n",
i, j, k, ix[ii], bin_index, yy[ii], mag * pow(yy[ii],
sift_params.Smooth_Var ) / tmpsum, idx);
#endif
index[bin_index] += mag * pow(yy[ii], sift_params.Smooth_Var ) / tmpsum;
}
} else {
bin_index = bin_sub2ind(i, j, k, ix[0], sift_params);
index[bin_index] += mag;
}
return;
}
// matrix multiply in row memory order
// first is a matrix in row order
// second is the array multiply
// assumes length of second = cols of first
__forceinline__ __device__
void dot_product(double* first, double* second, double* out, int rows,
int cols) {
for (int i=0; i < rows; i++) {
double sum = 0.0;
for (int j=0; j < cols; j++) {
sum += first[j + i * cols] * second[j];
}
out[i] = sum;
}
}
// matrix multiply in row memory order
// first is a matrix in row order
// second is the array multiply
// assumes length of second = cols of first
__global__
void dot_product_wrap(double* first, double* second, double* out, int rows,
int cols) {
dot_product(first, second, out, rows, cols);
return;
}
// matrix multiply in col memory order
// first is a matrix in column order
// second is the array multiply
// assumes length of second = cols of first
__forceinline__ __device__
void dot_product_col_ord(double* first, double* second, double* out, int rows,
int cols) {
for (int i=0; i < rows; i++) {
double sum = 0.0;
for (int j=0; j < cols; j++) {
sum += first[i + j * rows] * second[j];
}
out[i] = sum;
}
}
// assumes r,c,s lie within accessible image boundaries
__forceinline__ __device__
double get_grad_ori_vector(double* image, long long idx, unsigned int
x_stride, unsigned int y_stride, int r, int c, int t, double vect[3],
double* yy, uint16_t* ix, const cudautils::SiftParams sift_params,
double* device_centers) {
int last_row = sift_params.image_size0 - 1;
int last_col = sift_params.image_size1 - 1;
int last_slice = sift_params.image_size2 - 1;
/* this is literal translation from Scovanner et al. 3DSIFT,
even though it seems xgrad and ygrad are switched, and ygrad seems to be
in wrong direction. Protect edge cases explicitly rather than
by padding
*/
double xgrad, ygrad, zgrad;
if (c == 0) {
xgrad = 2.0 * (image[idx + x_stride] - image[idx]);
} else if (c == last_col) {
xgrad = 2.0 * (image[idx] - image[idx - x_stride]);
} else {
xgrad = image[idx + x_stride] - image[idx - x_stride];
}
if (r == 0) {
ygrad = 2.0 * (image[idx] - image[idx + 1]);
} else if (r == last_row) {
ygrad = 2.0 * (image[idx - 1] - image[idx]);
} else {
ygrad = image[idx - 1] - image[idx + 1];
}
if (t == 0) {
zgrad = 2.0 * (image[idx + x_stride * y_stride] - image[idx]);
} else if (t == last_slice) {
zgrad = 2.0 * (image[idx] - image[idx - x_stride * y_stride]);
} else {
zgrad = image[idx + x_stride * y_stride] - image[idx - x_stride * y_stride];
}
double mag = sqrt(xgrad * xgrad + ygrad * ygrad + zgrad * zgrad);
if (mag != 0.0) {
// normalize only when the gradient is non-zero to avoid dividing by zero
vect[0] = xgrad / mag;
vect[1] = ygrad / mag;
vect[2] = zgrad / mag;
} else {
vect[0] = 1.0;
vect[1] = 0.0;
vect[2] = 0.0;
}
//Find the nearest tesselation face indices
// N = sift_params.nFaces
int N = sift_params.fv_centers_len / DIMS;
dot_product(device_centers, vect, yy, N, DIMS);
// overwrite idxs 1 : N, N can not exceed the length of ori_hist
thrust::sequence(thrust::device, ix, ix + sift_params.nFaces);
thrust::stable_sort_by_key(thrust::device, yy, yy + sift_params.nFaces, ix, thrust::greater<double>());
#ifdef DEBUG_NUMERICAL
printf("ggov N%d fv_len%d DIMS%d idx%lld vect0 %.4f vect1 %.4f vect2 %.4f image[idx] %.4f r%d c%d t%d yy %.4f %.4f %.4f %.4f ix %d %d %d %d eq:%d diff:%.54f\n",
N, sift_params.fv_centers_len, DIMS, idx, vect[0], vect[1], vect[2],
image[idx], r, c, t, yy[0], yy[1], yy[2], yy[3], ix[0], ix[1], ix[2],
ix[3], yy[2] == yy[3], yy[2] - yy[3]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[0], device_centers[3 * ix[0]], device_centers[3 * ix[0] + 1], device_centers[3 * ix[0] + 2]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[1], device_centers[3 * ix[1]], device_centers[3 * ix[1] + 1], device_centers[3 * ix[1] + 2]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[2], device_centers[3 * ix[2]], device_centers[3 * ix[2] + 1], device_centers[3 * ix[2] + 2]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[3], device_centers[3 * ix[3]], device_centers[3 * ix[3] + 1], device_centers[3 * ix[3] + 2]);
#endif
return mag;
}
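// One-dimensional illustration of the finite differences above: for voxel
// values (1, 4, 9) along x, the interior gradient is 9 - 1 = 8 (a central
// difference over two voxels, not divided by 2), while at the left edge the
// code uses 2.0 * (4 - 1) = 6, i.e. a doubled one-sided difference.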
__global__
void get_grad_ori_vector_wrap(double* image, long long idx, unsigned int
x_stride, unsigned int y_stride, int r, int c, int t, double vect[3], double* yy, uint16_t* ix,
const cudautils::SiftParams sift_params, double* device_centers, double* mag) {
*mag = cudautils::get_grad_ori_vector(image,
idx, x_stride, y_stride, r, c, t, vect,
yy, ix, sift_params, device_centers);
return;
}
/*r, c, s is the pixel index (x, y, z dimensions respect.) in the image within the radius of the */
/*keypoint before clamped*/
/*For each pixel, take a neighborhood of xyradius and tiradius,*/
/*bin it down to the sift_params.IndexSize dimensions*/
/*thus, i_indx, j_indx, s_indx represent the binned index within the radius of the keypoint*/
__forceinline__ __device__
void add_sample(double* index, double* image, double distsq, long long
idx, unsigned int x_stride, unsigned int y_stride, int i_bin, int j_bin, int k_bin,
int r, int c, int t, const cudautils::SiftParams sift_params, double*
device_centers, uint16_t* ix, double* yy) {
double sigma = sift_params.SigmaScaled;
double weight = exp(-(distsq / (2.0 * sigma * sigma)));
double vect[3] = {0.0, 0.0, 0.0};
// gradient and orientation vectors calculated from 3D halo/neighboring
// pixels
double mag = get_grad_ori_vector(image, idx, x_stride, y_stride, r, c, t,
vect, yy, ix, sift_params, device_centers);
mag *= weight; // scale magnitude by gaussian
place_in_index(index, mag, i_bin, j_bin, k_bin, yy, ix, idx, sift_params);
return;
}
// floor quotient, add 1
// clamp bin idx to IndexSize
__forceinline__ __device__
int get_bin_idx(int orig, int radius, int IndexSize) {
int idx = (int) floor((orig + radius) / (2.0 * (double) radius / IndexSize));
if (idx >= IndexSize) // clamp to IndexSize
idx = IndexSize - 1;
return idx;
}
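// Worked example: with radius = 12 and IndexSize = 2 the bin width is
// 2*12/2 = 12, so orig = -3 maps to floor(9/12) = 0 and orig = 5 maps to
// floor(17/12) = 1; orig = 12 would give 2 and is clamped back to 1.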
// floor quotient, add 1
// clamp bin idx to IndexSize
__global__
void get_bin_idx_wrap(int orig, int radius, int IndexSize, int* idx) {
*idx = get_bin_idx(orig, radius, IndexSize);
return;
}
__forceinline__ __device__
double* key_sample(const cudautils::SiftParams sift_params,
cudautils::Keypoint key, double* image, long long idx,
unsigned int x_stride, unsigned int y_stride,
double* device_centers, uint16_t* ix, double* yy,
double* index) {
double xySpacing = (double) sift_params.xyScale * sift_params.MagFactor;
double tSpacing = (double) sift_params.tScale * sift_params.MagFactor;
int xyiradius = rint(1.414 * xySpacing * (sift_params.IndexSize + 1) / 2.0);
int tiradius = rint(1.414 * tSpacing * (sift_params.IndexSize + 1) / 2.0);
// Surrounding radius of pixels are binned for computation
// according to sift_params.IndexSize
int r, c, t, i_bin, j_bin, k_bin;
double distsq;
long long update_idx;
for (int i = -xyiradius; i <= xyiradius; i++) {
for (int j = -xyiradius; j <= xyiradius; j++) {
for (int k = -tiradius; k <= tiradius; k++) {
distsq = (double) pow(i,2) + pow(j,2) + pow(k,2);
// Find bin idx
i_bin = get_bin_idx(i, xyiradius, sift_params.IndexSize);
j_bin = get_bin_idx(j, xyiradius, sift_params.IndexSize);
k_bin = get_bin_idx(k, tiradius, sift_params.IndexSize);
// Find original image pixel idx
r = key.x + i;
c = key.y + j;
t = key.z + k;
// only add if within image range
if (!(r < 0 || r >= sift_params.image_size0 ||
c < 0 || c >= sift_params.image_size1
|| t < 0 || t >= sift_params.image_size2)) {
// image is assumed as column order
// make sure it isn't cast to unsigned
update_idx = (long long) idx + i + (int) x_stride * j +
(int) x_stride * (int) y_stride * k;
add_sample(index, image, distsq, update_idx, x_stride, y_stride,
i_bin, j_bin, k_bin, r, c, t, sift_params,
device_centers, ix, yy);
}
}
}
}
return index;
}
__forceinline__ __device__
double* build_ori_hists(int x, int y, int z, long long idx, unsigned int
x_stride, unsigned int y_stride, int radius, double* image,
const cudautils::SiftParams sift_params, double* device_centers,
uint16_t* ix, double* yy, double* ori_hist) {
double mag;
double vect[3] = {0.0, 0.0, 0.0};
int r, c, t;
long long update_idx;
for (int i = -radius; i <= radius; i++) {
for (int j = -radius; j <= radius; j++) {
for (int k = -radius; k <= radius; k++) {
// Find original image pixel idx
r = x + i;
c = y + j;
t = z + k;
// only add if within image range
// NOTE from original source
// Do not use last row or column, which are not valid.
if (!(r < 0 || r >= sift_params.image_size0 - 2 ||
c < 0 || c >= sift_params.image_size1 - 2 ||
t < 0 || t >= sift_params.image_size2 - 2)) {
// image is assumed as column order
// make sure it isn't cast to unsigned
update_idx = (long long) idx + i + (int) x_stride * j +
(int) x_stride * (int) y_stride * k;
/*gradient and orientation vectors calculated from 3D halo/neighboring pixels*/
mag = get_grad_ori_vector(image, update_idx, x_stride, y_stride,
r, c, t, vect, yy, ix, sift_params, device_centers);
ori_hist[ix[0]] += mag;
}
}
}
}
return ori_hist;
}
__forceinline__ __device__
void normalize_arr(double* arr, int len) {
double sqlen = 0.0;
for (int i=0; i < len; i++) {
sqlen += arr[i] * arr[i];
}
double fac = 1.0 / sqrt(sqlen);
for (int i=0; i < len; i++) {
arr[i] = arr[i] * fac;
}
return;
}
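// Example: the array (3, 4) has squared length 25, so fac = 1/5 and the
// normalized result is (0.6, 0.8), i.e. a unit vector in the L2 sense.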
__forceinline__ __device__
cudautils::Keypoint make_keypoint_sample(cudautils::Keypoint key, double*
image, const cudautils::SiftParams sift_params, unsigned int thread_idx, long long idx,
unsigned int x_stride, unsigned int y_stride, double * descriptors,
double* device_centers, uint16_t* ix, double* yy) {
bool changed = false;
// default N=640; 5120 bytes
int N = sift_params.descriptor_len;
double* index = &(descriptors[thread_idx * sift_params.descriptor_len]);
memset(index, 0, N * sizeof(double));
key_sample(sift_params, key, image, idx, x_stride, y_stride,
device_centers, ix, yy, index);
#ifdef DEBUG_NUMERICAL
for (int i=0; i < sift_params.descriptor_len; i++) {
if (index[i] != 0)
printf("index[%d]=%.4f\n",i, index[i]);
}
printf("\n");
#endif
normalize_arr(index, N);
for (int i=0; i < N; i++) {
if (index[i] > sift_params.MaxIndexVal) {
index[i] = sift_params.MaxIndexVal;
changed = true;
}
}
if (changed) {
normalize_arr(index, N);
}
int intval;
for (int i=0; i < N; i++) {
intval = rint(512.0 * index[i]);
index[i] = (double) min(255, intval);
}
return key;
}
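/*
 * Quantization example for the loop above: an index entry of 0.3 becomes
 * rint(512.0 * 0.3) = 154, while anything at or above 255/512 ~= 0.498
 * saturates to 255, as in classic 8-bit SIFT descriptor conventions.
 */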
__forceinline__ __device__
cudautils::Keypoint make_keypoint(double* image, int x, int y, int z,
unsigned int thread_idx, long long idx, unsigned int x_stride, unsigned int y_stride,
const cudautils::SiftParams sift_params, double * descriptors, double*
device_centers, uint16_t* ix, double* yy) {
cudautils::Keypoint key;
key.x = x;
key.y = y;
key.z = z;
return make_keypoint_sample(key, image, sift_params, thread_idx, idx,
x_stride, y_stride, descriptors, device_centers, ix, yy);
}
/* Main function of 3DSIFT Program from http://www.cs.ucf.edu/~pscovann/
Inputs:
image - a 3 dimensional matrix of double
xyScale and tScale - affects both the scale and the resolution, these are
usually set to 1 and scaling is done before calling this function
x, y, and z - the location of the center of the keypoint where a descriptor is requested
Outputs:
keypoint - the descriptor, varies in size depending on values in LoadParams.m
reRun - a flag (0 or 1) which is set if the data at (x,y,z) is not
descriptive enough for a good keypoint
*/
__global__
void create_descriptor(
unsigned int x_stride,
unsigned int y_stride,
unsigned int x_sub_start,
unsigned int y_sub_start,
unsigned int dw,
const unsigned int map_idx_size,
long long *map_idx,
int8_t *map,
double *image,
const cudautils::SiftParams sift_params,
double* device_centers,
double *descriptors,
uint16_t* idx_scratch,
double* yy_scratch,
uint16_t* ori_idx_scratch,
double* ori_scratch) {
// thread per keypoint in this substream
unsigned int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx >= map_idx_size) return;
// map_idx holds the relevant image idxs only for the substream
// map_idx_size matches the total # of threads
// idx describes the linear index for current GPUs section of the image and corresponding map
long long idx = map_idx[thread_idx];
// column-major order since image is from matlab
int x, y, z;
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_stride, y_stride, idx, padding_x, padding_y, padding_z);
// correct for dw_ padding, 0-indexed for checking boundaries
x = x_sub_start + padding_x - dw;
y = y_sub_start + padding_y - dw;
z = padding_z - dw;
uint16_t* ix = (uint16_t*) &(idx_scratch[thread_idx * sift_params.nFaces]);
cudaCheckPtrDevice(ix);
thrust::sequence(thrust::device, ix, ix + sift_params.nFaces);
double *yy = (double*) &(yy_scratch[thread_idx * sift_params.nFaces]);
cudaCheckPtrDevice(yy);
if (sift_params.TwoPeak_Flag) {
int radius = rint(sift_params.xyScale * 3.0);
// init ori hist indices
int ori_hist_len = sift_params.nFaces; //default 80
uint16_t* ori_hist_idx = &(ori_idx_scratch[ori_hist_len * thread_idx]);
cudaCheckPtrDevice(ori_hist_idx);
thrust::sequence(thrust::device, ori_hist_idx, ori_hist_idx + ori_hist_len);
//init ori histogram
double* ori_hist = &(ori_scratch[ori_hist_len * thread_idx]);
cudaCheckPtrDevice(ori_hist);
memset(ori_hist, 0, ori_hist_len * sizeof(double));
build_ori_hists(x, y, z, idx, x_stride, y_stride, radius, image,
sift_params, device_centers, ix, yy, ori_hist);
// descending order according to ori_hist
thrust::stable_sort_by_key(thrust::device, ori_hist, ori_hist +
ori_hist_len, ori_hist_idx, thrust::greater<double>());
double prod01, prod02;
dot_product(&(device_centers[DIMS * ori_hist_idx[0]]),
&(device_centers[DIMS * ori_hist_idx[1]]), &prod01, 1, DIMS);
dot_product(&(device_centers[DIMS * ori_hist_idx[0]]),
&(device_centers[DIMS * ori_hist_idx[2]]), &prod02, 1, DIMS);
#ifdef DEBUG_NUMERICAL
printf("TPF x%d y%d z%d ori_hist %.25f %.25f %.25f ori_hist_idx %d %d %d %d prod01 %.25f prod02 %.25f eq:%d diff:%.54f\n",
x, y, z, ori_hist[0], ori_hist[1], ori_hist[2], ori_hist_idx[0], ori_hist_idx[1], ori_hist_idx[2], ori_hist_idx[3],
prod01, prod02, ori_hist[2] == ori_hist[3], ori_hist[2] - ori_hist[3]);
#endif
if ( ( prod01 > sift_params.TwoPeak_Thresh) &&
( prod02 > sift_params.TwoPeak_Thresh) ) {
// mark this keypoint as null in map
map_idx[thread_idx] = -1;
#ifdef DEBUG_OUTPUT
printf("Removed keypoint from thread: %u, desc index: %lld, x:%d y:%d z:%d\n",
thread_idx, idx, x, y, z);
#endif
return ;
}
}
cudautils::Keypoint key = make_keypoint(image, x, y, z, thread_idx, idx,
x_stride, y_stride, sift_params, descriptors, device_centers, ix,
yy);
return;
}
/*Define the constructor for the SIFT class*/
/*See the class Sift definition in sift.h*/
Sift::Sift(
const unsigned int x_size,
const unsigned int y_size,
const unsigned int z_size,
const unsigned int x_sub_size,
const unsigned int y_sub_size,
const unsigned int dx,
const unsigned int dy,
const unsigned int dw,
const unsigned int num_gpus,
const unsigned int num_streams,
const cudautils::SiftParams sift_params,
const double* fv_centers)
: x_size_(x_size), y_size_(y_size), z_size_(z_size),
x_sub_size_(x_sub_size), y_sub_size_(y_sub_size),
dx_(dx), dy_(dy), dw_(dw),
num_gpus_(num_gpus), num_streams_(num_streams),
sift_params_(sift_params),
fv_centers_(fv_centers),
subdom_data_(num_gpus) {
logger_ = spdlog::get("console");
if (! logger_) {
logger_ = spdlog::stdout_logger_mt("console");
}
#ifdef DEBUG_OUTPUT
spdlog::set_level(spdlog::level::debug);
#else
spdlog::set_level(spdlog::level::info);
#endif
size_t log_q_size = 4096;
spdlog::set_async_mode(log_q_size);
num_x_sub_ = get_num_blocks(x_size_, x_sub_size_);
num_y_sub_ = get_num_blocks(y_size_, y_sub_size_);
x_sub_stride_ = x_sub_size_ + 2 * dw_;
y_sub_stride_ = y_sub_size_ + 2 * dw_;
dx_stride_ = dx_ + 2 * dw_;
dy_stride_ = dy_ + 2 * dw_;
z_stride_ = z_size_ + 2 * dw_;
#ifdef DEBUG_OUTPUT
logger_->info("x_size={}, x_sub_size={}, num_x_sub={}, x_sub_stride={}, dx={}, dx_stride={}",
x_size_, x_sub_size_, num_x_sub_, x_sub_stride_, dx_, dx_stride_);
logger_->info("y_size={}, y_sub_size={}, num_y_sub={}, y_sub_stride={}, dy={}, dy_stride={}",
y_size_, y_sub_size_, num_y_sub_, y_sub_stride_, dy_, dy_stride_);
logger_->info("z_size={}, dw={}, z_stride={}", z_size_, dw_, z_stride_);
#endif
dom_data_ = std::make_shared<DomainDataOnHost>(x_size_, y_size_, z_size_);
for (unsigned int i = 0; i < num_gpus_; i++) {
hipSetDevice(i);
subdom_data_[i] = std::make_shared<SubDomainDataOnGPU>(x_sub_stride_, y_sub_stride_, z_stride_, num_streams_);
for (unsigned int j = 0; j < num_streams_; j++) {
subdom_data_[i]->stream_data[j] = std::make_shared<SubDomainDataOnStream>(dx_stride_, dy_stride_, z_stride_);
hipStreamCreate(&subdom_data_[i]->stream_data[j]->stream);
}
}
hipSetDevice(0);
unsigned int idx_gpu = 0;
for (unsigned int y_sub_i = 0; y_sub_i < num_y_sub_; y_sub_i++) {
for (unsigned int x_sub_i = 0; x_sub_i < num_x_sub_; x_sub_i++) {
subdom_data_[idx_gpu]->x_sub_i_list.push_back(x_sub_i);
subdom_data_[idx_gpu]->y_sub_i_list.push_back(y_sub_i);
idx_gpu++;
if (idx_gpu == num_gpus) {
idx_gpu = 0;
}
}
}
}
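// Illustration of the round-robin assignment above: with num_x_sub_ = 3,
// num_y_sub_ = 2 and num_gpus = 2, GPU 0 receives sub-domains
// (x_sub_i, y_sub_i) = (0,0), (2,0), (1,1) and GPU 1 receives (1,0), (0,1),
// (2,1), so consecutive sub-domains alternate between the GPUs.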
Sift::~Sift() {
for (unsigned int i = 0; i < num_gpus_; i++) {
for (unsigned int j = 0; j < num_streams_; j++) {
hipStreamDestroy(subdom_data_[i]->stream_data[j]->stream);
}
}
//logger_->flush();
}
void Sift::setImage(const double *img)
{
thrust::copy(img, img + (x_size_ * y_size_ * z_size_), dom_data_->h_image);
}
void Sift::setImage(const std::vector<double>& img)
{
assert((x_size_ * y_size_ * z_size_) == img.size());
thrust::copy(img.begin(), img.end(), dom_data_->h_image);
}
void Sift::setMap(const int8_t *map)
{
thrust::copy(map, map + (x_size_ * y_size_ * z_size_), dom_data_->h_map);
}
void Sift::setMap(const std::vector<int8_t>& map)
{
assert((x_size_ * y_size_ * z_size_) == map.size());
thrust::copy(map.begin(), map.end(), dom_data_->h_map);
}
void Sift::getKeystore(cudautils::Keypoint_store *keystore)
{
keystore->len = dom_data_->keystore->len;
if (keystore->len) {
keystore->buf = (cudautils::Keypoint*) malloc(keystore->len * sizeof(cudautils::Keypoint));
thrust::copy(dom_data_->keystore->buf, dom_data_->keystore->buf + dom_data_->keystore->len, keystore->buf);
}
}
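/*
 * Host-side usage sketch (hypothetical driver code; the dispatcher that
 * actually calls runOnGPU()/runOnStream() per task is outside this file):
 *
 *   cudautils::Sift sift(x_size, y_size, z_size, x_sub_size, y_sub_size,
 *                        dx, dy, dw, num_gpus, num_streams,
 *                        sift_params, fv_centers);
 *   sift.setImage(host_image);   // column-major volume, x*y*z doubles
 *   sift.setMap(host_map);       // int8_t volume; 0 marks a requested keypoint
 *   // ... dispatcher runs the per-GPU / per-stream tasks ...
 *   sift.postrun();              // gathers every stream's keystore
 *   cudautils::Keypoint_store store;
 *   sift.getKeystore(&store);    // buf is malloc'd here; caller frees it
 */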
void Sift::getImage(double *img)
{
thrust::copy(dom_data_->h_image, dom_data_->h_image + x_size_ * y_size_ * z_size_, img);
}
void Sift::getImage(std::vector<double>& img)
{
thrust::copy(dom_data_->h_image, dom_data_->h_image + x_size_ * y_size_ * z_size_, img.begin());
}
int Sift::getNumOfGPUTasks(const int gpu_id) {
return subdom_data_[gpu_id]->x_sub_i_list.size();
}
int Sift::getNumOfStreamTasks(
const int gpu_id,
const int stream_id) {
return 1;
}
void Sift::runOnGPU(
const int gpu_id,
const unsigned int gpu_task_id) {
cudaSafeCall(hipSetDevice(gpu_id));
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
std::shared_ptr<SubDomainDataOnStream> stream_data0 = subdom_data->stream_data[0];
unsigned int x_sub_i = subdom_data->x_sub_i_list[gpu_task_id];
unsigned int y_sub_i = subdom_data->y_sub_i_list[gpu_task_id];
#ifdef DEBUG_OUTPUT
CudaTimer timer;
logger_->info("===== gpu_id={} x_sub_i={} y_sub_i={}", gpu_id, x_sub_i, y_sub_i);
#endif
unsigned int x_sub_start = x_sub_i * x_sub_size_;
unsigned int y_sub_start = y_sub_i * y_sub_size_;
// clamp delta to end value
unsigned int x_sub_delta = get_delta(x_size_, x_sub_i, x_sub_size_);
unsigned int y_sub_delta = get_delta(y_size_, y_sub_i, y_sub_size_);
// only add in pad factor at first
unsigned int base_x_sub = (x_sub_i > 0 ? 0 : dw_);
unsigned int base_y_sub = (y_sub_i > 0 ? 0 : dw_);
// subtract pad factor after first
unsigned int padding_x_sub_start = x_sub_start - (x_sub_i > 0 ? dw_ : 0);
unsigned int padding_y_sub_start = y_sub_start - (y_sub_i > 0 ? dw_ : 0);
unsigned int padding_x_sub_delta = x_sub_delta + (x_sub_i > 0 ? dw_ : 0) + (x_sub_i < num_x_sub_ - 1 ? dw_ : 0);
unsigned int padding_y_sub_delta = y_sub_delta + (y_sub_i > 0 ? dw_ : 0) + (y_sub_i < num_y_sub_ - 1 ? dw_ : 0);
// per GPU padded image size
size_t padded_sub_volume_size = x_sub_stride_ * y_sub_stride_ * z_stride_;
#ifdef DEBUG_OUTPUT
unsigned int x_sub_end = x_sub_start + x_sub_delta;
unsigned int y_sub_end = y_sub_start + y_sub_delta;
logger_->debug("x_sub=({},{},{}) y_sub=({},{},{})", x_sub_start, x_sub_delta, x_sub_end, y_sub_start, y_sub_delta, y_sub_end);
logger_->debug("base_x_sub={},base_y_sub={}", base_x_sub, base_y_sub);
#ifdef DEBUG_OUTPUT_MATRIX
// print the x, y, z image / map coordinates of the selected keypoints
if (gpu_id == 0) { // don't repeat this for every GPU
for (long long idx=0; idx < x_size_ * y_size_ * z_size_; idx++) {
if (! dom_data_->h_map[idx]) {
unsigned int x;
unsigned int y;
unsigned int z;
ind2sub(x_size_, y_size_, idx, x, y, z);
logger_->info("h_map 0's: idx={}, x={}, y={}, z={}",
idx, x, y, z);
}
}
}
#endif
#endif
// allocate the per GPU padded map and image
int8_t *padded_sub_map;
double *padded_sub_image;
cudaSafeCall(hipHostMalloc(&padded_sub_map, padded_sub_volume_size *
sizeof(int8_t), hipHostMallocPortable));
cudaCheckError();
cudaSafeCall(hipHostMalloc(&padded_sub_image, padded_sub_volume_size *
sizeof(double), hipHostMallocPortable));
cudaCheckError();
// First set all values to holder value -1
thrust::fill(padded_sub_map, padded_sub_map + padded_sub_volume_size, -1);
for (unsigned int k = 0; k < z_size_; k++) {
for (unsigned int j = 0; j < padding_y_sub_delta; j++) {
// get row-major / c-order linear index according orig. dim [x_size, y_size, z_size]
size_t src_idx = dom_data_->sub2ind(padding_x_sub_start, padding_y_sub_start + j, k);
size_t dst_idx = subdom_data->pad_sub2ind(base_x_sub, base_y_sub + j, dw_ + k);
int8_t* src_map_begin = &(dom_data_->h_map[src_idx]);
int8_t* dst_map_begin = &(padded_sub_map[dst_idx]);
// note this assumes the rows to be contiguous in memory (row-order / c-order)
thrust::copy(src_map_begin, src_map_begin + padding_x_sub_delta, dst_map_begin);
double* src_image_begin = &(dom_data_->h_image[src_idx]);
double* dst_image_begin = &(padded_sub_image[dst_idx]);
thrust::copy(src_image_begin, src_image_begin + padding_x_sub_delta, dst_image_begin);
}
}
#ifdef DEBUG_OUTPUT_MATRIX
// print the x, y, z in padded image / map coordinates of the selected keypoints
for (long long i=0; i < padded_sub_volume_size; i++) {
if (!padded_sub_map[i]) {
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_sub_stride_, y_sub_stride_, i, padding_x, padding_y, padding_z);
// correct for dw_ padding, matlab is 1-indexed
unsigned int x = x_sub_start + padding_x - dw_ + 1;
unsigned int y = y_sub_start + padding_y - dw_ + 1;
unsigned int z = padding_z - dw_ + 1;
logger_->info("padded_sub_map 0's (matlab 1-indexed): idx={}, x={}, y={}, z={}",
i, x, y, z);
}
}
#endif
thrust::fill(thrust::device, subdom_data->padded_image, subdom_data->padded_image + padded_sub_volume_size, 0.0);
cudaSafeCall(hipMemcpyAsync(
subdom_data->padded_image,
padded_sub_image,
padded_sub_volume_size * sizeof(double),
hipMemcpyHostToDevice, stream_data0->stream));
#ifdef DEBUG_OUTPUT
cudaSafeCall(hipStreamSynchronize(stream_data0->stream));
logger_->info("transfer image data {}", timer.get_laptime());
#ifdef DEBUG_OUTPUT_MATRIX
logger_->info("===== dev image");
print_matrix3d(logger_, x_size_, y_size_, 0, 0, 0, x_size_, y_size_, z_size_, dom_data_->h_image);
print_matrix3d_dev(logger_, x_sub_stride_, y_sub_stride_, z_stride_, 0, 0, 0, x_sub_stride_, y_sub_stride_, z_stride_, subdom_data->padded_image);
#endif
timer.reset();
#endif
cudaSafeCall(hipMemcpyAsync(
subdom_data->padded_map,
padded_sub_map,
padded_sub_volume_size * sizeof(int8_t),
hipMemcpyHostToDevice, stream_data0->stream));
#ifdef DEBUG_OUTPUT
cudaSafeCall(hipStreamSynchronize(stream_data0->stream));
logger_->info("transfer map data {}", timer.get_laptime());
#ifdef DEBUG_OUTPUT_MATRIX
logger_->debug("===== dev map");
print_matrix3d(logger_, x_size_, y_size_, 0, 0, 0, x_size_, y_size_, z_size_, dom_data_->h_map);
print_matrix3d_dev(logger_, x_sub_stride_, y_sub_stride_, z_stride_, 0, 0, 0, x_sub_stride_, y_sub_stride_, z_stride_, subdom_data->padded_map);
#endif
timer.reset();
#endif
// clear previous result to zero
thrust::fill(thrust::device, subdom_data->padded_map_idx, subdom_data->padded_map_idx + padded_sub_volume_size, 0);
/*Note: padded_sub_volume_size = x_sub_stride_ * y_sub_stride_ * z_stride_;*/
auto end_itr = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator<unsigned int>(0), // count indexes from 0
thrust::make_counting_iterator<unsigned int>(padded_sub_volume_size), // ...to padded_sub_volume_size
subdom_data->padded_map, //beginning of stencil sequence
subdom_data->padded_map_idx, // beginning of sequence to copy into
thrust::logical_not<int8_t>());//predicate test on every value
subdom_data->padded_map_idx_size = end_itr - subdom_data->padded_map_idx;
// set all padded map boundaries (still -1) to 0 for correctness to
// distinguish boundaries
thrust::replace(thrust::device, subdom_data->padded_map, subdom_data->padded_map + padded_sub_volume_size, -1, 0);
#ifdef DEBUG_OUTPUT
cudaSafeCall(hipStreamSynchronize(stream_data0->stream));
logger_->info("calculate map idx {}", timer.get_laptime());
logger_->info("padded_map_idx_size={}", subdom_data->padded_map_idx_size);
timer.reset();
#endif
// Each GPU each subdom_data
// this set the dx and dy start idx for each stream
unsigned int num_dx = get_num_blocks(x_sub_delta, dx_);
unsigned int num_dy = get_num_blocks(y_sub_delta, dy_);
unsigned int stream_id = 0;
for (unsigned int dy_i = 0; dy_i < num_dy; dy_i++) {
for (unsigned int dx_i = 0; dx_i < num_dx; dx_i++) {
subdom_data->stream_data[stream_id]->dx_i_list.push_back(dx_i);
subdom_data->stream_data[stream_id]->dy_i_list.push_back(dy_i);
stream_id++;
if (stream_id == num_streams_) {
stream_id = 0;
}
}
}
cudaSafeCall(hipStreamSynchronize(stream_data0->stream));
cudaSafeCall(hipHostFree(padded_sub_map));
cudaSafeCall(hipHostFree(padded_sub_image));
}
const cudautils::SiftParams Sift::get_sift_params() {
return sift_params_;
}
void Sift::postrun() {
// count keypoints
int total_keypoints = 0;
for (int gpu_id = 0; gpu_id < num_gpus_; gpu_id++) {
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
for (int stream_id = 0; stream_id < num_streams_; stream_id++) {
std::shared_ptr<SubDomainDataOnStream> stream_data =
subdom_data->stream_data[stream_id];
/*logger_->info("gpu_id {}, streamid {}, # of kypts {}", gpu_id, stream_id, stream_data->keystore.size());*/
total_keypoints += stream_data->keystore.size();
}
}
// allocate for number of keypoints
dom_data_->keystore->len = total_keypoints;
/*logger_->info("total_keypoints {}", total_keypoints);*/
if (total_keypoints < 1)
return;
hipHostMalloc(&(dom_data_->keystore->buf), dom_data_->keystore->len *
sizeof(cudautils::Keypoint), hipHostMallocPortable);
// copy keypoints to host
int counter = 0;
for (int gpu_id = 0; gpu_id < num_gpus_; gpu_id++) {
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
for (int stream_id = 0; stream_id < num_streams_; stream_id++) {
std::shared_ptr<SubDomainDataOnStream> stream_data =
subdom_data->stream_data[stream_id];
for (int i = 0; i < stream_data->keystore.size(); i++) {
dom_data_->keystore->buf[counter] = stream_data->keystore[i];
counter++;
}
}
}
assert(counter == total_keypoints);
return;
}
void Sift::runOnStream(
const int gpu_id,
const int stream_id,
const unsigned int gpu_task_id) {
hipSetDevice(gpu_id);
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
std::shared_ptr<SubDomainDataOnStream> stream_data = subdom_data->stream_data[stream_id];
unsigned int x_sub_i = subdom_data->x_sub_i_list[gpu_task_id];
unsigned int y_sub_i = subdom_data->y_sub_i_list[gpu_task_id];
unsigned int x_sub_delta = get_delta(x_size_, x_sub_i, x_sub_size_);
unsigned int y_sub_delta = get_delta(y_size_, y_sub_i, y_sub_size_);
unsigned int x_sub_start = x_sub_i * x_sub_size_;
unsigned int y_sub_start = y_sub_i * y_sub_size_;
#ifdef DEBUG_OUTPUT
CudaTimer timer(stream_data->stream);
#endif
// each stream has a individual subsections of data, that each kernel call will operate on
// these subsections start/stop idx are determined by dx_i and dy_i lists
for (auto dx_itr = stream_data->dx_i_list.begin(), dy_itr = stream_data->dy_i_list.begin();
dx_itr != stream_data->dx_i_list.end() || dy_itr != stream_data->dy_i_list.end();
dx_itr++, dy_itr++) {
unsigned int dx_i = *dx_itr;
unsigned int dy_i = *dy_itr;
unsigned int dx_start = dx_i * dx_;
unsigned int dx_delta = get_delta(x_sub_delta, dx_i, dx_);
unsigned int dx_end = dx_start + dx_delta;
unsigned int dy_start = dy_i * dy_;
unsigned int dy_delta = get_delta(y_sub_delta, dy_i, dy_);
unsigned int dy_end = dy_start + dy_delta;
#ifdef DEBUG_OUTPUT
logger_->info("dx_i={}, dy_i={}", dx_i, dy_i);
logger_->info("x=({},{},{}) y=({},{},{}), dw={}", dx_start, dx_delta, dx_end, dy_start, dy_delta, dy_end, dw_);
logger_->info("subdom_data->padded_map_idx_size={}", subdom_data->padded_map_idx_size);
#endif
// create each substream data on device
long long int *substream_padded_map_idx;
cudaSafeCall(hipMalloc(&substream_padded_map_idx,
subdom_data->padded_map_idx_size * sizeof(long long int)));
RangeCheck range_check { x_sub_stride_, y_sub_stride_,
dx_start + dw_, dx_end + dw_, dy_start + dw_, dy_end + dw_, dw_, z_size_ + dw_ };
// copy the relevant (in range) idx elements from the
// global GPU padded_map_idx to the local substream_padded_map_idx
auto end_itr = thrust::copy_if(
thrust::device,
subdom_data->padded_map_idx,
subdom_data->padded_map_idx + subdom_data->padded_map_idx_size,
substream_padded_map_idx,
range_check);
const unsigned int substream_padded_map_idx_size = end_itr - substream_padded_map_idx;
#ifdef DEBUG_OUTPUT
logger_->info("substream_padded_map_idx_size={}", substream_padded_map_idx_size);
logger_->info("transfer map idx {}", timer.get_laptime());
#ifdef DEBUG_OUTPUT_MATRIX
cudaSafeCall(hipStreamSynchronize(stream_data->stream));
thrust::device_vector<long long int> dbg_d_padded_map_idx(substream_padded_map_idx,
substream_padded_map_idx + substream_padded_map_idx_size);
thrust::host_vector<unsigned int> dbg_h_padded_map_idx(dbg_d_padded_map_idx);
for (unsigned int i = 0; i < substream_padded_map_idx_size; i++) {
logger_->debug("substream_padded_map_idx={}", dbg_h_padded_map_idx[i]);
}
#endif
timer.reset();
#endif
if (substream_padded_map_idx_size == 0) {
#ifdef DEBUG_OUTPUT
logger_->debug("no map to be padded");
#endif
continue;
}
// only calculate location and save keypoints
if (sift_params_.skipDescriptor) {
#ifdef DEBUG_OUTPUT
logger_->debug("Skip calculatation of descriptors");
#endif
// transfer index map to host for referencing correct index
long long int *h_padded_map_idx;
cudaSafeCall(hipHostMalloc((void **) &h_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
hipHostMallocPortable));
cudaSafeCall(hipMemcpyAsync(
h_padded_map_idx,
substream_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
hipMemcpyDeviceToHost, stream_data->stream));
// make sure all async memcpys (above) are finished before access
cudaSafeCall(hipStreamSynchronize(stream_data->stream));
// save data for all streams to global Sift object store
for (int i = 0; i < substream_padded_map_idx_size; i++) {
Keypoint temp;
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_sub_stride_, y_sub_stride_, h_padded_map_idx[i], padding_x, padding_y, padding_z);
// correct for dw_ padding, matlab is 1-indexed
temp.x = x_sub_start + padding_x - dw_ + 1;
temp.y = y_sub_start + padding_y - dw_ + 1;
temp.z = padding_z - dw_ + 1;
stream_data->keystore.push_back(temp);
}
cudaSafeCall(hipFree(substream_padded_map_idx));
continue; // do this for every substream forloop
}
/*
Create an array to hold each descriptor ivec vector on VRAM
essentially a matrix of substream_padded_map_idx_size by descriptor length
*/
double *descriptors, *yy_scratch, *ori_scratch;
uint16_t * idx_scratch, *ori_idx_scratch;
long desc_mem_size = sift_params_.descriptor_len *
substream_padded_map_idx_size * sizeof(double);
cudaSafeCall(hipMalloc(&descriptors, desc_mem_size));
// default nFaces 80; 640 bytes per keypoint yy
cudaSafeCall(hipMalloc(&yy_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(double)));
// 160 bytes per keypoint idx
cudaSafeCall(hipMalloc(&idx_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(uint16_t)));
if (sift_params_.TwoPeak_Flag) {
// default nFaces=80
cudaSafeCall(hipMalloc(&ori_idx_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(uint16_t)));
cudaSafeCall(hipMalloc(&ori_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(double)));
}
// One keypoint per thread, one thread per block
unsigned int num_threads = 1;
// round up by number of threads per block, to calc num of blocks
unsigned int num_blocks = get_num_blocks(substream_padded_map_idx_size, num_threads);
#ifdef DEBUG_OUTPUT
/*cudaSafeCall(hipStreamSynchronize(stream_data->stream));*/
logger_->debug("num_blocks={}", num_blocks);
logger_->debug("num_threads={}", num_threads);
#endif
if (num_blocks * num_threads < substream_padded_map_idx_size) {
logger_->info("Error occured in numblocks and num_threads estimation... returning from stream");
return;
}
#ifdef DEBUG_OUTPUT
logger_->debug("create_descriptor");
timer.reset();
#endif
// sift_params.fv_centers must be placed on device since array passed to cuda kernel
double* device_centers;
// default fv_centers_len 80 * 3 (3D) = 240;
cudaSafeCall(hipMalloc((void **) &device_centers,
sizeof(double) * sift_params_.fv_centers_len));
cudaSafeCall(hipMemcpy((void *) device_centers, (const void *) fv_centers_,
(size_t) sizeof(double) * sift_params_.fv_centers_len,
hipMemcpyHostToDevice));
#ifdef DEBUG_OUTPUT_MATRIX
/*printf("Print image\n");*/
/*hipStreamSynchronize(stream_data->stream);*/
/*int sub_volume_size = x_sub_stride_ * y_sub_stride_ * z_stride_;*/
/*double* dbg_h_image = (double*) malloc(sizeof(double) * sub_volume_size);*/
/*cudaSafeCall(hipMemcpy((void **) dbg_h_image, subdom_data->padded_image,*/
/*sizeof(double) * sub_volume_size,*/
/*hipMemcpyDeviceToHost));*/
/*// print*/
/*for (int i=0; i < sub_volume_size; i++) {*/
/*if (dbg_h_image[i] != 0.0) {*/
/*printf("host image[%d]: %f\n", i, dbg_h_image[i]);*/
/*}*/
/*}*/
#endif
hipLaunchKernelGGL(( create_descriptor), dim3(num_blocks), dim3(num_threads), 0, stream_data->stream,
x_sub_stride_, y_sub_stride_, x_sub_start, y_sub_start,
dw_, // pad width
substream_padded_map_idx_size, // total number of keypoints to process
substream_padded_map_idx, //substream map, filtered linear idx into per GPU padded_map and padded_image
subdom_data->padded_map,//global map split per GPU
subdom_data->padded_image,//image split per GPU
sift_params_,
device_centers,
descriptors,
idx_scratch,
yy_scratch,
ori_idx_scratch,
ori_scratch);
cudaCheckError();
#ifdef DEBUG_OUTPUT
logger_->info("create descriptors elapsed: {}", timer.get_laptime());
timer.reset();
#endif
// transfer vector descriptors via host pinned memory for faster async cpy
double *h_descriptors;
cudaSafeCall(hipHostMalloc((void **) &h_descriptors, desc_mem_size, hipHostMallocPortable));
cudaSafeCall(hipMemcpyAsync(
h_descriptors,
descriptors,
desc_mem_size,
hipMemcpyDeviceToHost, stream_data->stream));
// transfer index map to host for referencing correct index
long long int *h_padded_map_idx;
cudaSafeCall(hipHostMalloc((void **) &h_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
hipHostMallocPortable));
cudaSafeCall(hipMemcpyAsync(
h_padded_map_idx,
substream_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
hipMemcpyDeviceToHost, stream_data->stream));
#ifdef DEBUG_OUTPUT_MATRIX
for (int i=0; i < substream_padded_map_idx_size; i++) {
printf("h_padded_map_idx:%lld\n", h_padded_map_idx[i]);
if (i % sift_params_.descriptor_len == 0) {
printf("\n\nDescriptor:%d\n", (int) i / sift_params_.descriptor_len);
}
printf("%d: %d\n", i, h_descriptors[i]);
}
#endif
// make sure all async memcpys (above) are finished before access
cudaSafeCall(hipStreamSynchronize(stream_data->stream));
// save data for all streams to global Sift object store
int skip_counter = 0;
for (int i = 0; i < substream_padded_map_idx_size; i++) {
Keypoint temp;
if (sift_params_.TwoPeak_Flag) {
if (h_padded_map_idx[i] == -1) {
skip_counter++;
continue;
}
}
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_sub_stride_, y_sub_stride_, h_padded_map_idx[i], padding_x, padding_y, padding_z);
// correct for dw_ padding, matlab is 1-indexed
temp.x = x_sub_start + padding_x - dw_ + 1;
temp.y = y_sub_start + padding_y - dw_ + 1;
temp.z = padding_z - dw_ + 1;
temp.ivec = (double*) malloc(sift_params_.descriptor_len * sizeof(double));
memcpy(temp.ivec, &(h_descriptors[i * sift_params_.descriptor_len]),
sift_params_.descriptor_len * sizeof(double));
temp.xyScale = sift_params_.xyScale;
temp.tScale = sift_params_.tScale;
// buffer the size of the whole image
stream_data->keystore.push_back(temp);
}
cudaSafeCall(hipFree(substream_padded_map_idx));
cudaSafeCall(hipFree(descriptors));
cudaSafeCall(hipFree(device_centers));
cudaSafeCall(hipFree(idx_scratch));
cudaSafeCall(hipFree(yy_scratch));
if (sift_params_.TwoPeak_Flag) {
cudaSafeCall(hipFree(ori_idx_scratch));
cudaSafeCall(hipFree(ori_scratch));
}
cudaSafeCall(hipHostFree(h_descriptors));
cudaSafeCall(hipHostFree(h_padded_map_idx));
#ifdef DEBUG_OUTPUT
logger_->info("gpu:{}, stream:{}, substream_padded_map_idx_size={}, saved={}",
gpu_id, stream_id, substream_padded_map_idx_size,
substream_padded_map_idx_size - skip_counter);
logger_->info("transfer d2h and copy descriptor ivec values {}", timer.get_laptime());
#endif
}
}
} // namespace cudautils
|
afede91f58a662f1ffc76a0dd60c3f3dd6397936.cu
|
#include <iostream>
#include <future>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <cuda_runtime.h>
#include <cmath>
#include "math.h"
#include "sift.h"
#include "matrix_helper.h"
#include "cuda_timer.h"
#include "spdlog/spdlog.h"
namespace cudautils {
template<typename T>
struct greater_tol : public thrust::binary_function<T, T, bool>
{
// tolerance to compare doubles (15 decimal places)
__host__ __device__ bool operator()(const T &lhs, const T &rhs) const {
if (fabs(lhs - rhs) <= .000000000000001)
return false;
return lhs > rhs;
}
};
struct is_negative {
__host__ __device__ bool operator() (const long long a) const {
return a < 0;
}
};
struct isnan_test {
__host__ __device__ bool operator() (const float a) const {
return isnan(a);
}
};
// row major order index into the descriptor vector
// note the descriptor vector length is determined by
// sift_params.IndexSize ^ 3 * sift_params.nFaces
// this is why i, j, and k are dimensions of stride sift_params.IndexSize
__forceinline__ __device__
int bin_sub2ind_row(int i, int j, int k, uint16_t l, const cudautils::SiftParams sift_params) {
return (int) l + sift_params.nFaces * (k + j * sift_params.IndexSize + i
* pow(sift_params.IndexSize, 2));
}
// column major order index into the descriptor vector
// note the descriptor vector length is determined by
// sift_params.IndexSize ^ 3 * sift_params.nFaces
// this is why i, j, and k are dimensions of stride sift_params.IndexSize
__forceinline__ __device__
int bin_sub2ind(int i, int j, int k, uint16_t l, const cudautils::SiftParams sift_params) {
return (int) i + j * sift_params.IndexSize + k * pow(sift_params.IndexSize, 2)
+ l * pow(sift_params.IndexSize, 3);
}
// column major order index into the descriptor vector
// note the descriptor vector length is determined by
// sift_params.IndexSize ^ 3 * sift_params.nFaces
// this is why i, j, and k are dimensions of stride sift_params.IndexSize
__global__
void bin_sub2ind_wrap(int i, int j, int k, uint16_t l, const cudautils::SiftParams sift_params, int* ind) {
*ind = bin_sub2ind(i, j, k, l, sift_params);
return ;
}
__forceinline__ __device__
void place_in_index(double* index, double mag, int i, int j, int k,
double* yy, uint16_t* ix, long long idx, const cudautils::SiftParams sift_params) {
double tmpsum = 0.0;
int bin_index;
if (sift_params.Smooth_Flag) {
for (int tessel=0; tessel < sift_params.Tessel_thresh; tessel++) {
tmpsum += pow(yy[tessel], sift_params.Smooth_Var);
}
// Add three nearest tesselation faces
for (int ii=0; ii < sift_params.Tessel_thresh; ii++) {
bin_index = bin_sub2ind(i, j, k, ix[ii], sift_params);
#ifdef DEBUG_NUMERICAL
printf("i%d j%d k%d ix[ii]%d bin_index%d yy[ii]%.54f, index+=%.54f, idx%lld\n",
i, j, k, ix[ii], bin_index, yy[ii], mag * pow(yy[ii],
sift_params.Smooth_Var ) / tmpsum, idx);
#endif
index[bin_index] += mag * pow(yy[ii], sift_params.Smooth_Var ) / tmpsum;
}
} else {
bin_index = bin_sub2ind(i, j, k, ix[0], sift_params);
index[bin_index] += mag;
}
return;
}
// matrix multiply in row memory order
// first is a matrix in row order
// second is the array multiply
// assumes length of second = cols of first
__forceinline__ __device__
void dot_product(double* first, double* second, double* out, int rows,
int cols) {
for (int i=0; i < rows; i++) {
double sum = 0.0;
for (int j=0; j < cols; j++) {
sum += first[j + i * cols] * second[j];
}
out[i] = sum;
}
}
// matrix multiply in row memory order
// first is a matrix in row order
// second is the array multiply
// assumes length of second = cols of first
__global__
void dot_product_wrap(double* first, double* second, double* out, int rows,
int cols) {
dot_product(first, second, out, rows, cols);
return;
}
// matrix multiply in col memory order
// first is a matrix in column order
// second is the array multiply
// assumes length of second = cols of first
__forceinline__ __device__
void dot_product_col_ord(double* first, double* second, double* out, int rows,
int cols) {
for (int i=0; i < rows; i++) {
double sum = 0.0;
for (int j=0; j < cols; j++) {
sum += first[i + j * rows] * second[j];
}
out[i] = sum;
}
}
// assumes r,c,s lie within accessible image boundaries
__forceinline__ __device__
double get_grad_ori_vector(double* image, long long idx, unsigned int
x_stride, unsigned int y_stride, int r, int c, int t, double vect[3],
double* yy, uint16_t* ix, const cudautils::SiftParams sift_params,
double* device_centers) {
int last_row = sift_params.image_size0 - 1;
int last_col = sift_params.image_size1 - 1;
int last_slice = sift_params.image_size2 - 1;
/* this is literal translation from Scovanner et al. 3DSIFT,
even though it seems xgrad and ygrad are switched, and ygrad seems to be
in wrong direction. Protect edge cases explicitly rather than
by padding
*/
double xgrad, ygrad, zgrad;
if (c == 0) {
xgrad = 2.0 * (image[idx + x_stride] - image[idx]);
} else if (c == last_col) {
xgrad = 2.0 * (image[idx] - image[idx - x_stride]);
} else {
xgrad = image[idx + x_stride] - image[idx - x_stride];
}
if (r == 0) {
ygrad = 2.0 * (image[idx] - image[idx + 1]);
} else if (r == last_row) {
ygrad = 2.0 * (image[idx - 1] - image[idx]);
} else {
ygrad = image[idx - 1] - image[idx + 1];
}
if (t == 0) {
zgrad = 2.0 * (image[idx + x_stride * y_stride] - image[idx]);
} else if (t == last_slice) {
zgrad = 2.0 * (image[idx] - image[idx - x_stride * y_stride]);
} else {
zgrad = image[idx + x_stride * y_stride] - image[idx - x_stride * y_stride];
}
double mag = sqrt(xgrad * xgrad + ygrad * ygrad + zgrad * zgrad);
if (mag != 0.0) {
// normalize only when the gradient is non-zero to avoid dividing by zero
vect[0] = xgrad / mag;
vect[1] = ygrad / mag;
vect[2] = zgrad / mag;
} else {
vect[0] = 1.0;
vect[1] = 0.0;
vect[2] = 0.0;
}
//Find the nearest tesselation face indices
// N = sift_params.nFaces
int N = sift_params.fv_centers_len / DIMS;
dot_product(device_centers, vect, yy, N, DIMS);
// overwrite idxs 1 : N, N can not exceed the length of ori_hist
thrust::sequence(thrust::device, ix, ix + sift_params.nFaces);
thrust::stable_sort_by_key(thrust::device, yy, yy + sift_params.nFaces, ix, thrust::greater<double>());
#ifdef DEBUG_NUMERICAL
printf("ggov N%d fv_len%d DIMS%d idx%lld vect0 %.4f vect1 %.4f vect2 %.4f image[idx] %.4f r%d c%d t%d yy %.4f %.4f %.4f %.4f ix %d %d %d %d eq:%d diff:%.54f\n",
N, sift_params.fv_centers_len, DIMS, idx, vect[0], vect[1], vect[2],
image[idx], r, c, t, yy[0], yy[1], yy[2], yy[3], ix[0], ix[1], ix[2],
ix[3], yy[2] == yy[3], yy[2] - yy[3]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[0], device_centers[3 * ix[0]], device_centers[3 * ix[0] + 1], device_centers[3 * ix[0] + 2]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[1], device_centers[3 * ix[1]], device_centers[3 * ix[1] + 1], device_centers[3 * ix[1] + 2]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[2], device_centers[3 * ix[2]], device_centers[3 * ix[2] + 1], device_centers[3 * ix[2] + 2]);
printf("fv[%d] %.4f %.4f %.4f\n", ix[3], device_centers[3 * ix[3]], device_centers[3 * ix[3] + 1], device_centers[3 * ix[3] + 2]);
#endif
return mag;
}
__global__
void get_grad_ori_vector_wrap(double* image, long long idx, unsigned int
x_stride, unsigned int y_stride, int r, int c, int t, double vect[3], double* yy, uint16_t* ix,
const cudautils::SiftParams sift_params, double* device_centers, double* mag) {
*mag = cudautils::get_grad_ori_vector(image,
idx, x_stride, y_stride, r, c, t, vect,
yy, ix, sift_params, device_centers);
return;
}
/*r, c, s is the pixel index (x, y, z dimensions respect.) in the image within the radius of the */
/*keypoint before clamped*/
/*For each pixel, take a neighborhood of xyradius and tiradius,*/
/*bin it down to the sift_params.IndexSize dimensions*/
/*thus, i_indx, j_indx, s_indx represent the binned index within the radius of the keypoint*/
__forceinline__ __device__
void add_sample(double* index, double* image, double distsq, long long
idx, unsigned int x_stride, unsigned int y_stride, int i_bin, int j_bin, int k_bin,
int r, int c, int t, const cudautils::SiftParams sift_params, double*
device_centers, uint16_t* ix, double* yy) {
double sigma = sift_params.SigmaScaled;
double weight = exp(-(distsq / (2.0 * sigma * sigma)));
double vect[3] = {0.0, 0.0, 0.0};
// gradient and orientation vectors calculated from 3D halo/neighboring
// pixels
double mag = get_grad_ori_vector(image, idx, x_stride, y_stride, r, c, t,
vect, yy, ix, sift_params, device_centers);
mag *= weight; // scale magnitude by gaussian
place_in_index(index, mag, i_bin, j_bin, k_bin, yy, ix, idx, sift_params);
return;
}
// floor quotient, add 1
// clamp bin idx to IndexSize
__forceinline__ __device__
int get_bin_idx(int orig, int radius, int IndexSize) {
int idx = (int) floor((orig + radius) / (2.0 * (double) radius / IndexSize));
if (idx >= IndexSize) // clamp to IndexSize
idx = IndexSize - 1;
return idx;
}
// floor quotient, add 1
// clamp bin idx to IndexSize
__global__
void get_bin_idx_wrap(int orig, int radius, int IndexSize, int* idx) {
*idx = get_bin_idx(orig, radius, IndexSize);
return;
}
__forceinline__ __device__
double* key_sample(const cudautils::SiftParams sift_params,
cudautils::Keypoint key, double* image, long long idx,
unsigned int x_stride, unsigned int y_stride,
double* device_centers, uint16_t* ix, double* yy,
double* index) {
double xySpacing = (double) sift_params.xyScale * sift_params.MagFactor;
double tSpacing = (double) sift_params.tScale * sift_params.MagFactor;
int xyiradius = rint(1.414 * xySpacing * (sift_params.IndexSize + 1) / 2.0);
int tiradius = rint(1.414 * tSpacing * (sift_params.IndexSize + 1) / 2.0);
// Surrounding radius of pixels are binned for computation
// according to sift_params.IndexSize
int r, c, t, i_bin, j_bin, k_bin;
double distsq;
long long update_idx;
for (int i = -xyiradius; i <= xyiradius; i++) {
for (int j = -xyiradius; j <= xyiradius; j++) {
for (int k = -tiradius; k <= tiradius; k++) {
distsq = (double) pow(i,2) + pow(j,2) + pow(k,2);
// Find bin idx
i_bin = get_bin_idx(i, xyiradius, sift_params.IndexSize);
j_bin = get_bin_idx(j, xyiradius, sift_params.IndexSize);
k_bin = get_bin_idx(k, tiradius, sift_params.IndexSize);
// Find original image pixel idx
r = key.x + i;
c = key.y + j;
t = key.z + k;
// only add if within image range
if (!(r < 0 || r >= sift_params.image_size0 ||
c < 0 || c >= sift_params.image_size1
|| t < 0 || t >= sift_params.image_size2)) {
// image is assumed as column order
// make sure it isn't cast to unsigned
update_idx = (long long) idx + i + (int) x_stride * j +
(int) x_stride * (int) y_stride * k;
add_sample(index, image, distsq, update_idx, x_stride, y_stride,
i_bin, j_bin, k_bin, r, c, t, sift_params,
device_centers, ix, yy);
}
}
}
}
return index;
}
__forceinline__ __device__
double* build_ori_hists(int x, int y, int z, long long idx, unsigned int
x_stride, unsigned int y_stride, int radius, double* image,
const cudautils::SiftParams sift_params, double* device_centers,
uint16_t* ix, double* yy, double* ori_hist) {
double mag;
double vect[3] = {0.0, 0.0, 0.0};
int r, c, t;
long long update_idx;
for (int i = -radius; i <= radius; i++) {
for (int j = -radius; j <= radius; j++) {
for (int k = -radius; k <= radius; k++) {
// Find original image pixel idx
r = x + i;
c = y + j;
t = z + k;
// only add if within image range
// NOTE from original source
// Do not use last row or column, which are not valid.
if (!(r < 0 || r >= sift_params.image_size0 - 2 ||
c < 0 || c >= sift_params.image_size1 - 2 ||
t < 0 || t >= sift_params.image_size2 - 2)) {
// image is assumed as column order
// make sure it isn't cast to unsigned
update_idx = (long long) idx + i + (int) x_stride * j +
(int) x_stride * (int) y_stride * k;
/*gradient and orientation vectors calculated from 3D halo/neighboring pixels*/
mag = get_grad_ori_vector(image, update_idx, x_stride, y_stride,
r, c, t, vect, yy, ix, sift_params, device_centers);
ori_hist[ix[0]] += mag;
}
}
}
}
return ori_hist;
}
__forceinline__ __device__
void normalize_arr(double* arr, int len) {
double sqlen = 0.0;
for (int i=0; i < len; i++) {
sqlen += arr[i] * arr[i];
}
double fac = 1.0 / sqrt(sqlen);
for (int i=0; i < len; i++) {
arr[i] = arr[i] * fac;
}
return;
}
__forceinline__ __device__
cudautils::Keypoint make_keypoint_sample(cudautils::Keypoint key, double*
image, const cudautils::SiftParams sift_params, unsigned int thread_idx, long long idx,
unsigned int x_stride, unsigned int y_stride, double * descriptors,
double* device_centers, uint16_t* ix, double* yy) {
bool changed = false;
// default N=640; 5120 bytes
int N = sift_params.descriptor_len;
double* index = &(descriptors[thread_idx * sift_params.descriptor_len]);
memset(index, 0, N * sizeof(double));
key_sample(sift_params, key, image, idx, x_stride, y_stride,
device_centers, ix, yy, index);
#ifdef DEBUG_NUMERICAL
for (int i=0; i < sift_params.descriptor_len; i++) {
if (index[i] != 0)
printf("index[%d]=%.4f\n",i, index[i]);
}
printf("\n");
#endif
normalize_arr(index, N);
for (int i=0; i < N; i++) {
if (index[i] > sift_params.MaxIndexVal) {
index[i] = sift_params.MaxIndexVal;
changed = true;
}
}
if (changed) {
normalize_arr(index, N);
}
int intval;
for (int i=0; i < N; i++) {
intval = rint(512.0 * index[i]);
index[i] = (double) min(255, intval);
}
return key;
}
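// Host-side sketch (added for illustration; not part of the original build) of
// the descriptor post-processing done in make_keypoint_sample() above:
// L2-normalize, clamp each entry to MaxIndexVal, re-normalize if anything was
// clamped, then quantize to the 0..255 range with a 512x scale factor.
static void example_postprocess_descriptor(double* index, int N, double max_index_val) {
    double sqlen = 0.0;
    for (int i = 0; i < N; i++) sqlen += index[i] * index[i];
    double fac = 1.0 / sqrt(sqlen);
    for (int i = 0; i < N; i++) index[i] *= fac;
    bool changed = false;
    for (int i = 0; i < N; i++) {
        if (index[i] > max_index_val) { index[i] = max_index_val; changed = true; }
    }
    if (changed) {
        sqlen = 0.0;
        for (int i = 0; i < N; i++) sqlen += index[i] * index[i];
        fac = 1.0 / sqrt(sqlen);
        for (int i = 0; i < N; i++) index[i] *= fac;
    }
    for (int i = 0; i < N; i++) {
        int intval = (int) rint(512.0 * index[i]);
        index[i] = (double) (intval < 255 ? intval : 255);
    }
}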
__forceinline__ __device__
cudautils::Keypoint make_keypoint(double* image, int x, int y, int z,
unsigned int thread_idx, long long idx, unsigned int x_stride, unsigned int y_stride,
const cudautils::SiftParams sift_params, double * descriptors, double*
device_centers, uint16_t* ix, double* yy) {
cudautils::Keypoint key;
key.x = x;
key.y = y;
key.z = z;
return make_keypoint_sample(key, image, sift_params, thread_idx, idx,
x_stride, y_stride, descriptors, device_centers, ix, yy);
}
/* Main function of 3DSIFT Program from http://www.cs.ucf.edu/~pscovann/
Inputs:
image - a 3 dimensional matrix of double
xyScale and tScale - affects both the scale and the resolution, these are
usually set to 1 and scaling is done before calling this function
x, y, and z - the location of the center of the keypoint where a descriptor is requested
Outputs:
keypoint - the descriptor, varies in size depending on values in LoadParams.m
reRun - a flag (0 or 1) which is set if the data at (x,y,z) is not
descriptive enough for a good keypoint
*/
__global__
void create_descriptor(
unsigned int x_stride,
unsigned int y_stride,
unsigned int x_sub_start,
unsigned int y_sub_start,
unsigned int dw,
const unsigned int map_idx_size,
long long *map_idx,
int8_t *map,
double *image,
const cudautils::SiftParams sift_params,
double* device_centers,
double *descriptors,
uint16_t* idx_scratch,
double* yy_scratch,
uint16_t* ori_idx_scratch,
double* ori_scratch) {
// thread per keypoint in this substream
unsigned int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx >= map_idx_size) return;
// map_idx holds the relevant image idxs only for the substream
// map_idx_size matches the total # of threads
// idx describes the linear index into the current GPU's section of the image and the corresponding map
long long idx = map_idx[thread_idx];
// column-major order since image is from matlab
int x, y, z;
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_stride, y_stride, idx, padding_x, padding_y, padding_z);
// correct for dw_ padding, 0-indexed for checking boundaries
x = x_sub_start + padding_x - dw;
y = y_sub_start + padding_y - dw;
z = padding_z - dw;
uint16_t* ix = (uint16_t*) &(idx_scratch[thread_idx * sift_params.nFaces]);
cudaCheckPtrDevice(ix);
thrust::sequence(thrust::device, ix, ix + sift_params.nFaces);
double *yy = (double*) &(yy_scratch[thread_idx * sift_params.nFaces]);
cudaCheckPtrDevice(yy);
if (sift_params.TwoPeak_Flag) {
int radius = rint(sift_params.xyScale * 3.0);
// init ori hist indices
int ori_hist_len = sift_params.nFaces; //default 80
uint16_t* ori_hist_idx = &(ori_idx_scratch[ori_hist_len * thread_idx]);
cudaCheckPtrDevice(ori_hist_idx);
thrust::sequence(thrust::device, ori_hist_idx, ori_hist_idx + ori_hist_len);
//init ori histogram
double* ori_hist = &(ori_scratch[ori_hist_len * thread_idx]);
cudaCheckPtrDevice(ori_hist);
memset(ori_hist, 0, ori_hist_len * sizeof(double));
build_ori_hists(x, y, z, idx, x_stride, y_stride, radius, image,
sift_params, device_centers, ix, yy, ori_hist);
// descending order according to ori_hist
thrust::stable_sort_by_key(thrust::device, ori_hist, ori_hist +
ori_hist_len, ori_hist_idx, thrust::greater<double>());
double prod01, prod02;
dot_product(&(device_centers[DIMS * ori_hist_idx[0]]),
&(device_centers[DIMS * ori_hist_idx[1]]), &prod01, 1, DIMS);
dot_product(&(device_centers[DIMS * ori_hist_idx[0]]),
&(device_centers[DIMS * ori_hist_idx[2]]), &prod02, 1, DIMS);
#ifdef DEBUG_NUMERICAL
printf("TPF x%d y%d z%d ori_hist %.25f %.25f %.25f ori_hist_idx %d %d %d %d prod01 %.25f prod02 %.25f eq:%d diff:%.54f\n",
x, y, z, ori_hist[0], ori_hist[1], ori_hist[2], ori_hist_idx[0], ori_hist_idx[1], ori_hist_idx[2], ori_hist_idx[3],
prod01, prod02, ori_hist[2] == ori_hist[3], ori_hist[2] - ori_hist[3]);
#endif
if ( ( prod01 > sift_params.TwoPeak_Thresh) &&
( prod02 > sift_params.TwoPeak_Thresh) ) {
// mark this keypoint as null in map
map_idx[thread_idx] = -1;
#ifdef DEBUG_OUTPUT
printf("Removed keypoint from thread: %u, desc index: %lld, x:%d y:%d z:%d\n",
thread_idx, idx, x, y, z);
#endif
return ;
}
}
cudautils::Keypoint key = make_keypoint(image, x, y, z, thread_idx, idx,
x_stride, y_stride, sift_params, descriptors, device_centers, ix,
yy);
return;
}
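// Launch sketch (added as a comment; the authoritative launch is in
// Sift::runOnStream below, and the variable names here are placeholders).
// create_descriptor runs one thread per keypoint, so every *_scratch buffer
// must provide sift_params.nFaces entries per keypoint and descriptors must
// provide descriptor_len doubles per keypoint:
//
//   unsigned int num_threads = 1;
//   unsigned int num_blocks = get_num_blocks(num_keypoints, num_threads);
//   create_descriptor<<<num_blocks, num_threads, 0, stream>>>(
//       x_sub_stride, y_sub_stride, x_sub_start, y_sub_start, dw,
//       num_keypoints, map_idx, map, image, sift_params, device_centers,
//       descriptors, idx_scratch, yy_scratch, ori_idx_scratch, ori_scratch);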
/*Define the constructor for the SIFT class*/
/*See the class Sift definition in sift.h*/
Sift::Sift(
const unsigned int x_size,
const unsigned int y_size,
const unsigned int z_size,
const unsigned int x_sub_size,
const unsigned int y_sub_size,
const unsigned int dx,
const unsigned int dy,
const unsigned int dw,
const unsigned int num_gpus,
const unsigned int num_streams,
const cudautils::SiftParams sift_params,
const double* fv_centers)
: x_size_(x_size), y_size_(y_size), z_size_(z_size),
x_sub_size_(x_sub_size), y_sub_size_(y_sub_size),
dx_(dx), dy_(dy), dw_(dw),
num_gpus_(num_gpus), num_streams_(num_streams),
sift_params_(sift_params),
fv_centers_(fv_centers),
subdom_data_(num_gpus) {
logger_ = spdlog::get("console");
if (! logger_) {
logger_ = spdlog::stdout_logger_mt("console");
}
#ifdef DEBUG_OUTPUT
spdlog::set_level(spdlog::level::debug);
#else
spdlog::set_level(spdlog::level::info);
#endif
size_t log_q_size = 4096;
spdlog::set_async_mode(log_q_size);
num_x_sub_ = get_num_blocks(x_size_, x_sub_size_);
num_y_sub_ = get_num_blocks(y_size_, y_sub_size_);
x_sub_stride_ = x_sub_size_ + 2 * dw_;
y_sub_stride_ = y_sub_size_ + 2 * dw_;
dx_stride_ = dx_ + 2 * dw_;
dy_stride_ = dy_ + 2 * dw_;
z_stride_ = z_size_ + 2 * dw_;
#ifdef DEBUG_OUTPUT
logger_->info("x_size={}, x_sub_size={}, num_x_sub={}, x_sub_stride={}, dx={}, dx_stride={}",
x_size_, x_sub_size_, num_x_sub_, x_sub_stride_, dx_, dx_stride_);
logger_->info("y_size={}, y_sub_size={}, num_y_sub={}, y_sub_stride={}, dy={}, dy_stride={}",
y_size_, y_sub_size_, num_y_sub_, y_sub_stride_, dy_, dy_stride_);
logger_->info("z_size={}, dw={}, z_stride={}", z_size_, dw_, z_stride_);
#endif
dom_data_ = std::make_shared<DomainDataOnHost>(x_size_, y_size_, z_size_);
for (unsigned int i = 0; i < num_gpus_; i++) {
cudaSetDevice(i);
subdom_data_[i] = std::make_shared<SubDomainDataOnGPU>(x_sub_stride_, y_sub_stride_, z_stride_, num_streams_);
for (unsigned int j = 0; j < num_streams_; j++) {
subdom_data_[i]->stream_data[j] = std::make_shared<SubDomainDataOnStream>(dx_stride_, dy_stride_, z_stride_);
cudaStreamCreate(&subdom_data_[i]->stream_data[j]->stream);
}
}
cudaSetDevice(0);
unsigned int idx_gpu = 0;
for (unsigned int y_sub_i = 0; y_sub_i < num_y_sub_; y_sub_i++) {
for (unsigned int x_sub_i = 0; x_sub_i < num_x_sub_; x_sub_i++) {
subdom_data_[idx_gpu]->x_sub_i_list.push_back(x_sub_i);
subdom_data_[idx_gpu]->y_sub_i_list.push_back(y_sub_i);
idx_gpu++;
if (idx_gpu == num_gpus) {
idx_gpu = 0;
}
}
}
}
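// Minimal usage sketch for the Sift class (added for illustration only; in the
// surrounding project runOnGPU()/runOnStream() are normally driven by the task
// executor, and all names/sizes below are placeholders):
//
//   cudautils::Sift sift(x_size, y_size, z_size, x_sub_size, y_sub_size,
//                        dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers);
//   sift.setImage(h_image);            // x_size * y_size * z_size doubles
//   sift.setMap(h_map);                // 0 marks a keypoint location
//   // ... executor calls runOnGPU()/runOnStream() for every task ...
//   sift.postrun();                    // gather keypoints from all streams
//   cudautils::Keypoint_store ks;
//   sift.getKeystore(&ks);             // caller owns ks.buf and each Keypoint::ivec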
Sift::~Sift() {
for (unsigned int i = 0; i < num_gpus_; i++) {
for (unsigned int j = 0; j < num_streams_; j++) {
cudaStreamDestroy(subdom_data_[i]->stream_data[j]->stream);
}
}
//logger_->flush();
}
void Sift::setImage(const double *img)
{
thrust::copy(img, img + (x_size_ * y_size_ * z_size_), dom_data_->h_image);
}
void Sift::setImage(const std::vector<double>& img)
{
assert((x_size_ * y_size_ * z_size_) == img.size());
thrust::copy(img.begin(), img.end(), dom_data_->h_image);
}
void Sift::setMap(const int8_t *map)
{
thrust::copy(map, map + (x_size_ * y_size_ * z_size_), dom_data_->h_map);
}
void Sift::setMap(const std::vector<int8_t>& map)
{
assert((x_size_ * y_size_ * z_size_) == map.size());
thrust::copy(map.begin(), map.end(), dom_data_->h_map);
}
void Sift::getKeystore(cudautils::Keypoint_store *keystore)
{
keystore->len = dom_data_->keystore->len;
if (keystore->len) {
keystore->buf = (cudautils::Keypoint*) malloc(keystore->len * sizeof(cudautils::Keypoint));
thrust::copy(dom_data_->keystore->buf, dom_data_->keystore->buf + dom_data_->keystore->len, keystore->buf);
}
}
void Sift::getImage(double *img)
{
thrust::copy(dom_data_->h_image, dom_data_->h_image + x_size_ * y_size_ * z_size_, img);
}
void Sift::getImage(std::vector<double>& img)
{
thrust::copy(dom_data_->h_image, dom_data_->h_image + x_size_ * y_size_ * z_size_, img.begin());
}
int Sift::getNumOfGPUTasks(const int gpu_id) {
return subdom_data_[gpu_id]->x_sub_i_list.size();
}
int Sift::getNumOfStreamTasks(
const int gpu_id,
const int stream_id) {
return 1;
}
void Sift::runOnGPU(
const int gpu_id,
const unsigned int gpu_task_id) {
cudaSafeCall(cudaSetDevice(gpu_id));
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
std::shared_ptr<SubDomainDataOnStream> stream_data0 = subdom_data->stream_data[0];
unsigned int x_sub_i = subdom_data->x_sub_i_list[gpu_task_id];
unsigned int y_sub_i = subdom_data->y_sub_i_list[gpu_task_id];
#ifdef DEBUG_OUTPUT
CudaTimer timer;
logger_->info("===== gpu_id={} x_sub_i={} y_sub_i={}", gpu_id, x_sub_i, y_sub_i);
#endif
unsigned int x_sub_start = x_sub_i * x_sub_size_;
unsigned int y_sub_start = y_sub_i * y_sub_size_;
// clamp delta to end value
unsigned int x_sub_delta = get_delta(x_size_, x_sub_i, x_sub_size_);
unsigned int y_sub_delta = get_delta(y_size_, y_sub_i, y_sub_size_);
// only add in pad factor at first
unsigned int base_x_sub = (x_sub_i > 0 ? 0 : dw_);
unsigned int base_y_sub = (y_sub_i > 0 ? 0 : dw_);
// subtract pad factor after first
unsigned int padding_x_sub_start = x_sub_start - (x_sub_i > 0 ? dw_ : 0);
unsigned int padding_y_sub_start = y_sub_start - (y_sub_i > 0 ? dw_ : 0);
unsigned int padding_x_sub_delta = x_sub_delta + (x_sub_i > 0 ? dw_ : 0) + (x_sub_i < num_x_sub_ - 1 ? dw_ : 0);
unsigned int padding_y_sub_delta = y_sub_delta + (y_sub_i > 0 ? dw_ : 0) + (y_sub_i < num_y_sub_ - 1 ? dw_ : 0);
// per GPU padded image size
size_t padded_sub_volume_size = x_sub_stride_ * y_sub_stride_ * z_stride_;
#ifdef DEBUG_OUTPUT
unsigned int x_sub_end = x_sub_start + x_sub_delta;
unsigned int y_sub_end = y_sub_start + y_sub_delta;
logger_->debug("x_sub=({},{},{}) y_sub=({},{},{})", x_sub_start, x_sub_delta, x_sub_end, y_sub_start, y_sub_delta, y_sub_end);
logger_->debug("base_x_sub={},base_y_sub={}", base_x_sub, base_y_sub);
#ifdef DEBUG_OUTPUT_MATRIX
// print the x, y, z image / map coordinates of the selected keypoints
if (gpu_id == 0) { // don't repeat this for every GPU
for (long long idx=0; idx < x_size_ * y_size_ * z_size_; idx++) {
if (! dom_data_->h_map[idx]) {
unsigned int x;
unsigned int y;
unsigned int z;
ind2sub(x_size_, y_size_, idx, x, y, z);
logger_->info("h_map 0's: idx={}, x={}, y={}, z={}",
idx, x, y, z);
}
}
}
#endif
#endif
// allocate the per GPU padded map and image
int8_t *padded_sub_map;
double *padded_sub_image;
cudaSafeCall(cudaHostAlloc(&padded_sub_map, padded_sub_volume_size *
sizeof(int8_t), cudaHostAllocPortable));
cudaCheckError();
cudaSafeCall(cudaHostAlloc(&padded_sub_image, padded_sub_volume_size *
sizeof(double), cudaHostAllocPortable));
cudaCheckError();
// First set all values to the placeholder value -1
thrust::fill(padded_sub_map, padded_sub_map + padded_sub_volume_size, -1);
for (unsigned int k = 0; k < z_size_; k++) {
for (unsigned int j = 0; j < padding_y_sub_delta; j++) {
// get the row-major / c-order linear index according to the original dims [x_size, y_size, z_size]
size_t src_idx = dom_data_->sub2ind(padding_x_sub_start, padding_y_sub_start + j, k);
size_t dst_idx = subdom_data->pad_sub2ind(base_x_sub, base_y_sub + j, dw_ + k);
int8_t* src_map_begin = &(dom_data_->h_map[src_idx]);
int8_t* dst_map_begin = &(padded_sub_map[dst_idx]);
// note this assumes the rows are contiguous in memory (row-order / c-order)
thrust::copy(src_map_begin, src_map_begin + padding_x_sub_delta, dst_map_begin);
double* src_image_begin = &(dom_data_->h_image[src_idx]);
double* dst_image_begin = &(padded_sub_image[dst_idx]);
thrust::copy(src_image_begin, src_image_begin + padding_x_sub_delta, dst_image_begin);
}
}
#ifdef DEBUG_OUTPUT_MATRIX
// print the x, y, z in padded image / map coordinates of the selected keypoints
for (long long i=0; i < padded_sub_volume_size; i++) {
if (!padded_sub_map[i]) {
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_sub_stride_, y_sub_stride_, i, padding_x, padding_y, padding_z);
// correct for dw_ padding, matlab is 1-indexed
unsigned int x = x_sub_start + padding_x - dw_ + 1;
unsigned int y = y_sub_start + padding_y - dw_ + 1;
unsigned int z = padding_z - dw_ + 1;
logger_->info("padded_sub_map 0's (matlab 1-indexed): idx={}, x={}, y={}, z={}",
i, x, y, z);
}
}
#endif
thrust::fill(thrust::device, subdom_data->padded_image, subdom_data->padded_image + padded_sub_volume_size, 0.0);
cudaSafeCall(cudaMemcpyAsync(
subdom_data->padded_image,
padded_sub_image,
padded_sub_volume_size * sizeof(double),
cudaMemcpyHostToDevice, stream_data0->stream));
#ifdef DEBUG_OUTPUT
cudaSafeCall(cudaStreamSynchronize(stream_data0->stream));
logger_->info("transfer image data {}", timer.get_laptime());
#ifdef DEBUG_OUTPUT_MATRIX
logger_->info("===== dev image");
print_matrix3d(logger_, x_size_, y_size_, 0, 0, 0, x_size_, y_size_, z_size_, dom_data_->h_image);
print_matrix3d_dev(logger_, x_sub_stride_, y_sub_stride_, z_stride_, 0, 0, 0, x_sub_stride_, y_sub_stride_, z_stride_, subdom_data->padded_image);
#endif
timer.reset();
#endif
cudaSafeCall(cudaMemcpyAsync(
subdom_data->padded_map,
padded_sub_map,
padded_sub_volume_size * sizeof(int8_t),
cudaMemcpyHostToDevice, stream_data0->stream));
#ifdef DEBUG_OUTPUT
cudaSafeCall(cudaStreamSynchronize(stream_data0->stream));
logger_->info("transfer map data {}", timer.get_laptime());
#ifdef DEBUG_OUTPUT_MATRIX
logger_->debug("===== dev map");
print_matrix3d(logger_, x_size_, y_size_, 0, 0, 0, x_size_, y_size_, z_size_, dom_data_->h_map);
print_matrix3d_dev(logger_, x_sub_stride_, y_sub_stride_, z_stride_, 0, 0, 0, x_sub_stride_, y_sub_stride_, z_stride_, subdom_data->padded_map);
#endif
timer.reset();
#endif
// clear previous result to zero
thrust::fill(thrust::device, subdom_data->padded_map_idx, subdom_data->padded_map_idx + padded_sub_volume_size, 0);
/*Note: padded_sub_volume_size = x_sub_stride_ * y_sub_stride_ * z_stride_;*/
auto end_itr = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator<unsigned int>(0), // count indexes from 0
thrust::make_counting_iterator<unsigned int>(padded_sub_volume_size), // ...to padded_sub_volume_size
subdom_data->padded_map, //beginning of stencil sequence
subdom_data->padded_map_idx, // beginning of sequence to copy into
thrust::logical_not<int8_t>());//predicate test on every value
subdom_data->padded_map_idx_size = end_itr - subdom_data->padded_map_idx;
// set all padded map boundary values (still the -1 placeholder) to 0 for
// correctness, so later reads can distinguish boundary entries
thrust::replace(thrust::device, subdom_data->padded_map, subdom_data->padded_map + padded_sub_volume_size, -1, 0);
#ifdef DEBUG_OUTPUT
cudaSafeCall(cudaStreamSynchronize(stream_data0->stream));
logger_->info("calculate map idx {}", timer.get_laptime());
logger_->info("padded_map_idx_size={}", subdom_data->padded_map_idx_size);
timer.reset();
#endif
// Each GPU has its own subdom_data
// this sets the dx and dy start idx for each stream
unsigned int num_dx = get_num_blocks(x_sub_delta, dx_);
unsigned int num_dy = get_num_blocks(y_sub_delta, dy_);
unsigned int stream_id = 0;
for (unsigned int dy_i = 0; dy_i < num_dy; dy_i++) {
for (unsigned int dx_i = 0; dx_i < num_dx; dx_i++) {
subdom_data->stream_data[stream_id]->dx_i_list.push_back(dx_i);
subdom_data->stream_data[stream_id]->dy_i_list.push_back(dy_i);
stream_id++;
if (stream_id == num_streams_) {
stream_id = 0;
}
}
}
cudaSafeCall(cudaStreamSynchronize(stream_data0->stream));
cudaSafeCall(cudaFreeHost(padded_sub_map));
cudaSafeCall(cudaFreeHost(padded_sub_image));
}
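// Standalone sketch (added; not part of the original source) of the stream
// compaction idiom used in runOnGPU() above: a counting iterator paired with
// the map as a stencil and thrust::logical_not keeps exactly those linear
// indices whose map value is 0, i.e. the keypoint locations.
static size_t example_extract_keypoint_indices(const int8_t* d_map,
        unsigned int* d_indices, size_t n) {
    auto end_itr = thrust::copy_if(
        thrust::device,
        thrust::make_counting_iterator<unsigned int>(0),
        thrust::make_counting_iterator<unsigned int>(static_cast<unsigned int>(n)),
        d_map,                              // stencil: one int8_t per voxel
        d_indices,                          // output: compacted keypoint indices
        thrust::logical_not<int8_t>());     // keep entries whose map value is 0
    return static_cast<size_t>(end_itr - d_indices);
}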
const cudautils::SiftParams Sift::get_sift_params() {
return sift_params_;
}
void Sift::postrun() {
// count keypoints
int total_keypoints = 0;
for (int gpu_id = 0; gpu_id < num_gpus_; gpu_id++) {
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
for (int stream_id = 0; stream_id < num_streams_; stream_id++) {
std::shared_ptr<SubDomainDataOnStream> stream_data =
subdom_data->stream_data[stream_id];
/*logger_->info("gpu_id {}, streamid {}, # of kypts {}", gpu_id, stream_id, stream_data->keystore.size());*/
total_keypoints += stream_data->keystore.size();
}
}
// allocate for number of keypoints
dom_data_->keystore->len = total_keypoints;
/*logger_->info("total_keypoints {}", total_keypoints);*/
if (total_keypoints < 1)
return;
cudaHostAlloc(&(dom_data_->keystore->buf), dom_data_->keystore->len *
sizeof(cudautils::Keypoint), cudaHostAllocPortable);
// copy keypoints to host
int counter = 0;
for (int gpu_id = 0; gpu_id < num_gpus_; gpu_id++) {
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
for (int stream_id = 0; stream_id < num_streams_; stream_id++) {
std::shared_ptr<SubDomainDataOnStream> stream_data =
subdom_data->stream_data[stream_id];
for (int i = 0; i < stream_data->keystore.size(); i++) {
dom_data_->keystore->buf[counter] = stream_data->keystore[i];
counter++;
}
}
}
assert(counter == total_keypoints);
return;
}
void Sift::runOnStream(
const int gpu_id,
const int stream_id,
const unsigned int gpu_task_id) {
cudaSetDevice(gpu_id);
std::shared_ptr<SubDomainDataOnGPU> subdom_data = subdom_data_[gpu_id];
std::shared_ptr<SubDomainDataOnStream> stream_data = subdom_data->stream_data[stream_id];
unsigned int x_sub_i = subdom_data->x_sub_i_list[gpu_task_id];
unsigned int y_sub_i = subdom_data->y_sub_i_list[gpu_task_id];
unsigned int x_sub_delta = get_delta(x_size_, x_sub_i, x_sub_size_);
unsigned int y_sub_delta = get_delta(y_size_, y_sub_i, y_sub_size_);
unsigned int x_sub_start = x_sub_i * x_sub_size_;
unsigned int y_sub_start = y_sub_i * y_sub_size_;
#ifdef DEBUG_OUTPUT
CudaTimer timer(stream_data->stream);
#endif
// each stream has its own subsections of data that each kernel call will operate on
// the start/stop idx of these subsections are determined by the dx_i and dy_i lists
for (auto dx_itr = stream_data->dx_i_list.begin(), dy_itr = stream_data->dy_i_list.begin();
dx_itr != stream_data->dx_i_list.end() || dy_itr != stream_data->dy_i_list.end();
dx_itr++, dy_itr++) {
unsigned int dx_i = *dx_itr;
unsigned int dy_i = *dy_itr;
unsigned int dx_start = dx_i * dx_;
unsigned int dx_delta = get_delta(x_sub_delta, dx_i, dx_);
unsigned int dx_end = dx_start + dx_delta;
unsigned int dy_start = dy_i * dy_;
unsigned int dy_delta = get_delta(y_sub_delta, dy_i, dy_);
unsigned int dy_end = dy_start + dy_delta;
#ifdef DEBUG_OUTPUT
logger_->info("dx_i={}, dy_i={}", dx_i, dy_i);
logger_->info("x=({},{},{}) y=({},{},{}), dw={}", dx_start, dx_delta, dx_end, dy_start, dy_delta, dy_end, dw_);
logger_->info("subdom_data->padded_map_idx_size={}", subdom_data->padded_map_idx_size);
#endif
// create each substream data on device
long long int *substream_padded_map_idx;
cudaSafeCall(cudaMalloc(&substream_padded_map_idx,
subdom_data->padded_map_idx_size * sizeof(long long int)));
RangeCheck range_check { x_sub_stride_, y_sub_stride_,
dx_start + dw_, dx_end + dw_, dy_start + dw_, dy_end + dw_, dw_, z_size_ + dw_ };
// copy the relevant (in range) idx elements from the
// global GPU padded_map_idx to the local substream_padded_map_idx
auto end_itr = thrust::copy_if(
thrust::device,
subdom_data->padded_map_idx,
subdom_data->padded_map_idx + subdom_data->padded_map_idx_size,
substream_padded_map_idx,
range_check);
const unsigned int substream_padded_map_idx_size = end_itr - substream_padded_map_idx;
#ifdef DEBUG_OUTPUT
logger_->info("substream_padded_map_idx_size={}", substream_padded_map_idx_size);
logger_->info("transfer map idx {}", timer.get_laptime());
#ifdef DEBUG_OUTPUT_MATRIX
cudaSafeCall(cudaStreamSynchronize(stream_data->stream));
thrust::device_vector<long long int> dbg_d_padded_map_idx(substream_padded_map_idx,
substream_padded_map_idx + substream_padded_map_idx_size);
thrust::host_vector<unsigned int> dbg_h_padded_map_idx(dbg_d_padded_map_idx);
for (unsigned int i = 0; i < substream_padded_map_idx_size; i++) {
logger_->debug("substream_padded_map_idx={}", dbg_h_padded_map_idx[i]);
}
#endif
timer.reset();
#endif
if (substream_padded_map_idx_size == 0) {
#ifdef DEBUG_OUTPUT
logger_->debug("no map to be padded");
#endif
continue;
}
// only calculate location and save keypoints
if (sift_params_.skipDescriptor) {
#ifdef DEBUG_OUTPUT
logger_->debug("Skip calculatation of descriptors");
#endif
// transfer index map to host for referencing correct index
long long int *h_padded_map_idx;
cudaSafeCall(cudaHostAlloc((void **) &h_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
cudaHostAllocPortable));
cudaSafeCall(cudaMemcpyAsync(
h_padded_map_idx,
substream_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
cudaMemcpyDeviceToHost, stream_data->stream));
// make sure all async memcpys (above) are finished before access
cudaSafeCall(cudaStreamSynchronize(stream_data->stream));
// save data for all streams to global Sift object store
for (int i = 0; i < substream_padded_map_idx_size; i++) {
Keypoint temp;
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_sub_stride_, y_sub_stride_, h_padded_map_idx[i], padding_x, padding_y, padding_z);
// correct for dw_ padding, matlab is 1-indexed
temp.x = x_sub_start + padding_x - dw_ + 1;
temp.y = y_sub_start + padding_y - dw_ + 1;
temp.z = padding_z - dw_ + 1;
stream_data->keystore.push_back(temp);
}
cudaSafeCall(cudaFree(substream_padded_map_idx));
continue; // do this for every substream forloop
}
/*
Create an array to hold each descriptor ivec vector on VRAM
essentially a matrix of substream_padded_map_idx_size by descriptor length
*/
// ori_* scratch buffers are only allocated when TwoPeak_Flag is set, so keep them NULL otherwise
double *descriptors, *yy_scratch, *ori_scratch = NULL;
uint16_t *idx_scratch, *ori_idx_scratch = NULL;
long desc_mem_size = sift_params_.descriptor_len *
substream_padded_map_idx_size * sizeof(double);
cudaSafeCall(cudaMalloc(&descriptors, desc_mem_size));
// default nFaces 80; 640 bytes per keypoint yy
cudaSafeCall(cudaMalloc(&yy_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(double)));
// 160 bytes per keypoint idx
cudaSafeCall(cudaMalloc(&idx_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(uint16_t)));
if (sift_params_.TwoPeak_Flag) {
// default nFaces=80
cudaSafeCall(cudaMalloc(&ori_idx_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(uint16_t)));
cudaSafeCall(cudaMalloc(&ori_scratch, sift_params_.nFaces *
substream_padded_map_idx_size * sizeof(double)));
}
// One keypoint per thread, one thread per block
unsigned int num_threads = 1;
// round up by number of threads per block, to calc num of blocks
unsigned int num_blocks = get_num_blocks(substream_padded_map_idx_size, num_threads);
#ifdef DEBUG_OUTPUT
/*cudaSafeCall(cudaStreamSynchronize(stream_data->stream));*/
logger_->debug("num_blocks={}", num_blocks);
logger_->debug("num_threads={}", num_threads);
#endif
if (num_blocks * num_threads < substream_padded_map_idx_size) {
logger_->info("Error occured in numblocks and num_threads estimation... returning from stream");
return;
}
#ifdef DEBUG_OUTPUT
logger_->debug("create_descriptor");
timer.reset();
#endif
// sift_params.fv_centers must be placed on the device since the array is passed to a CUDA kernel
double* device_centers;
// default fv_centers_len 80 * 3 (3D) = 240;
cudaSafeCall(cudaMalloc((void **) &device_centers,
sizeof(double) * sift_params_.fv_centers_len));
cudaSafeCall(cudaMemcpy((void *) device_centers, (const void *) fv_centers_,
(size_t) sizeof(double) * sift_params_.fv_centers_len,
cudaMemcpyHostToDevice));
#ifdef DEBUG_OUTPUT_MATRIX
/*printf("Print image\n");*/
/*cudaStreamSynchronize(stream_data->stream);*/
/*int sub_volume_size = x_sub_stride_ * y_sub_stride_ * z_stride_;*/
/*double* dbg_h_image = (double*) malloc(sizeof(double) * sub_volume_size);*/
/*cudaSafeCall(cudaMemcpy((void **) dbg_h_image, subdom_data->padded_image,*/
/*sizeof(double) * sub_volume_size,*/
/*cudaMemcpyDeviceToHost));*/
/*// print*/
/*for (int i=0; i < sub_volume_size; i++) {*/
/*if (dbg_h_image[i] != 0.0) {*/
/*printf("host image[%d]: %f\n", i, dbg_h_image[i]);*/
/*}*/
/*}*/
#endif
create_descriptor<<<num_blocks, num_threads, 0, stream_data->stream>>>(
x_sub_stride_, y_sub_stride_, x_sub_start, y_sub_start,
dw_, // pad width
substream_padded_map_idx_size, // total number of keypoints to process
substream_padded_map_idx, //substream map, filtered linear idx into per GPU padded_map and padded_image
subdom_data->padded_map,//global map split per GPU
subdom_data->padded_image,//image split per GPU
sift_params_,
device_centers,
descriptors,
idx_scratch,
yy_scratch,
ori_idx_scratch,
ori_scratch);
cudaCheckError();
#ifdef DEBUG_OUTPUT
logger_->info("create descriptors elapsed: {}", timer.get_laptime());
timer.reset();
#endif
// transfer vector descriptors via host pinned memory for faster async cpy
double *h_descriptors;
cudaSafeCall(cudaHostAlloc((void **) &h_descriptors, desc_mem_size, cudaHostAllocPortable));
cudaSafeCall(cudaMemcpyAsync(
h_descriptors,
descriptors,
desc_mem_size,
cudaMemcpyDeviceToHost, stream_data->stream));
// transfer index map to host for referencing correct index
long long int *h_padded_map_idx;
cudaSafeCall(cudaHostAlloc((void **) &h_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
cudaHostAllocPortable));
cudaSafeCall(cudaMemcpyAsync(
h_padded_map_idx,
substream_padded_map_idx,
substream_padded_map_idx_size * sizeof(long long int),
cudaMemcpyDeviceToHost, stream_data->stream));
#ifdef DEBUG_OUTPUT_MATRIX
for (int i=0; i < substream_padded_map_idx_size; i++) {
printf("h_padded_map_idx:%lld\n", h_padded_map_idx[i]);
if (i % sift_params_.descriptor_len == 0) {
printf("\n\nDescriptor:%d\n", (int) i / sift_params_.descriptor_len);
}
printf("%d: %d\n", i, h_descriptors[i]);
}
#endif
// make sure all async memcpys (above) are finished before access
cudaSafeCall(cudaStreamSynchronize(stream_data->stream));
// save data for all streams to global Sift object store
int skip_counter = 0;
for (int i = 0; i < substream_padded_map_idx_size; i++) {
Keypoint temp;
if (sift_params_.TwoPeak_Flag) {
if (h_padded_map_idx[i] == -1) {
skip_counter++;
continue;
}
}
unsigned int padding_x;
unsigned int padding_y;
unsigned int padding_z;
ind2sub(x_sub_stride_, y_sub_stride_, h_padded_map_idx[i], padding_x, padding_y, padding_z);
// correct for dw_ padding, matlab is 1-indexed
temp.x = x_sub_start + padding_x - dw_ + 1;
temp.y = y_sub_start + padding_y - dw_ + 1;
temp.z = padding_z - dw_ + 1;
temp.ivec = (double*) malloc(sift_params_.descriptor_len * sizeof(double));
memcpy(temp.ivec, &(h_descriptors[i * sift_params_.descriptor_len]),
sift_params_.descriptor_len * sizeof(double));
temp.xyScale = sift_params_.xyScale;
temp.tScale = sift_params_.tScale;
// buffer the size of the whole image
stream_data->keystore.push_back(temp);
}
cudaSafeCall(cudaFree(substream_padded_map_idx));
cudaSafeCall(cudaFree(descriptors));
cudaSafeCall(cudaFree(device_centers));
cudaSafeCall(cudaFree(idx_scratch));
cudaSafeCall(cudaFree(yy_scratch));
if (sift_params_.TwoPeak_Flag) {
cudaSafeCall(cudaFree(ori_idx_scratch));
cudaSafeCall(cudaFree(ori_scratch));
}
cudaSafeCall(cudaFreeHost(h_descriptors));
cudaSafeCall(cudaFreeHost(h_padded_map_idx));
#ifdef DEBUG_OUTPUT
logger_->info("gpu:{}, stream:{}, substream_padded_map_idx_size={}, saved={}",
gpu_id, stream_id, substream_padded_map_idx_size,
substream_padded_map_idx_size - skip_counter);
logger_->info("transfer d2h and copy descriptor ivec values {}", timer.get_laptime());
#endif
}
}
} // namespace cudautils
|
e6c4fd560ac21ddfb4beea1afb7943be6808b84b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Simplified simulation of high-energy particle bombardment
*
* Parallel Computing (Computer Science Degree)
* 2017/2018
*
* (c) 2018 Arturo Gonzalez Escribano
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cputils.h>
#define PI 3.14159f
#define UMBRAL 0.001f
/* Structure to store the data of a particle storm */
typedef struct {
int size;
int *posval;
} Storm;
__global__ void kernel1(float *layerGPU,float *layerCopyGPU,int layer_size){
int idGlobal = threadIdx.x+(blockDim.x*threadIdx.y)+(blockDim.x*blockDim.y*blockIdx.x);
if(idGlobal > layer_size-1) return;
layerCopyGPU[idGlobal] = layerGPU[idGlobal];
}
__global__ void kernel2(float *layerGPU,float *layerCopyGPU,int layer_size){
int idGlobal = threadIdx.x+(blockDim.x*threadIdx.y)+(blockDim.x*blockDim.y*blockIdx.x);
if(idGlobal > layer_size-1) return;
if(idGlobal != 0 && idGlobal != layer_size-1)
layerGPU[idGlobal] = ( layerCopyGPU[idGlobal-1] + layerCopyGPU[idGlobal] + layerCopyGPU[idGlobal+1] ) / 3;
}
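// Explanatory note (added): both kernels above are launched with a 16x16 block,
// so idGlobal = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*blockIdx.x
// flattens the 2D block into 256 consecutive layer positions per block; e.g.
// threadIdx = (3,2) in block 1 maps to 3 + 16*2 + 256*1 = 291.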
/* THIS FUNCTION MAY BE MODIFIED */
/* Function to update one position of the layer */
__global__ void actualiza( float *layerGPU, int pos, float energia,int layer_size ) {
int idGlobal = threadIdx.x+(blockDim.x*threadIdx.y)+(blockDim.x*blockDim.y*blockIdx.x);
/* 1. Compute the absolute value of the distance between the
impact point and point k of the layer */
int distancia = pos - idGlobal;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point has distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
//float atenuacion = (float)distancia*distancia;
//float atenuacion = (float)distancia / PI;
float atenuacion = sqrtf( (float)distancia );
/* 4. Compute the attenuated energy */
float energia_k = energia / atenuacion;
/* 5. Do not add if the absolute value is below the threshold */
if ( energia_k >= UMBRAL || energia_k <= -UMBRAL )
layerGPU[idGlobal] = layerGPU[idGlobal] + energia_k;
}
/* AUXILIARY FUNCTIONS: not used inside the timed region, leave them as they are */
/* DEBUG function: print the state of the layer */
void debug_print(int layer_size, float *layer, int *posiciones, float *maximos, int num_storms ) {
int i,k;
if ( layer_size <= 35 ) {
/* Traverse the layer */
for( k=0; k<layer_size; k++ ) {
/* Print the value of the point */
printf("%10.4f |", layer[k] );
/* Compute the number of characters, normalized so the maximum is 60 */
int ticks = (int)( 60 * layer[k] / maximos[num_storms-1] );
/* Print all characters except the last one */
for (i=0; i<ticks-1; i++ ) printf("o");
/* For local maxima, print a special final character */
if ( k>0 && k<layer_size-1 && layer[k] > layer[k-1] && layer[k] > layer[k+1] )
printf("x");
else
printf("o");
/* If this point is one of the special maxima, append a mark */
for (i=0; i<num_storms; i++)
if ( posiciones[i] == k ) printf(" M%d", i );
/* End of line */
printf("\n");
}
}
}
/*
* Function: read a file with particle storm data
*/
Storm read_storm_file( char *fname ) {
FILE *fstorm = cp_abrir_fichero( fname );
if ( fstorm == NULL ) {
fprintf(stderr,"Error: Opening storm file %s\n", fname );
exit( EXIT_FAILURE );
}
Storm storm;
int ok = fscanf(fstorm, "%d", &(storm.size) );
if ( ok != 1 ) {
fprintf(stderr,"Error: Reading size of storm file %s\n", fname );
exit( EXIT_FAILURE );
}
storm.posval = (int *)malloc( sizeof(int) * storm.size * 2 );
if ( storm.posval == NULL ) {
fprintf(stderr,"Error: Allocating memory for storm file %s, with size %d\n", fname, storm.size );
exit( EXIT_FAILURE );
}
int elem;
for ( elem=0; elem<storm.size; elem++ ) {
ok = fscanf(fstorm, "%d %d\n",
&(storm.posval[elem*2]),
&(storm.posval[elem*2+1]) );
if ( ok != 2 ) {
fprintf(stderr,"Error: Reading element %d in storm file %s\n", elem, fname );
exit( EXIT_FAILURE );
}
}
fclose( fstorm );
return storm;
}
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,k;
/* 1.1. Read arguments */
if (argc<3) {
fprintf(stderr,"Usage: %s <size> <storm_1_file> [ <storm_i_file> ] ... \n", argv[0] );
exit( EXIT_FAILURE );
}
int layer_size = atoi( argv[1] );
int num_storms = argc-2;
Storm storms[ num_storms ];
/* 1.2. Read the storm data */
for( i=2; i<argc; i++ )
storms[i-2] = read_storm_file( argv[i] );
/* 1.3. Initialize maxima to zero */
float maximos[ num_storms ];
int posiciones[ num_storms ];
for (i=0; i<num_storms; i++) {
maximos[i] = 0.0f;
posiciones[i] = 0;
}
/* 2. Start time measurement */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/* START: do not optimize/parallelize main above this point */
/* 3. Allocate memory for the layers and initialize to zero */
float *layer = (float *)malloc( sizeof(float) * layer_size );
float *layer_copy = (float *)malloc( sizeof(float) * layer_size );
if ( layer == NULL || layer_copy == NULL ) {
fprintf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
for( k=0; k<layer_size; k++ ) layer[k] = 0.0f;
for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f;
float *layerGPU;
float *layerCopyGPU;
Storm *stormGPU;
hipError_t errorLayerGPU = hipMalloc(&layerGPU,sizeof(float)*layer_size);
hipError_t errorLayerCopyGPU = hipMalloc(&layerCopyGPU,sizeof(float)*layer_size);
hipError_t errorStorm = hipMalloc(&stormGPU,num_storms*sizeof(Storm));
dim3 numThreads(16,16);
int numBlocks;
if(layer_size <= 256){
numBlocks = 1;
}else{
numBlocks = layer_size/256;
if(layer_size%256 != 0)
numBlocks++;
}
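/* Explanatory note (added): numThreads is dim3(16,16) = 256 threads per block,
which is why numBlocks is computed against a divisor of 256 above. */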
/* 4. Bombardment phase */
for( i=0; i<num_storms; i++) {
/* 4.1. Add the energy of the impacts */
/* For each particle */
hipError_t errorDevCpyLayer = hipMemcpy(layerGPU,layer,sizeof(float)*layer_size,hipMemcpyHostToDevice);
hipError_t errorDevCpyLayerCopy = hipMemcpy(layerCopyGPU,layer_copy,sizeof(float)*layer_size,hipMemcpyHostToDevice);
hipMemcpy(stormGPU,storms,sizeof(Storm)*num_storms,hipMemcpyHostToDevice);
// for( j=0; j<storms[i].size; j++ ) {
// float energia = (float)storms[i].posval[j*2+1] / 1000;
// int posicion = storms[i].posval[j*2];
// actualiza<<<numBlocks,numThreads>>>(layerGPU,posicion,energia,layer_size);
// for( k=0; k<layer_size; k++ ) {
// /* Actualizar posicion */
// actualiza( layer, k, posicion, energia );
// }
// }
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
//for( k=0; k<layer_size; k++ )
// layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the ends, using values from the auxiliary array */
//for( k=1; k<layer_size-1; k++ )
// layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
hipLaunchKernelGGL(( kernel1), dim3(numBlocks),dim3(numThreads), 0, 0, layerGPU,layerCopyGPU,layer_size);
hipLaunchKernelGGL(( kernel2), dim3(numBlocks),dim3(numThreads), 0, 0, layerGPU,layerCopyGPU,layer_size);
hipError_t errorHostCpyLayer = hipMemcpy(layer,layerGPU,sizeof(float)*layer_size,hipMemcpyDeviceToHost);
hipError_t errorHostCpyLayerCopy = hipMemcpy(layer_copy,layerCopyGPU,sizeof(float)*layer_size,hipMemcpyDeviceToHost);
/* 4.3. Locate the maximum */
for( k=1; k<layer_size-1; k++ ) {
/* Check only local maxima */
if ( layer[k] > layer[k-1] && layer[k] > layer[k+1] ) {
if ( layer[k] > maximos[i] ) {
maximos[i] = layer[k];
posiciones[i] = k;
}
}
}
}
hipError_t freeLayer = hipFree(layerGPU);
hipError_t freeLayerCopy = hipFree(layerCopyGPU);
/* END: do not optimize/parallelize below this point */
/* 6. End of time measurement */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 7. DEBUG: draw the result (only for layers with up to 35 points) */
#ifdef DEBUG
debug_print( layer_size, layer, posiciones, maximos, num_storms );
#endif
/* 8. Output of results for the scoreboard */
printf("\n");
/* 8.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 8.2. Print the maxima */
printf("Result:");
for (i=0; i<num_storms; i++)
printf(" %d %f", posiciones[i], maximos[i] );
printf("\n");
/* 9. Free resources */
for( i=0; i<argc-2; i++ )
free( storms[i].posval );
/* 10. Successful end */
return 0;
}
|
e6c4fd560ac21ddfb4beea1afb7943be6808b84b.cu
|
/*
* Simplified simulation of high-energy particle bombardment
*
* Parallel Computing (Computer Science Degree)
* 2017/2018
*
* (c) 2018 Arturo Gonzalez Escribano
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cputils.h>
#define PI 3.14159f
#define UMBRAL 0.001f
/* Structure to store the data of a particle storm */
typedef struct {
int size;
int *posval;
} Storm;
__global__ void kernel1(float *layerGPU,float *layerCopyGPU,int layer_size){
int idGlobal = threadIdx.x+(blockDim.x*threadIdx.y)+(blockDim.x*blockDim.y*blockIdx.x);
if(idGlobal > layer_size-1) return;
layerCopyGPU[idGlobal] = layerGPU[idGlobal];
}
__global__ void kernel2(float *layerGPU,float *layerCopyGPU,int layer_size){
int idGlobal = threadIdx.x+(blockDim.x*threadIdx.y)+(blockDim.x*blockDim.y*blockIdx.x);
if(idGlobal > layer_size-1) return;
if(idGlobal != 0 && idGlobal != layer_size-1)
layerGPU[idGlobal] = ( layerCopyGPU[idGlobal-1] + layerCopyGPU[idGlobal] + layerCopyGPU[idGlobal+1] ) / 3;
}
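// Explanatory note (added): both kernels above are launched with a 16x16 block,
// so idGlobal = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*blockIdx.x
// flattens the 2D block into 256 consecutive layer positions per block; e.g.
// threadIdx = (3,2) in block 1 maps to 3 + 16*2 + 256*1 = 291.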
/* THIS FUNCTION MAY BE MODIFIED */
/* Function to update one position of the layer */
__global__ void actualiza( float *layerGPU, int pos, float energia,int layer_size ) {
int idGlobal = threadIdx.x+(blockDim.x*threadIdx.y)+(blockDim.x*blockDim.y*blockIdx.x);
/* 1. Compute the absolute value of the distance between the
impact point and point k of the layer */
int distancia = pos - idGlobal;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point has distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
//float atenuacion = (float)distancia*distancia;
//float atenuacion = (float)distancia / PI;
float atenuacion = sqrtf( (float)distancia );
/* 4. Compute the attenuated energy */
float energia_k = energia / atenuacion;
/* 5. Do not add if the absolute value is below the threshold */
if ( energia_k >= UMBRAL || energia_k <= -UMBRAL )
layerGPU[idGlobal] = layerGPU[idGlobal] + energia_k;
}
/* AUXILIARY FUNCTIONS: not used inside the timed region, leave them as they are */
/* DEBUG function: print the state of the layer */
void debug_print(int layer_size, float *layer, int *posiciones, float *maximos, int num_storms ) {
int i,k;
if ( layer_size <= 35 ) {
/* Traverse the layer */
for( k=0; k<layer_size; k++ ) {
/* Print the value of the point */
printf("%10.4f |", layer[k] );
/* Compute the number of characters, normalized so the maximum is 60 */
int ticks = (int)( 60 * layer[k] / maximos[num_storms-1] );
/* Print all characters except the last one */
for (i=0; i<ticks-1; i++ ) printf("o");
/* For local maxima, print a special final character */
if ( k>0 && k<layer_size-1 && layer[k] > layer[k-1] && layer[k] > layer[k+1] )
printf("x");
else
printf("o");
/* If this point is one of the special maxima, append a mark */
for (i=0; i<num_storms; i++)
if ( posiciones[i] == k ) printf(" M%d", i );
/* End of line */
printf("\n");
}
}
}
/*
* Function: read a file with particle storm data
*/
Storm read_storm_file( char *fname ) {
FILE *fstorm = cp_abrir_fichero( fname );
if ( fstorm == NULL ) {
fprintf(stderr,"Error: Opening storm file %s\n", fname );
exit( EXIT_FAILURE );
}
Storm storm;
int ok = fscanf(fstorm, "%d", &(storm.size) );
if ( ok != 1 ) {
fprintf(stderr,"Error: Reading size of storm file %s\n", fname );
exit( EXIT_FAILURE );
}
storm.posval = (int *)malloc( sizeof(int) * storm.size * 2 );
if ( storm.posval == NULL ) {
fprintf(stderr,"Error: Allocating memory for storm file %s, with size %d\n", fname, storm.size );
exit( EXIT_FAILURE );
}
int elem;
for ( elem=0; elem<storm.size; elem++ ) {
ok = fscanf(fstorm, "%d %d\n",
&(storm.posval[elem*2]),
&(storm.posval[elem*2+1]) );
if ( ok != 2 ) {
fprintf(stderr,"Error: Reading element %d in storm file %s\n", elem, fname );
exit( EXIT_FAILURE );
}
}
fclose( fstorm );
return storm;
}
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,k;
/* 1.1. Read arguments */
if (argc<3) {
fprintf(stderr,"Usage: %s <size> <storm_1_file> [ <storm_i_file> ] ... \n", argv[0] );
exit( EXIT_FAILURE );
}
int layer_size = atoi( argv[1] );
int num_storms = argc-2;
Storm storms[ num_storms ];
/* 1.2. Read the storm data */
for( i=2; i<argc; i++ )
storms[i-2] = read_storm_file( argv[i] );
/* 1.3. Initialize maxima to zero */
float maximos[ num_storms ];
int posiciones[ num_storms ];
for (i=0; i<num_storms; i++) {
maximos[i] = 0.0f;
posiciones[i] = 0;
}
/* 2. Start time measurement */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/* START: do not optimize/parallelize main above this point */
/* 3. Allocate memory for the layers and initialize to zero */
float *layer = (float *)malloc( sizeof(float) * layer_size );
float *layer_copy = (float *)malloc( sizeof(float) * layer_size );
if ( layer == NULL || layer_copy == NULL ) {
fprintf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
for( k=0; k<layer_size; k++ ) layer[k] = 0.0f;
for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f;
float *layerGPU;
float *layerCopyGPU;
Storm *stormGPU;
cudaError_t errorLayerGPU = cudaMalloc(&layerGPU,sizeof(float)*layer_size);
cudaError_t errorLayerCopyGPU = cudaMalloc(&layerCopyGPU,sizeof(float)*layer_size);
cudaError_t errorStorm = cudaMalloc(&stormGPU,num_storms*sizeof(Storm));
dim3 numThreads(16,16);
int numBlocks;
if(layer_size <= 256){
numBlocks = 1;
}else{
numBlocks = layer_size/256;
if(layer_size%256 != 0)
numBlocks++;
}
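/* Explanatory note (added): numThreads is dim3(16,16) = 256 threads per block,
which is why numBlocks is computed against a divisor of 256 above. */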
/* 4. Bombardment phase */
for( i=0; i<num_storms; i++) {
/* 4.1. Add the energy of the impacts */
/* For each particle */
cudaError_t errorDevCpyLayer = cudaMemcpy(layerGPU,layer,sizeof(float)*layer_size,cudaMemcpyHostToDevice);
cudaError_t errorDevCpyLayerCopy = cudaMemcpy(layerCopyGPU,layer_copy,sizeof(float)*layer_size,cudaMemcpyHostToDevice);
cudaMemcpy(stormGPU,storms,sizeof(Storm)*num_storms,cudaMemcpyHostToDevice);
// for( j=0; j<storms[i].size; j++ ) {
// float energia = (float)storms[i].posval[j*2+1] / 1000;
// int posicion = storms[i].posval[j*2];
// actualiza<<<numBlocks,numThreads>>>(layerGPU,posicion,energia,layer_size);
// for( k=0; k<layer_size; k++ ) {
// /* Update position */
// actualiza( layer, k, posicion, energia );
// }
// }
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
//for( k=0; k<layer_size; k++ )
// layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the ends, using values from the auxiliary array */
//for( k=1; k<layer_size-1; k++ )
// layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
kernel1<<<numBlocks,numThreads>>>(layerGPU,layerCopyGPU,layer_size);
kernel2<<<numBlocks,numThreads>>>(layerGPU,layerCopyGPU,layer_size);
cudaError_t errorHostCpyLayer = cudaMemcpy(layer,layerGPU,sizeof(float)*layer_size,cudaMemcpyDeviceToHost);
cudaError_t errorHostCpyLayerCopy = cudaMemcpy(layer_copy,layerCopyGPU,sizeof(float)*layer_size,cudaMemcpyDeviceToHost);
/* 4.3. Locate the maximum */
for( k=1; k<layer_size-1; k++ ) {
/* Check only local maxima */
if ( layer[k] > layer[k-1] && layer[k] > layer[k+1] ) {
if ( layer[k] > maximos[i] ) {
maximos[i] = layer[k];
posiciones[i] = k;
}
}
}
}
cudaError_t freeLayer = cudaFree(layerGPU);
cudaError_t freeLayerCopy = cudaFree(layerCopyGPU);
/* END: do not optimize/parallelize below this point */
/* 6. End of time measurement */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 7. DEBUG: draw the result (only for layers with up to 35 points) */
#ifdef DEBUG
debug_print( layer_size, layer, posiciones, maximos, num_storms );
#endif
/* 8. Output of results for the scoreboard */
printf("\n");
/* 8.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 8.2. Print the maxima */
printf("Result:");
for (i=0; i<num_storms; i++)
printf(" %d %f", posiciones[i], maximos[i] );
printf("\n");
/* 9. Free resources */
for( i=0; i<argc-2; i++ )
free( storms[i].posval );
/* 10. Successful end */
return 0;
}
|
2696009a504602d763b2d71e8c85c60fb99887f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
__global__ void reverse(int * in, int * out, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
out[index] = in[size - index - 1];
}
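// Explanatory note (added): reverse() performs no bounds check, so the launch
// in main() relies on N being an exact multiple of THREAD_PER_BLOCK; with
// N = 2048*2048 and 512 threads per block that holds (8192 full blocks).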
int main()
{
int * in, * out;
int * d_in, * d_out;
int size = N * sizeof(int);
int i;
hipMalloc((void**)&d_in, size);
hipMalloc((void**)&d_out, size);
in = (int *)malloc(size);
out = (int *)malloc(size);
for(i = 0; i<N; ++i)
{
in[i] = i;
}
hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( reverse), dim3(N/THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK) , 0, 0, d_in, d_out, N);
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
for(i=0; i<N; ++i)
{
if(out[i] != in[N-i-1]) {
printf("error\n");
break;
}
}
if(i == N){
printf("correct\n");
}
free(in); free(out);
hipFree(d_in);
hipFree(d_out);
return 0;
}
|
2696009a504602d763b2d71e8c85c60fb99887f1.cu
|
#include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
__global__ void reverse(int * in, int * out, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
out[index] = in[size - index - 1];
}
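// Explanatory note (added): reverse() performs no bounds check, so the launch
// in main() relies on N being an exact multiple of THREAD_PER_BLOCK; with
// N = 2048*2048 and 512 threads per block that holds (8192 full blocks).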
int main()
{
int * in, * out;
int * d_in, * d_out;
int size = N * sizeof(int);
int i;
cudaMalloc((void**)&d_in, size);
cudaMalloc((void**)&d_out, size);
in = (int *)malloc(size);
out = (int *)malloc(size);
for(i = 0; i<N; ++i)
{
in[i] = i;
}
cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
reverse<<< N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>(d_in, d_out, N);
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
for(i=0; i<N; ++i)
{
if(out[i] != in[N-i-1]) {
printf("error\n");
break;
}
}
if(i == N){
printf("correct\n");
}
free(in); free(out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
d0bb5314855278447a7a1125173dda5f98b68444.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <limits>
#include <time.h>
#include <hip/hip_runtime.h>
#include <minigun/minigun.h>
#include "./baseline/yzh_kernels.cuh"
#include "./minigun/esoftmax.cuh"
#include "../samples_io.h"
#include "../samples_utils.h"
using minigun::advance::RuntimeConfig;
using namespace esoftmax;
double RunMinigun(const utils::SampleCsr& scsr,
const minigun::Csr& csr,
mg_int feat_size, mg_int num_heads) {
// gdata
GData gdata, truth;
gdata.H = num_heads;
InitGData(scsr, &gdata, &truth);
CUDA_CALL(hipDeviceSynchronize());
// create stream
RuntimeConfig rtcfg;
rtcfg.ctx = {kDLGPU, 0};
int nt = utils::_FindNumThreads(gdata.H, 32);
rtcfg.data_num_threads = nt;
rtcfg.data_num_blocks = gdata.H / nt;
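// Explanatory note (added): the data-parallel dimension here is the number of
// attention heads; data_num_threads * data_num_blocks is meant to cover
// gdata.H (this assumes _FindNumThreads returns a divisor of H).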
CUDA_CALL(hipStreamCreate(&rtcfg.stream));
minigun::IntArray1D infront;
// dry run
typedef minigun::advance::Config<true, minigun::advance::kV2N> Config;
minigun::advance::Advance<kDLGPU, Config, GData, EdgeMax>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, MinusMaxExpSum>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, Norm>(
rtcfg, csr, &gdata, infront);
CUDA_CALL(hipDeviceSynchronize());
CheckResult(scsr, &gdata, &truth);
const int K = 10;
timeval t0, t1;
gettimeofday(&t0, nullptr);
for (int i = 0; i < K; ++i) {
minigun::advance::Advance<kDLGPU, Config, GData, EdgeMax>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, MinusMaxExpSum>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, Norm>(
rtcfg, csr, &gdata, infront);
}
CUDA_CALL(hipDeviceSynchronize());
gettimeofday(&t1, nullptr);
double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec -
(t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms
FreeGData(&gdata, &truth);
return dur;
}
double RunBaseline1(const utils::SampleCsr& scsr,
const minigun::Csr& csr,
mg_int feat_size, mg_int num_heads) {
// gdata
GData gdata, truth;
gdata.H = num_heads;
InitGData(scsr, &gdata, &truth);
const mg_int N = csr.row_offsets.length - 1;
const int H = gdata.H;
// dry run
hipLaunchKernelGGL(( custom_kernel::sparse_softmax_forward_kernel<mg_int, float>), dim3((N + 31) / 32), dim3(dim3(32, H)), 0, 0,
csr.row_offsets.data,
gdata.score,
gdata.ret,
(int)N, (int)H);
CUDA_CALL(hipDeviceSynchronize());
const int K = 10;
timeval t0, t1;
gettimeofday(&t0, nullptr);
for (int i = 0; i < K; ++i) {
hipLaunchKernelGGL(( custom_kernel::sparse_softmax_forward_kernel<mg_int, float>), dim3((N + 31) / 32), dim3(dim3(32, H)), 0, 0,
csr.row_offsets.data,
gdata.score,
gdata.ret,
(int)N, (int)H);
}
CUDA_CALL(hipDeviceSynchronize());
gettimeofday(&t1, nullptr);
double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec -
(t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms
FreeGData(&gdata, &truth);
return dur;
}
int main(int argc, char** argv) {
srand(42);
if (argc < 3) {
std::cout << "USAGE: ./bench_masked_mm <file_name> <num_heads>" << std::endl;
return 1;
}
const char* filename = argv[1];
const int num_heads = std::atoi(argv[2]);
std::cout << "filename=" << filename << " num_heads=" << num_heads << std::endl;
utils::SampleCsr scsr;
utils::LoadGraphFromFile(filename, &scsr);
const mg_int N = scsr.row_offsets.size() - 1;
const mg_int M = scsr.column_indices.size();
std::cout << "#Nodes: " << N << " #Edges: " << M << std::endl;
// csr
minigun::Csr csr = utils::ToMinigunCsr(scsr, kDLGPU);
double dur1 = RunMinigun(scsr, csr, 0, num_heads);
std::cout << "minigun time(ms): " << dur1 << std::endl;
double dur2 = RunBaseline1(scsr, csr, 0, num_heads);
std::cout << "baseline1 time(ms): " << dur2 << std::endl;
return 0;
}
|
d0bb5314855278447a7a1125173dda5f98b68444.cu
|
#include <iostream>
#include <cstdlib>
#include <limits>
#include <time.h>
#include <cuda_runtime.h>
#include <minigun/minigun.h>
#include "./baseline/yzh_kernels.cuh"
#include "./minigun/esoftmax.cuh"
#include "../samples_io.h"
#include "../samples_utils.h"
using minigun::advance::RuntimeConfig;
using namespace esoftmax;
double RunMinigun(const utils::SampleCsr& scsr,
const minigun::Csr& csr,
mg_int feat_size, mg_int num_heads) {
// gdata
GData gdata, truth;
gdata.H = num_heads;
InitGData(scsr, &gdata, &truth);
CUDA_CALL(cudaDeviceSynchronize());
// create stream
RuntimeConfig rtcfg;
rtcfg.ctx = {kDLGPU, 0};
int nt = utils::_FindNumThreads(gdata.H, 32);
rtcfg.data_num_threads = nt;
rtcfg.data_num_blocks = gdata.H / nt;
CUDA_CALL(cudaStreamCreate(&rtcfg.stream));
minigun::IntArray1D infront;
// dry run
typedef minigun::advance::Config<true, minigun::advance::kV2N> Config;
minigun::advance::Advance<kDLGPU, Config, GData, EdgeMax>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, MinusMaxExpSum>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, Norm>(
rtcfg, csr, &gdata, infront);
CUDA_CALL(cudaDeviceSynchronize());
CheckResult(scsr, &gdata, &truth);
const int K = 10;
timeval t0, t1;
gettimeofday(&t0, nullptr);
for (int i = 0; i < K; ++i) {
minigun::advance::Advance<kDLGPU, Config, GData, EdgeMax>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, MinusMaxExpSum>(
rtcfg, csr, &gdata, infront);
minigun::advance::Advance<kDLGPU, Config, GData, Norm>(
rtcfg, csr, &gdata, infront);
}
CUDA_CALL(cudaDeviceSynchronize());
gettimeofday(&t1, nullptr);
double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec -
(t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms
FreeGData(&gdata, &truth);
return dur;
}
double RunBaseline1(const utils::SampleCsr& scsr,
const minigun::Csr& csr,
mg_int feat_size, mg_int num_heads) {
// gdata
GData gdata, truth;
gdata.H = num_heads;
InitGData(scsr, &gdata, &truth);
const mg_int N = csr.row_offsets.length - 1;
const int H = gdata.H;
// dry run
custom_kernel::sparse_softmax_forward_kernel<mg_int, float><<<(N + 31) / 32, dim3(32, H)>>>(
csr.row_offsets.data,
gdata.score,
gdata.ret,
(int)N, (int)H);
CUDA_CALL(cudaDeviceSynchronize());
const int K = 10;
timeval t0, t1;
gettimeofday(&t0, nullptr);
for (int i = 0; i < K; ++i) {
custom_kernel::sparse_softmax_forward_kernel<mg_int, float><<<(N + 31) / 32, dim3(32, H)>>>(
csr.row_offsets.data,
gdata.score,
gdata.ret,
(int)N, (int)H);
}
CUDA_CALL(cudaDeviceSynchronize());
gettimeofday(&t1, nullptr);
double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec -
(t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms
FreeGData(&gdata, &truth);
return dur;
}
int main(int argc, char** argv) {
srand(42);
if (argc < 3) {
std::cout << "USAGE: ./bench_masked_mm <file_name> <num_heads>" << std::endl;
return 1;
}
const char* filename = argv[1];
const int num_heads = std::atoi(argv[2]);
std::cout << "filename=" << filename << " num_heads=" << num_heads << std::endl;
utils::SampleCsr scsr;
utils::LoadGraphFromFile(filename, &scsr);
const mg_int N = scsr.row_offsets.size() - 1;
const mg_int M = scsr.column_indices.size();
std::cout << "#Nodes: " << N << " #Edges: " << M << std::endl;
// csr
minigun::Csr csr = utils::ToMinigunCsr(scsr, kDLGPU);
double dur1 = RunMinigun(scsr, csr, 0, num_heads);
std::cout << "minigun time(ms): " << dur1 << std::endl;
double dur2 = RunBaseline1(scsr, csr, 0, num_heads);
std::cout << "baseline1 time(ms): " << dur2 << std::endl;
return 0;
}
|
d31f747beeec83d09ff4501fd90cdbffbc4df366.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
#if SHARED == 1
#define ACC(x,y,z) sharedMemAcc(x,y,z)
#else
#define ACC(x,y,z) naiveAcc(x,y,z)
#endif
//GLOBALS
dim3 threadsPerBlock(blockSize);
int numObjects;
const float boidMass = 1.0f;
const float scene_scale = 4e2;
const __device__ float neighborRadius = 20.0f;
const __device__ float g_fMaxForce = 1.0f;
const __device__ float neighborAngle = 180.0f;
const __device__ float c_alignment = 2.0f;
const __device__ float c_separation = 3.0f;
const __device__ float c_cohesion = 0.005f;
const __device__ float c_seek = 0.001f;
glm::vec4 * dev_pos;
glm::vec3 * dev_vel;
glm::vec3 * dev_acc;
void checkCUDAError(const char *msg, int line = -1)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
getchar();
exit(EXIT_FAILURE);
}
}
__host__ __device__ unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
__host__ __device__ glm::vec3 truncate(glm::vec3 direction, float maxLength)
{
if(glm::length(direction) > maxLength)
return glm::normalize(direction) * maxLength;
else
return direction;
}
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Generate randomized starting positions for the boids (x, y and z)
//Also initializes the masses
__global__ void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale, float mass)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index)-0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = /*0.0f;*/rand.z;
arr[index].w = mass;
}
}
//Generate randomized starting velocities (x, y and z)
__global__ void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = /*0.0;*/rand.z;
}
}
//Integration
__global__ void updateAccelaration(int N, float dt, glm::vec4 *pos, glm::vec3 *vel, glm::vec3 *acc, glm::vec3 target)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N )
{
glm::vec3 myPosition(pos[index].x, pos[index].y, pos[index].z);
glm::vec3 myVelocity = vel[index];
int numberOfNeighbors = 0;
glm::vec3 alignmentNumerator(0.0f);
glm::vec3 alignmentVelocity(0.0f);
glm::vec3 separationVel(0.0f);
glm::vec3 centerOfMass(0.0f);
glm::vec3 desiredVel(0.0f);
// Calculate desired velocity
for(int i = 0; i < N; ++i)
{
glm::vec3 theirPos(pos[i].x, pos[i].y, pos[i].z);
float distanceToNeighbor = glm::distance(myPosition, theirPos) + EPSILON;
    if(distanceToNeighbor < neighborRadius && glm::dot(glm::normalize(myVelocity), glm::normalize(theirPos - myPosition)) > cos(neighborAngle/2 * 3.1415926535f/180.0f)) // neighborAngle is in degrees
{
alignmentNumerator += vel[i];
separationVel += (myPosition - theirPos) / distanceToNeighbor /distanceToNeighbor;
centerOfMass += theirPos;
++numberOfNeighbors;
}
}
if(numberOfNeighbors > 0)
{
alignmentVelocity = alignmentNumerator / float(numberOfNeighbors);
centerOfMass = centerOfMass / float(numberOfNeighbors);
desiredVel = c_alignment*alignmentVelocity + c_separation*separationVel + c_cohesion*(centerOfMass - myPosition) + c_seek * glm::normalize(target-myPosition);
}
else desiredVel = c_seek * (target-myPosition);
if(glm::length(myPosition) > 800.0f) desiredVel = glm::normalize(-myPosition);
// Calculate acceleration from steering direction
acc[index] = truncate(desiredVel - myVelocity, g_fMaxForce) / pos[index].w;
}
}
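/* Steering summary for updateAccelaration above (classic boids rules + seek):
 *   desired = c_alignment  * avg(neighbor velocities)
 *           + c_separation * sum((p - p_j) / d_j^2)
 *           + c_cohesion   * (center_of_mass - p)
 *           + c_seek       * normalize(target - p)
 *   acc     = truncate(desired - v, g_fMaxForce) / mass
 * where neighbors are boids within neighborRadius and inside the view cone
 * defined by neighborAngle, mass is stored in pos[index].w, and a seek-only
 * fallback (plus a pull back toward the origin beyond radius 800) applies
 * when no neighbors are found.
 */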
__global__ void updatePosition(int N, float dt, glm::vec4 *pos, glm::vec3 *vel, glm::vec3 *acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N )
{
//vel[index] = truncate(vel[index] + acc[index] * dt, g_fMaxSpeed);
vel[index] = glm::normalize(vel[index] + acc[index] * dt) * g_fMaxSpeed;
// RK4 method
glm::vec3 k1 = vel[index];
glm::vec3 k2 = k1 + 0.5f * dt * k1;
glm::vec3 k3 = k1 + 0.5f * dt * k2;
glm::vec3 k4 = k1 + dt * k3;
glm::vec3 increment = 1.0f/6.0f * (k1 + 2.0f*k2 + 2.0f*k3 + k4);
pos[index].x += increment.x * dt;
pos[index].y += increment.y * dt;
pos[index].z += increment.z * dt;
//Euler method
/*pos[index].x += vel[index].x * dt;
pos[index].y += vel[index].y * dt;
pos[index].z += vel[index].z * dt;*/
}
}
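/* Integration note: each RK4 stage above feeds the (speed-clamped) velocity
 * back in as its own derivative, so the net position update is
 *   pos += vel * (1 + dt/2 + dt*dt/6 + dt*dt*dt/24) * dt,
 * i.e. the commented-out Euler step scaled by that bracketed factor.
 */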
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the positions of the boids)
__global__ void sendToVBO(int N, glm::vec4 * pos, glm::vec3 *vel, float * posVBO, float *velVBO, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = 2.0f / s_scale;
float c_scale_h = 2.0f / s_scale;
float c_scale_s = 2.0f / s_scale;
if(index<N)
{
posVBO[4*index+0] = pos[index].x*c_scale_w;
posVBO[4*index+1] = pos[index].y*c_scale_h;
posVBO[4*index+2] = pos[index].z*c_scale_s;
posVBO[4*index+3] = 1;
velVBO[3*index+0] = vel[index].x*c_scale_w;
velVBO[3*index+1] = vel[index].y*c_scale_h;
velVBO[3*index+2] = vel[index].z*c_scale_s;
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); // one dimensional grid
hipMalloc((void**)&dev_pos, N*sizeof(glm::vec4));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&dev_vel, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&dev_acc, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale, boidMass); // one dimensional block
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateRandomVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, g_fMaxSpeed);
checkCUDAErrorWithLine("Kernel failed!");
}
void cudaFlockingUpdateWrapper(float dt, glm::vec3 target)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
hipLaunchKernelGGL(( updateAccelaration), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc, target);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( updatePosition), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
}
void cudaUpdateVBO(float *vbodptr, float *velptr)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
hipLaunchKernelGGL(( sendToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel, vbodptr, velptr, scene_scale);
checkCUDAErrorWithLine("Kernel failed!");
}
|
d31f747beeec83d09ff4501fd90cdbffbc4df366.cu
|
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
#if SHARED == 1
#define ACC(x,y,z) sharedMemAcc(x,y,z)
#else
#define ACC(x,y,z) naiveAcc(x,y,z)
#endif
//GLOBALS
dim3 threadsPerBlock(blockSize);
int numObjects;
const float boidMass = 1.0f;
const float scene_scale = 4e2;
const __device__ float neighborRadius = 20.0f;
const __device__ float g_fMaxForce = 1.0f;
const __device__ float neighborAngle = 180.0f;
const __device__ float c_alignment = 2.0f;
const __device__ float c_separation = 3.0f;
const __device__ float c_cohesion = 0.005f;
const __device__ float c_seek = 0.001f;
glm::vec4 * dev_pos;
glm::vec3 * dev_vel;
glm::vec3 * dev_acc;
void checkCUDAError(const char *msg, int line = -1)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
getchar();
exit(EXIT_FAILURE);
}
}
__host__ __device__ unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
__host__ __device__ glm::vec3 truncate(glm::vec3 direction, float maxLength)
{
if(glm::length(direction) > maxLength)
return glm::normalize(direction) * maxLength;
else
return direction;
}
//Function that generates pseudo-random "static": a hash-seeded random vec3 per thread and time step.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Generate randomized starting positions for the boids (x, y and z)
//Also initializes the masses
__global__ void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale, float mass)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index)-0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = /*0.0f;*/rand.z;
arr[index].w = mass;
}
}
//Generate randomized starting velocities (x, y and z)
__global__ void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = /*0.0;*/rand.z;
}
}
//Integration
__global__ void updateAccelaration(int N, float dt, glm::vec4 *pos, glm::vec3 *vel, glm::vec3 *acc, glm::vec3 target)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N )
{
glm::vec3 myPosition(pos[index].x, pos[index].y, pos[index].z);
glm::vec3 myVelocity = vel[index];
int numberOfNeighbors = 0;
glm::vec3 alignmentNumerator(0.0f);
glm::vec3 alignmentVelocity(0.0f);
glm::vec3 separationVel(0.0f);
glm::vec3 centerOfMass(0.0f);
glm::vec3 desiredVel(0.0f);
// Calculate desired velocity
for(int i = 0; i < N; ++i)
{
glm::vec3 theirPos(pos[i].x, pos[i].y, pos[i].z);
float distanceToNeighbor = glm::distance(myPosition, theirPos) + EPSILON;
    if(distanceToNeighbor < neighborRadius && glm::dot(glm::normalize(myVelocity), glm::normalize(theirPos - myPosition)) > cos(neighborAngle/2 * 3.1415926535f/180.0f)) // neighborAngle is in degrees
{
alignmentNumerator += vel[i];
separationVel += (myPosition - theirPos) / distanceToNeighbor /distanceToNeighbor;
centerOfMass += theirPos;
++numberOfNeighbors;
}
}
if(numberOfNeighbors > 0)
{
alignmentVelocity = alignmentNumerator / float(numberOfNeighbors);
centerOfMass = centerOfMass / float(numberOfNeighbors);
desiredVel = c_alignment*alignmentVelocity + c_separation*separationVel + c_cohesion*(centerOfMass - myPosition) + c_seek * glm::normalize(target-myPosition);
}
else desiredVel = c_seek * (target-myPosition);
if(glm::length(myPosition) > 800.0f) desiredVel = glm::normalize(-myPosition);
// Calculate acceleration from steering direction
acc[index] = truncate(desiredVel - myVelocity, g_fMaxForce) / pos[index].w;
}
}
__global__ void updatePosition(int N, float dt, glm::vec4 *pos, glm::vec3 *vel, glm::vec3 *acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N )
{
//vel[index] = truncate(vel[index] + acc[index] * dt, g_fMaxSpeed);
vel[index] = glm::normalize(vel[index] + acc[index] * dt) * g_fMaxSpeed;
// RK4 method
glm::vec3 k1 = vel[index];
glm::vec3 k2 = k1 + 0.5f * dt * k1;
glm::vec3 k3 = k1 + 0.5f * dt * k2;
glm::vec3 k4 = k1 + dt * k3;
glm::vec3 increment = 1.0f/6.0f * (k1 + 2.0f*k2 + 2.0f*k3 + k4);
pos[index].x += increment.x * dt;
pos[index].y += increment.y * dt;
pos[index].z += increment.z * dt;
//Euler method
/*pos[index].x += vel[index].x * dt;
pos[index].y += vel[index].y * dt;
pos[index].z += vel[index].z * dt;*/
}
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the positions of the boids)
__global__ void sendToVBO(int N, glm::vec4 * pos, glm::vec3 *vel, float * posVBO, float *velVBO, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = 2.0f / s_scale;
float c_scale_h = 2.0f / s_scale;
float c_scale_s = 2.0f / s_scale;
if(index<N)
{
posVBO[4*index+0] = pos[index].x*c_scale_w;
posVBO[4*index+1] = pos[index].y*c_scale_h;
posVBO[4*index+2] = pos[index].z*c_scale_s;
posVBO[4*index+3] = 1;
velVBO[3*index+0] = vel[index].x*c_scale_w;
velVBO[3*index+1] = vel[index].y*c_scale_h;
velVBO[3*index+2] = vel[index].z*c_scale_s;
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); // one dimensional grid
cudaMalloc((void**)&dev_pos, N*sizeof(glm::vec4));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&dev_vel, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&dev_acc, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
generateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale, boidMass); // one dimensional block
checkCUDAErrorWithLine("Kernel failed!");
generateRandomVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, g_fMaxSpeed);
checkCUDAErrorWithLine("Kernel failed!");
}
void cudaFlockingUpdateWrapper(float dt, glm::vec3 target)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
updateAccelaration<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc, target);
checkCUDAErrorWithLine("Kernel failed!");
updatePosition<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
}
void cudaUpdateVBO(float *vbodptr, float *velptr)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
sendToVBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel, vbodptr, velptr, scene_scale);
checkCUDAErrorWithLine("Kernel failed!");
}
|
a02d29331d4207327da8498f083b71235c22d3b1.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/remap/backward_mat.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include <hip/hip_runtime.h>
#include "src/common/rounding_converter.cuh"
#include "src/cuda/cv/kernel_common.cuh"
#include "src/cuda/remap/common.h"
#include "src/cuda/utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace remap;
using namespace rounding;
namespace {
template <const uint32_t format>
__device__ inline int get_offset(int height, int width, int channel, int h,
int w, int c);
template <>
__device__ inline int get_offset<param_enumv::Remap::Format::NCHW>(
int height, int width, int channel, int h, int w, int c) {
return channel * h * w + height * w + width;
}
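// In get_offset, (height, width, channel) are the coordinates being addressed
// and (h, w, c) are the tensor extents, so the NCHW offset above is
// channel * H * W + height * W + width.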
template <typename ctype, const uint32_t format, ::BorderMode bmode>
struct GetSrcData {
__device__ static inline int get_index(int height, int width, int channel,
int h, int w, int c) {
height = megcv::border_interpolate<bmode>(height, h);
width = megcv::border_interpolate<bmode>(width, w);
return get_offset<format>(height, width, channel, h, w, c);
}
};
template <typename ctype, const uint32_t format>
struct GetSrcData<ctype, format, ::BorderMode::BORDER_CONSTANT> {
__device__ static inline int get_index(int height, int width, int channel,
int h, int w, int c) {
return (height >= 0 && height < h && width >= 0 && width < w)
? get_offset<format>(height, width, channel, h, w, c)
: -1;
}
};
template <typename ctype, const uint32_t format, ::BorderMode bmode>
__global__ void kern_general(const ctype* src, const float* map_xy,
const ctype* diff, float* __restrict grad, int C,
int IH, int IW, int OH, int OW, float scalar) {
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
src += blockIdx.z * C * IH * IW;
diff += blockIdx.z * C * OH * OW;
map_xy += blockIdx.z * 2 * OH * OW;
grad += blockIdx.z * 2 * OH * OW;
RoundingConverter<ctype> round_converter;
if (ow < OW && oh < OH) {
float index_col = map_xy[oh * OW * 2 + ow * 2 + 0];
float index_row = map_xy[oh * OW * 2 + ow * 2 + 1];
int col = static_cast<int>(floor(index_col));
int row = static_cast<int>(floor(index_row));
float v = index_col - col; // alphaw
float u = index_row - row; // alphah
const float one = 1.f;
for (int c = 0; c < C; ++c) {
float hidden = static_cast<float>(
diff[get_offset<format>(
oh, ow, c, OH, OW, C)]);
float du = 0.f, dv = 0.f;
int a00 = GetSrcData<ctype, format, bmode>::get_index(
row + 0, col + 0, c, IH, IW, C);
int a01 = GetSrcData<ctype, format, bmode>::get_index(
row + 0, col + 1, c, IH, IW, C);
int a10 = GetSrcData<ctype, format, bmode>::get_index(
row + 1, col + 0, c, IH, IW, C);
int a11 = GetSrcData<ctype, format, bmode>::get_index(
row + 1, col + 1, c, IH, IW, C);
dv -= ((a00 != -1) ? src[a00] : scalar) * (one - u);
dv += ((a01 != -1) ? src[a01] : scalar) * (one - u);
dv -= ((a10 != -1) ? src[a10] : scalar) * u;
dv += ((a11 != -1) ? src[a11] : scalar) * u;
du -= ((a00 != -1) ? src[a00] : scalar) * (one - v);
du -= ((a01 != -1) ? src[a01] : scalar) * v;
du += ((a10 != -1) ? src[a10] : scalar) * (one - v);
du += ((a11 != -1) ? src[a11] : scalar) * v;
grad[oh * OW * 2 + ow * 2 + 0] += round_converter(hidden * dv);
grad[oh * OW * 2 + ow * 2 + 1] += round_converter(hidden * du);
}
}
}
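/* Gradient w.r.t. the sampling map computed above: with bilinear interpolation
 *   out = (1-u)*(1-v)*S00 + (1-u)*v*S01 + u*(1-v)*S10 + u*v*S11,
 * the partial derivatives accumulated into dv and du are
 *   d(out)/dv = (1-u)*(S01 - S00) + u*(S11 - S10)   (column / x direction)
 *   d(out)/du = (1-v)*(S10 - S00) + v*(S11 - S01)   (row / y direction)
 * and each map gradient element adds diff * d(out)/d{v,u}. Taps that fall
 * outside the image use `scalar` in BORDER_CONSTANT mode.
 */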
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void dispatch_backwardmat(const ctype* src, const float* map_xy,
const ctype* diff, float* grad, int N, int C, int IH,
int IW, int OH, int OW, float scalar,
hipStream_t stream) {
const int BX = 32, BY = 16;
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
cuda_check(hipMemsetAsync(
grad, 0, sizeof(float) * curr_batch_size * OH * OW * 2,
stream));
hipLaunchKernelGGL(( kern_general<ctype, format, bmode>), dim3(blocks), dim3(threads), 0, stream,
src, map_xy, diff, grad, C, IH, IW, OH, OW, scalar);
N -= curr_batch_size;
src += curr_batch_size * C * IH * IW;
diff += curr_batch_size * C * OH * OW;
map_xy += curr_batch_size * 2 * OH * OW;
grad += curr_batch_size * 2 * OH * OW;
}
}
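// Batching note: blockIdx.z selects the sample within the batch, and the CUDA
// grid z-dimension is limited to 65535, hence max_batch_size; larger batches
// are processed in chunks of at most 65535 samples per launch.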
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace remap {
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void backwardmat_proxy(const ctype* src, const float* map_xy, const ctype* diff,
float* grad, int N, int C, int IH, int IW, int OH,
int OW, float scalar, hipStream_t stream) {
dispatch_backwardmat<ctype, format, bmode>(src, map_xy, diff, grad, N, C,
IH, IW, OH, OW, scalar, stream);
after_kernel_launch();
}
#define INST(ctype, format, bmode) \
template void backwardmat_proxy<ctype, param_enumv::Remap::Format::format, \
::BorderMode::bmode>( \
const ctype*, const float*, const ctype*, float*, int, int, int, \
int, int, int, float, hipStream_t);
#define FOR_FORMAT_BMODE(ctype) \
INST(ctype, NCHW, BORDER_CONSTANT) \
INST(ctype, NCHW, BORDER_REPLICATE) \
INST(ctype, NCHW, BORDER_REFLECT) \
INST(ctype, NCHW, BORDER_REFLECT_101) \
INST(ctype, NCHW, BORDER_WRAP)
FOR_FORMAT_BMODE(float)
MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16))
#undef FOR_FORMAT_BMODE
#undef INST
} // namespace remap
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
a02d29331d4207327da8498f083b71235c22d3b1.cu
|
/**
* \file dnn/src/cuda/remap/backward_mat.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include <cuda_runtime.h>
#include "src/common/rounding_converter.cuh"
#include "src/cuda/cv/kernel_common.cuh"
#include "src/cuda/remap/common.h"
#include "src/cuda/utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace remap;
using namespace rounding;
namespace {
template <const uint32_t format>
__device__ inline int get_offset(int height, int width, int channel, int h,
int w, int c);
template <>
__device__ inline int get_offset<param_enumv::Remap::Format::NCHW>(
int height, int width, int channel, int h, int w, int c) {
return channel * h * w + height * w + width;
}
template <typename ctype, const uint32_t format, ::BorderMode bmode>
struct GetSrcData {
__device__ static inline int get_index(int height, int width, int channel,
int h, int w, int c) {
height = megcv::border_interpolate<bmode>(height, h);
width = megcv::border_interpolate<bmode>(width, w);
return get_offset<format>(height, width, channel, h, w, c);
}
};
template <typename ctype, const uint32_t format>
struct GetSrcData<ctype, format, ::BorderMode::BORDER_CONSTANT> {
__device__ static inline int get_index(int height, int width, int channel,
int h, int w, int c) {
return (height >= 0 && height < h && width >= 0 && width < w)
? get_offset<format>(height, width, channel, h, w, c)
: -1;
}
};
template <typename ctype, const uint32_t format, ::BorderMode bmode>
__global__ void kern_general(const ctype* src, const float* map_xy,
const ctype* diff, float* __restrict grad, int C,
int IH, int IW, int OH, int OW, float scalar) {
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
src += blockIdx.z * C * IH * IW;
diff += blockIdx.z * C * OH * OW;
map_xy += blockIdx.z * 2 * OH * OW;
grad += blockIdx.z * 2 * OH * OW;
RoundingConverter<ctype> round_converter;
if (ow < OW && oh < OH) {
float index_col = map_xy[oh * OW * 2 + ow * 2 + 0];
float index_row = map_xy[oh * OW * 2 + ow * 2 + 1];
int col = static_cast<int>(floor(index_col));
int row = static_cast<int>(floor(index_row));
float v = index_col - col; // alphaw
float u = index_row - row; // alphah
const float one = 1.f;
for (int c = 0; c < C; ++c) {
float hidden = static_cast<float>(
diff[get_offset<format>(
oh, ow, c, OH, OW, C)]);
float du = 0.f, dv = 0.f;
int a00 = GetSrcData<ctype, format, bmode>::get_index(
row + 0, col + 0, c, IH, IW, C);
int a01 = GetSrcData<ctype, format, bmode>::get_index(
row + 0, col + 1, c, IH, IW, C);
int a10 = GetSrcData<ctype, format, bmode>::get_index(
row + 1, col + 0, c, IH, IW, C);
int a11 = GetSrcData<ctype, format, bmode>::get_index(
row + 1, col + 1, c, IH, IW, C);
dv -= ((a00 != -1) ? src[a00] : scalar) * (one - u);
dv += ((a01 != -1) ? src[a01] : scalar) * (one - u);
dv -= ((a10 != -1) ? src[a10] : scalar) * u;
dv += ((a11 != -1) ? src[a11] : scalar) * u;
du -= ((a00 != -1) ? src[a00] : scalar) * (one - v);
du -= ((a01 != -1) ? src[a01] : scalar) * v;
du += ((a10 != -1) ? src[a10] : scalar) * (one - v);
du += ((a11 != -1) ? src[a11] : scalar) * v;
grad[oh * OW * 2 + ow * 2 + 0] += round_converter(hidden * dv);
grad[oh * OW * 2 + ow * 2 + 1] += round_converter(hidden * du);
}
}
}
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void dispatch_backwardmat(const ctype* src, const float* map_xy,
const ctype* diff, float* grad, int N, int C, int IH,
int IW, int OH, int OW, float scalar,
cudaStream_t stream) {
const int BX = 32, BY = 16;
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
cuda_check(cudaMemsetAsync(
grad, 0, sizeof(float) * curr_batch_size * OH * OW * 2,
stream));
kern_general<ctype, format, bmode><<<blocks, threads, 0, stream>>>(
src, map_xy, diff, grad, C, IH, IW, OH, OW, scalar);
N -= curr_batch_size;
src += curr_batch_size * C * IH * IW;
diff += curr_batch_size * C * OH * OW;
map_xy += curr_batch_size * 2 * OH * OW;
grad += curr_batch_size * 2 * OH * OW;
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace remap {
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void backwardmat_proxy(const ctype* src, const float* map_xy, const ctype* diff,
float* grad, int N, int C, int IH, int IW, int OH,
int OW, float scalar, cudaStream_t stream) {
dispatch_backwardmat<ctype, format, bmode>(src, map_xy, diff, grad, N, C,
IH, IW, OH, OW, scalar, stream);
after_kernel_launch();
}
#define INST(ctype, format, bmode) \
template void backwardmat_proxy<ctype, param_enumv::Remap::Format::format, \
::BorderMode::bmode>( \
const ctype*, const float*, const ctype*, float*, int, int, int, \
int, int, int, float, cudaStream_t);
#define FOR_FORMAT_BMODE(ctype) \
INST(ctype, NCHW, BORDER_CONSTANT) \
INST(ctype, NCHW, BORDER_REPLICATE) \
INST(ctype, NCHW, BORDER_REFLECT) \
INST(ctype, NCHW, BORDER_REFLECT_101) \
INST(ctype, NCHW, BORDER_WRAP)
FOR_FORMAT_BMODE(float)
MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16))
#undef FOR_FORMAT_BMODE
#undef INST
} // namespace remap
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
c875d5cb3ba2a390ef83a93d261b900c2b405b6a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2015 Kai Zhang ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define EMULATE_NVM_BW
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include <byteswap.h>
#include <map>
#include "gpu_hash_mm.h"
#include "zipf.h"
#define HASH_BLOCK_ELEM_NUM (BUC_NUM/INSERT_BLOCK)
#define BLOCK_ELEM_NUM (SELEM_NUM/INSERT_BLOCK)
#define LOAD_FACTOR 1 / 8
#define PRELOAD_CNT (uint32_t)(((1 << 30)/8) * LOAD_FACTOR)
#define TOTAL_CNT (((uint32_t)1 << 31) - 1)
#define ZIPF_THETA 0.99
double persist_time = 0, operation_time = 0, memcpy_time = 0;
#define TIME_NOW std::chrono::high_resolution_clock::now()
#define time_diff(a, b) std::chrono::duration_cast<std::chrono::microseconds>(a - b).count()
int main(int argc, char *argv[])
{
ddio_on();
int SELEM_NUM, THREAD_NUM;
if (argc != 3) {
printf("usage: ./run #elem_num #thread_num, now running with 16384\n");
SELEM_NUM = 16384 * 128;
THREAD_NUM = 16384 * 2;
} else {
SELEM_NUM = atoi(argv[1]);
THREAD_NUM = atoi(argv[2]);
}
printf("elem_num is %d, thread_num is %d\n", SELEM_NUM, THREAD_NUM);
struct zipf_gen_state zipf_state;
mehcached_zipf_init(&zipf_state, (uint64_t)PRELOAD_CNT - 2, (double)ZIPF_THETA, (uint64_t)21);
uint8_t *device_hash_table;
uint8_t *device_in;
uint8_t *host_in;
ielem_t *blk_input_h[INSERT_BLOCK];
int blk_elem_num_h[INSERT_BLOCK];
ielem_t **blk_input_d;
int *blk_elem_num_d;
int i;
std::map<selem_t, loc_t> cpu_map;
uint8_t *device_search_in;
uint8_t *device_search_out;
uint8_t *host_search_in;
uint8_t *host_search_out;
uint8_t *host_search_verify;
CUDA_SAFE_CALL(hipMalloc((void **)&(device_hash_table), HT_SIZE));
CUDA_SAFE_CALL(hipMemset((void *)device_hash_table, 0, HT_SIZE));
// Allocate memory for preloading keys into KVS
CUDA_SAFE_CALL(hipMalloc((void **)&(device_in), PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipMemset((void *)device_in, 0, PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_in), PRELOAD_CNT * sizeof(ielem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*((int)PRELOAD_CNT/INSERT_BLOCK)]);
blk_elem_num_h[i] = 0;
}
// for search
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_in), PRELOAD_CNT * sizeof(selem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_in), PRELOAD_CNT * sizeof(selem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_out), 2 * PRELOAD_CNT * sizeof(loc_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_out), 2 * PRELOAD_CNT * sizeof(loc_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_verify), PRELOAD_CNT * sizeof(loc_t), hipHostMallocDefault));
// Generate keys
printf("Generate %d keys\n", PRELOAD_CNT);
int num_keys = PRELOAD_CNT / INSERT_BLOCK;
for(int i = 0; i < PRELOAD_CNT; ++i) {
int blk = (i + 1) % INSERT_BLOCK;
int index = num_keys * blk + blk_elem_num_h[blk];
// sig
((ielem_t *)host_in)[index].sig =
((selem_t *)host_search_in)[index].sig =
(i + 1);
// hash
((ielem_t *)host_in)[index].hash =
((selem_t *)host_search_in)[index].hash =
(i + 1);
// loc
((ielem_t *)host_in)[index].loc = (loc_t)rand();
cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[index].loc;
blk_elem_num_h[blk]++;
}
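	// Keys use (sig, hash) = (i+1, i+1) and are dealt round-robin across the
	// INSERT_BLOCK partitions; cpu_map records the expected location of every
	// key so the GPU lookups can be checked against host-side ground truth.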
CUDA_SAFE_CALL(hipMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_in, host_in, PRELOAD_CNT * sizeof(ielem_t), hipMemcpyHostToDevice));
int is_pmem;
size_t file_size;
void *imkv_file = pmem_map_file("/pmem/imkv_mm", HT_SIZE, PMEM_FILE_CREATE, S_IRWXU | S_IRWXG | S_IRWXO, &file_size, &is_pmem);
// Insert preload keys
printf("Preload %d keys\n", PRELOAD_CNT);
double ins_time = 0, search_time = 0, del_time = 0;
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d, (int *)blk_elem_num_d,
INSERT_BLOCK, PRELOAD_CNT, imkv_file,
0, operation_time, memcpy_time, persist_time);
// verify with search
CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in, PRELOAD_CNT * sizeof(selem_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 1, 2 * PRELOAD_CNT * sizeof(loc_t)));
printf("Verify %d keys\n", PRELOAD_CNT);
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, PRELOAD_CNT, THREAD_NUM, 128, 0);
hipDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out,
2 * PRELOAD_CNT * sizeof(loc_t), hipMemcpyDeviceToHost));
for (i = 0; i < PRELOAD_CNT; i ++) {
loc_t loc = cpu_map[selem_t(((ielem_t *)host_in)[i].sig, ((ielem_t *)host_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
loc);
}
}
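	// Each lookup returns two candidate locations per key (host_search_out holds
	// 2 * PRELOAD_CNT loc_t values), so both slots i<<1 and (i<<1)+1 are compared
	// against the expected location; presumably the table keeps a key in one of
	// two possible buckets.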
// Free memory for preload
CUDA_SAFE_CALL(hipFree(device_in));
CUDA_SAFE_CALL(hipFree(blk_input_d));
CUDA_SAFE_CALL(hipFree(blk_elem_num_d));
CUDA_SAFE_CALL(hipFree(device_search_in));
CUDA_SAFE_CALL(hipFree(device_search_out));
CUDA_SAFE_CALL(hipHostFree(host_in));
CUDA_SAFE_CALL(hipHostFree(host_search_in));
CUDA_SAFE_CALL(hipHostFree(host_search_out));
CUDA_SAFE_CALL(hipHostFree(host_search_verify));
// Allocate for actual insert/searches
CUDA_SAFE_CALL(hipMalloc((void **)&(device_in), SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipMemset((void *)device_in, 0, SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_in), SELEM_NUM * sizeof(ielem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(hipMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*(SELEM_NUM/INSERT_BLOCK)]);
blk_elem_num_h[i] = SELEM_NUM/INSERT_BLOCK;
}
CUDA_SAFE_CALL(hipMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), hipMemcpyHostToDevice));
// for search
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_in), SELEM_NUM * sizeof(selem_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_in), SELEM_NUM * sizeof(selem_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_out), 2 * SELEM_NUM * sizeof(loc_t)));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_out), 2 * SELEM_NUM * sizeof(loc_t), hipHostMallocDefault));
CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_verify), SELEM_NUM * sizeof(loc_t), hipHostMallocDefault));
//host_search_verify = (uint8_t *)malloc(SELEM_NUM * sizeof(loc_t));
// start
CUDA_SAFE_CALL(hipDeviceSynchronize());
int lower_bond;
ins_time = 0, search_time = 0, del_time = 0;
persist_time = 0, operation_time = 0, memcpy_time = 0;
int num_ops = 100;
int num_get = 95;
int num_set = num_ops - num_get;
for (int has = 0; has < num_ops; has++) {
int selection = rand() % (num_get + num_set);
if(selection < num_set) {
--num_set;
/* +++++++++++++++++++++++++++++++++++ INSERT +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
// sig
((selem_t *)host_search_in)[i].sig
= ((ielem_t *)host_in)[i].sig = rand();
// hash
((selem_t *)host_search_in)[i].hash
= ((ielem_t *)host_in)[i].hash
= lower_bond + rand() % HASH_BLOCK_ELEM_NUM;
// loc
((loc_t *)host_search_verify)[i]
= ((ielem_t *)host_in)[i].loc = (loc_t)rand();
//cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[i].loc;
//printf("%d\n", ((int *)host_search_verify)[i]);
}
//for debugging
for (i = 0; i < SELEM_NUM; i += 1) {
//printf("%d %d %d\n", ((int *)host_in)[i*3], (i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM,
//(i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM + BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash < (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM + HASH_BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash >= (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM);
}
auto start_time = TIME_NOW;
CUDA_SAFE_CALL(hipMemcpy(device_in, host_in, SELEM_NUM * sizeof(ielem_t), hipMemcpyHostToDevice));
hipDeviceSynchronize();
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d,
(int *)blk_elem_num_d, INSERT_BLOCK, SELEM_NUM, imkv_file,
0, operation_time, memcpy_time, persist_time);
CUDA_SAFE_CALL(hipDeviceSynchronize());
ins_time += time_diff(TIME_NOW, start_time)/ 1000.0f;
printf("Batch %d. INSERT: insert %f ms, search %f ms\n", has, ins_time, search_time);
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
// verify with search
CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
hipDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), hipMemcpyDeviceToHost));
for (i = 0; i < SELEM_NUM; i ++) {
if(((loc_t *)host_search_out)[i<<1] != ((loc_t *)host_search_verify)[i]
&& ((loc_t *)host_search_out)[(i<<1)+1] != ((loc_t *)host_search_verify)[i]) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
((loc_t *)host_search_verify)[i]);
assert(false);
}
}
}
else {
--num_get;
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
uint32_t get_key = (uint32_t)mehcached_zipf_next(&zipf_state) + 1;
assert(get_key < PRELOAD_CNT);
// sig
((selem_t *)host_search_in)[i].sig = get_key;
// hash
((selem_t *)host_search_in)[i].hash = get_key;
}
auto search_start = TIME_NOW;
CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
hipDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), hipMemcpyDeviceToHost));
search_time += (double)time_diff(TIME_NOW, search_start) / 1000.0;
for (i = 0; i < SELEM_NUM; i ++) {
loc_t loc = cpu_map[selem_t(((selem_t *)host_search_in)[i].sig, ((selem_t *)host_search_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d, key %d : out %lx and %lx, should be : %lx\n", i,
((selem_t *)host_search_in)[i].sig, ((loc_t *)host_search_out)[i<<1],
((loc_t *)host_search_out)[(i<<1)+1], loc);
assert(false);
}
}
printf("Batch %d. SEARCH: insert %f ms, search %f ms\n", has, ins_time, search_time);
}
}
printf("\nOperation execution time: %f ms\n", operation_time/1000000.0);
printf("MemcpyTime\t%f\tms\nPersistTime\t%f\n", memcpy_time/1000000.0, persist_time/1000000.0);
printf("\n\n");
printf("Insert: %f ms, search: %f ms\n", ins_time, search_time);
printf("Runtime\t%f\tms\n", ins_time + search_time);
return 0;
}
|
c875d5cb3ba2a390ef83a93d261b900c2b405b6a.cu
|
/*
* Copyright (c) 2015 Kai Zhang ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define EMULATE_NVM_BW
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <cuda_runtime.h>
#include <chrono>
#include <byteswap.h>
#include <map>
#include "gpu_hash_mm.h"
#include "zipf.h"
#define HASH_BLOCK_ELEM_NUM (BUC_NUM/INSERT_BLOCK)
#define BLOCK_ELEM_NUM (SELEM_NUM/INSERT_BLOCK)
#define LOAD_FACTOR 1 / 8
#define PRELOAD_CNT (uint32_t)(((1 << 30)/8) * LOAD_FACTOR)
#define TOTAL_CNT (((uint32_t)1 << 31) - 1)
#define ZIPF_THETA 0.99
double persist_time = 0, operation_time = 0, memcpy_time = 0;
#define TIME_NOW std::chrono::high_resolution_clock::now()
#define time_diff(a, b) std::chrono::duration_cast<std::chrono::microseconds>(a - b).count()
int main(int argc, char *argv[])
{
ddio_on();
int SELEM_NUM, THREAD_NUM;
if (argc != 3) {
printf("usage: ./run #elem_num #thread_num, now running with 16384\n");
SELEM_NUM = 16384 * 128;
THREAD_NUM = 16384 * 2;
} else {
SELEM_NUM = atoi(argv[1]);
THREAD_NUM = atoi(argv[2]);
}
printf("elem_num is %d, thread_num is %d\n", SELEM_NUM, THREAD_NUM);
struct zipf_gen_state zipf_state;
mehcached_zipf_init(&zipf_state, (uint64_t)PRELOAD_CNT - 2, (double)ZIPF_THETA, (uint64_t)21);
uint8_t *device_hash_table;
uint8_t *device_in;
uint8_t *host_in;
ielem_t *blk_input_h[INSERT_BLOCK];
int blk_elem_num_h[INSERT_BLOCK];
ielem_t **blk_input_d;
int *blk_elem_num_d;
int i;
std::map<selem_t, loc_t> cpu_map;
uint8_t *device_search_in;
uint8_t *device_search_out;
uint8_t *host_search_in;
uint8_t *host_search_out;
uint8_t *host_search_verify;
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_hash_table), HT_SIZE));
CUDA_SAFE_CALL(cudaMemset((void *)device_hash_table, 0, HT_SIZE));
// Allocate memory for preloading keys into KVS
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_in), PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaMemset((void *)device_in, 0, PRELOAD_CNT * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_in), PRELOAD_CNT * sizeof(ielem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*((int)PRELOAD_CNT/INSERT_BLOCK)]);
blk_elem_num_h[i] = 0;
}
// for search
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_in), PRELOAD_CNT * sizeof(selem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_in), PRELOAD_CNT * sizeof(selem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_out), 2 * PRELOAD_CNT * sizeof(loc_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_out), 2 * PRELOAD_CNT * sizeof(loc_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_verify), PRELOAD_CNT * sizeof(loc_t), cudaHostAllocDefault));
// Generate keys
printf("Generate %d keys\n", PRELOAD_CNT);
int num_keys = PRELOAD_CNT / INSERT_BLOCK;
for(int i = 0; i < PRELOAD_CNT; ++i) {
int blk = (i + 1) % INSERT_BLOCK;
int index = num_keys * blk + blk_elem_num_h[blk];
// sig
((ielem_t *)host_in)[index].sig =
((selem_t *)host_search_in)[index].sig =
(i + 1);
// hash
((ielem_t *)host_in)[index].hash =
((selem_t *)host_search_in)[index].hash =
(i + 1);
// loc
((ielem_t *)host_in)[index].loc = (loc_t)rand();
cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[index].loc;
blk_elem_num_h[blk]++;
}
CUDA_SAFE_CALL(cudaMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_in, host_in, PRELOAD_CNT * sizeof(ielem_t), cudaMemcpyHostToDevice));
int is_pmem;
size_t file_size;
void *imkv_file = pmem_map_file("/pmem/imkv_mm", HT_SIZE, PMEM_FILE_CREATE, S_IRWXU | S_IRWXG | S_IRWXO, &file_size, &is_pmem);
// Insert preload keys
printf("Preload %d keys\n", PRELOAD_CNT);
double ins_time = 0, search_time = 0, del_time = 0;
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d, (int *)blk_elem_num_d,
INSERT_BLOCK, PRELOAD_CNT, imkv_file,
0, operation_time, memcpy_time, persist_time);
// verify with search
CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in, PRELOAD_CNT * sizeof(selem_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 1, 2 * PRELOAD_CNT * sizeof(loc_t)));
printf("Verify %d keys\n", PRELOAD_CNT);
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, PRELOAD_CNT, THREAD_NUM, 128, 0);
cudaDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out,
2 * PRELOAD_CNT * sizeof(loc_t), cudaMemcpyDeviceToHost));
for (i = 0; i < PRELOAD_CNT; i ++) {
loc_t loc = cpu_map[selem_t(((ielem_t *)host_in)[i].sig, ((ielem_t *)host_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
loc);
}
}
// Free memory for preload
CUDA_SAFE_CALL(cudaFree(device_in));
CUDA_SAFE_CALL(cudaFree(blk_input_d));
CUDA_SAFE_CALL(cudaFree(blk_elem_num_d));
CUDA_SAFE_CALL(cudaFree(device_search_in));
CUDA_SAFE_CALL(cudaFree(device_search_out));
CUDA_SAFE_CALL(cudaFreeHost(host_in));
CUDA_SAFE_CALL(cudaFreeHost(host_search_in));
CUDA_SAFE_CALL(cudaFreeHost(host_search_out));
CUDA_SAFE_CALL(cudaFreeHost(host_search_verify));
// Allocate for actual insert/searches
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_in), SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaMemset((void *)device_in, 0, SELEM_NUM * sizeof(ielem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_in), SELEM_NUM * sizeof(ielem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));
for (i = 0; i < INSERT_BLOCK; i ++) {
blk_input_h[i] = &(((ielem_t *)device_in)[i*(SELEM_NUM/INSERT_BLOCK)]);
blk_elem_num_h[i] = SELEM_NUM/INSERT_BLOCK;
}
CUDA_SAFE_CALL(cudaMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), cudaMemcpyHostToDevice));
// for search
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_in), SELEM_NUM * sizeof(selem_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_in), SELEM_NUM * sizeof(selem_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_out), 2 * SELEM_NUM * sizeof(loc_t)));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_out), 2 * SELEM_NUM * sizeof(loc_t), cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_verify), SELEM_NUM * sizeof(loc_t), cudaHostAllocDefault));
//host_search_verify = (uint8_t *)malloc(SELEM_NUM * sizeof(loc_t));
// start
CUDA_SAFE_CALL(cudaDeviceSynchronize());
int lower_bond;
ins_time = 0, search_time = 0, del_time = 0;
persist_time = 0, operation_time = 0, memcpy_time = 0;
int num_ops = 100;
int num_get = 95;
int num_set = num_ops - num_get;
for (int has = 0; has < num_ops; has++) {
int selection = rand() % (num_get + num_set);
if(selection < num_set) {
--num_set;
/* +++++++++++++++++++++++++++++++++++ INSERT +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
// sig
((selem_t *)host_search_in)[i].sig
= ((ielem_t *)host_in)[i].sig = rand();
// hash
((selem_t *)host_search_in)[i].hash
= ((ielem_t *)host_in)[i].hash
= lower_bond + rand() % HASH_BLOCK_ELEM_NUM;
// loc
((loc_t *)host_search_verify)[i]
= ((ielem_t *)host_in)[i].loc = (loc_t)rand();
//cpu_map[selem_t(i+1, i+1)] = ((ielem_t *)host_in)[i].loc;
//printf("%d\n", ((int *)host_search_verify)[i]);
}
//for debugging
for (i = 0; i < SELEM_NUM; i += 1) {
//printf("%d %d %d\n", ((int *)host_in)[i*3], (i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM,
//(i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM + BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash < (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM + HASH_BLOCK_ELEM_NUM);
assert(((ielem_t *)host_in)[i].hash >= (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM);
}
auto start_time = TIME_NOW;
CUDA_SAFE_CALL(cudaMemcpy(device_in, host_in, SELEM_NUM * sizeof(ielem_t), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
gpu_hash_insert((bucket_t *)device_hash_table,
(ielem_t **)blk_input_d,
(int *)blk_elem_num_d, INSERT_BLOCK, SELEM_NUM, imkv_file,
0, operation_time, memcpy_time, persist_time);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
ins_time += time_diff(TIME_NOW, start_time)/ 1000.0f;
printf("Batch %d. INSERT: insert %f ms, search %f ms\n", has, ins_time, search_time);
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
// verify with search
CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
cudaDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), cudaMemcpyDeviceToHost));
for (i = 0; i < SELEM_NUM; i ++) {
if(((loc_t *)host_search_out)[i<<1] != ((loc_t *)host_search_verify)[i]
&& ((loc_t *)host_search_out)[(i<<1)+1] != ((loc_t *)host_search_verify)[i]) {
printf("not found insertion %d : out %lx and %lx, should be : %lx\n", i,
((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
((loc_t *)host_search_verify)[i]);
assert(false);
}
}
}
else {
--num_get;
/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
for (i = 0; i < SELEM_NUM; i += 1) {
lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
uint32_t get_key = (uint32_t)mehcached_zipf_next(&zipf_state) + 1;
assert(get_key < PRELOAD_CNT);
// sig
((selem_t *)host_search_in)[i].sig = get_key;
// hash
((selem_t *)host_search_in)[i].hash = get_key;
}
auto search_start = TIME_NOW;
CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in,
SELEM_NUM * sizeof(selem_t), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
// ---------------------------
gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
cudaDeviceSynchronize();
// ---------------------------
CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out,
2 * SELEM_NUM * sizeof(loc_t), cudaMemcpyDeviceToHost));
search_time += (double)time_diff(TIME_NOW, search_start) / 1000.0;
for (i = 0; i < SELEM_NUM; i ++) {
loc_t loc = cpu_map[selem_t(((selem_t *)host_search_in)[i].sig, ((selem_t *)host_search_in)[i].hash)];
if(((loc_t *)host_search_out)[i<<1] != loc
&& ((loc_t *)host_search_out)[(i<<1)+1] != loc) {
printf("not found insertion %d, key %d : out %lx and %lx, should be : %lx\n", i,
((selem_t *)host_search_in)[i].sig, ((loc_t *)host_search_out)[i<<1],
((loc_t *)host_search_out)[(i<<1)+1], loc);
assert(false);
}
}
printf("Batch %d. SEARCH: insert %f ms, search %f ms\n", has, ins_time, search_time);
}
}
printf("\nOperation execution time: %f ms\n", operation_time/1000000.0);
printf("MemcpyTime\t%f\tms\nPersistTime\t%f\n", memcpy_time/1000000.0, persist_time/1000000.0);
printf("\n\n");
printf("Insert: %f ms, search: %f ms\n", ins_time, search_time);
printf("Runtime\t%f\tms\n", ins_time + search_time);
return 0;
}
|
1ecf65369362f9cf2e727e0e9f6a97911aa97765.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
hipMemPrefetchAsync(c, size, deviceId);
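  /*
   * Prefetch the managed allocations to the GPU so the kernels below do not
   * pay for on-demand page migration on first touch.
   */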
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
/*
 * Create 3 streams to initialize the 3 data vectors in parallel.
*/
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
/*
 * Give each `initWith` launch its own non-default stream.
*/
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream1, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream2, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream3, 0, c, N);
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
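  /*
   * No explicit synchronization with stream1-3 is needed before this launch:
   * streams created with hipStreamCreate are blocking streams, and under the
   * legacy default-stream semantics a kernel launched on the default (0)
   * stream does not start until their previously submitted work has finished.
   */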
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
checkElementsAre(7, c, N);
/*
* Destroy streams when they are no longer needed.
*/
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipFree(a);
hipFree(b);
hipFree(c);
}
|
1ecf65369362f9cf2e727e0e9f6a97911aa97765.cu
|
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
/*
 * Create 3 streams to initialize the 3 data vectors in parallel.
*/
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
/*
 * Give each `initWith` launch its own non-default stream.
*/
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream2>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream3>>>(0, c, N);
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
checkElementsAre(7, c, N);
/*
* Destroy streams when they are no longer needed.
*/
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
272a59302713d731f73880977d7f21ef7dbeb420.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform X-ray parallel projection
 * operation given a geometry, angles and image. It uses the 3D texture
 * memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "ray_interpolated_projection_parallel.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
texture<float, hipTextureType3D , hipReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector_parallel( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin,
float DSO,
float maxdist){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
//now length is the integer number of samples required along this line
length=ceil(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceil((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time.
}
float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const angles,int nangles){
// copy data to CUDA memory
hipArray *d_imagedata = 0;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(&copyParams);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
hipMalloc((void**)&dProjection, num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
// Block and grid sizes chosen empirically (32x32 blocks here)
dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1);
dim3 block(32,32,1);
Point3D source, deltaU, deltaV, uvOrigin;
float maxdist;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=angles[i*3];
geo.theta=angles[i*3+1];
geo.psi =angles[i*3+2];
//precompute distances for faster execution
maxdist=maxdistanceCuboid(geo,i);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Interpolation!!
hipLaunchKernelGGL(kernelPixelDetector_parallel, dim3(grid), dim3(block), 0, 0, geo, dProjection, source, deltaU, deltaV, uvOrigin, geo.DSO[i], floor(maxdist));
cudaCheckErrors("Kernel fail");
// copy result to host
hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
}
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dProjection);
hipFreeArray(d_imagedata);
cudaCheckErrors("hipFree d_imagedata fail");
hipDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offsetting the image, offset everything else by the negative)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it's offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
|
272a59302713d731f73880977d7f21ef7dbeb420.cu
|
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform the X-ray parallel projection
 * operation given a geometry, angles and an image. It uses 3D texture
 * memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "ray_interpolated_projection_parallel.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector_parallel( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin,
float DSO,
float maxdist){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
//now length is the integer number of samples required along this line
length=ceil(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceil((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time.
}
float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const angles,int nangles){
// copy data to CUDA memory
cudaArray *d_imagedata = 0;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&copyParams);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
cudaMalloc((void**)&dProjection, num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
// Block and grid sizes chosen empirically (32x32 blocks here)
dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1);
dim3 block(32,32,1);
Point3D source, deltaU, deltaV, uvOrigin;
float maxdist;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=angles[i*3];
geo.theta=angles[i*3+1];
geo.psi =angles[i*3+2];
//precompute distances for faster execution
maxdist=maxdistanceCuboid(geo,i);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Interpolation!!
kernelPixelDetector_parallel<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin,geo.DSO[i],floor(maxdist));
cudaCheckErrors("Kernel fail");
// copy result to host
cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
}
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dProjection);
cudaFreeArray(d_imagedata);
cudaCheckErrors("cudaFree d_imagedata fail");
cudaDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offsetting the image, offset everything else by the negative)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it's offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
|
a9537489a0af614f9d16056af926b4f4f32d9d63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/transpose_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data, Dtype* to_data,
const int* from_counts, const int* to_counts, const int* map, const int num_axes, int* buf) {
CUDA_KERNEL_LOOP(index, nthreads) {
int* from_inds=buf + index * num_axes;
int from_index = index, to_index = 0;
for(int i = 0; i < num_axes; i++) {
from_inds[i] = from_index / from_counts[i];
from_index = from_index % from_counts[i];
}
for(int i = 0; i < num_axes; i++) {
to_index += from_inds[map[i]] * to_counts[i];
}
*(to_data + to_index) = *(from_data + index);
}
}
template <typename Dtype>
void TransposeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(transpose_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(),
bottom_counts_.gpu_data(), top_counts_.gpu_data(), forward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
template <typename Dtype>
void TransposeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int nthreads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(transpose_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(),
top_counts_.gpu_data(), bottom_counts_.gpu_data(), backward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FUNCS(TransposeLayer);
} // namespace caffe
|
a9537489a0af614f9d16056af926b4f4f32d9d63.cu
|
#include "caffe/layers/transpose_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data, Dtype* to_data,
const int* from_counts, const int* to_counts, const int* map, const int num_axes, int* buf) {
CUDA_KERNEL_LOOP(index, nthreads) {
int* from_inds=buf + index * num_axes;
int from_index = index, to_index = 0;
for(int i = 0; i < num_axes; i++) {
from_inds[i] = from_index / from_counts[i];
from_index = from_index % from_counts[i];
}
for(int i = 0; i < num_axes; i++) {
to_index += from_inds[map[i]] * to_counts[i];
}
*(to_data + to_index) = *(from_data + index);
}
}
template <typename Dtype>
void TransposeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(),
bottom_counts_.gpu_data(), top_counts_.gpu_data(), forward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
template <typename Dtype>
void TransposeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(),
top_counts_.gpu_data(), bottom_counts_.gpu_data(), backward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FUNCS(TransposeLayer);
} // namespace caffe
|
9409cde71128491e17c8afb3c401e88a36b82924.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=====================================================================
// MAIN FUNCTION
//=====================================================================
double master(fp timeinst,
fp* initvalu,
fp* parameter,
fp* finavalu,
fp* com,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com)
{
//=====================================================================
// VARIABLES
//=====================================================================
// counters
int i;
// offset pointers
int initvalu_offset_ecc;
int initvalu_offset_Dyad;
int initvalu_offset_SL;
int initvalu_offset_Cyt;
dim3 threads;
dim3 blocks;
//=====================================================================
// execute ECC&CAM kernel - it runs ECC and CAMs in parallel
//=====================================================================
int d_initvalu_mem;
d_initvalu_mem = EQUATIONS * sizeof(fp);
int d_finavalu_mem;
d_finavalu_mem = EQUATIONS * sizeof(fp);
int d_params_mem;
d_params_mem = PARAMETERS * sizeof(fp);
int d_com_mem;
d_com_mem = 3 * sizeof(fp);
#ifdef DEBUG
for (int i = 0; i < EQUATIONS; i++)
printf("initvalu %d %f\n", i, initvalu[i]);
for (int i = 0; i < PARAMETERS; i++)
printf("params %d %f\n", i, parameter[i]);
printf("\n");
#endif
hipMemcpy(d_initvalu, initvalu, d_initvalu_mem, hipMemcpyHostToDevice);
hipMemcpy(d_params, parameter, d_params_mem, hipMemcpyHostToDevice);
threads.x = NUMBER_THREADS;
threads.y = 1;
blocks.x = 2;
blocks.y = 1;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(kernel, blocks, threads, 0, 0,
timeinst,
d_initvalu,
d_finavalu,
d_params,
d_com);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
hipMemcpy(finavalu, d_finavalu, d_finavalu_mem, hipMemcpyDeviceToHost);
hipMemcpy(com, d_com, d_com_mem, hipMemcpyDeviceToHost);
#ifdef DEBUG
for (int i = 0; i < EQUATIONS; i++)
printf("finavalu %d %f\n", i, finavalu[i]);
for (int i = 0; i < 3; i++)
printf("%f ", com[i]);
printf("\n");
#endif
//=====================================================================
// FINAL KERNEL
//=====================================================================
initvalu_offset_ecc = 0;
initvalu_offset_Dyad = 46;
initvalu_offset_SL = 61;
initvalu_offset_Cyt = 76;
kernel_fin(
initvalu,
initvalu_offset_ecc,
initvalu_offset_Dyad,
initvalu_offset_SL,
initvalu_offset_Cyt,
parameter,
finavalu,
com[0],
com[1],
com[2]);
//=====================================================================
// COMPENSATION FOR NANs and INFs
//=====================================================================
for(i=0; i<EQUATIONS; i++){
if (isnan(finavalu[i])){
finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
}
else if (isinf(finavalu[i])){
finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
}
}
return time;
}
|
9409cde71128491e17c8afb3c401e88a36b82924.cu
|
//=====================================================================
// MAIN FUNCTION
//=====================================================================
double master(fp timeinst,
fp* initvalu,
fp* parameter,
fp* finavalu,
fp* com,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com)
{
//=====================================================================
// VARIABLES
//=====================================================================
// counters
int i;
// offset pointers
int initvalu_offset_ecc;
int initvalu_offset_Dyad;
int initvalu_offset_SL;
int initvalu_offset_Cyt;
dim3 threads;
dim3 blocks;
//=====================================================================
// execute ECC&CAM kernel - it runs ECC and CAMs in parallel
//=====================================================================
int d_initvalu_mem;
d_initvalu_mem = EQUATIONS * sizeof(fp);
int d_finavalu_mem;
d_finavalu_mem = EQUATIONS * sizeof(fp);
int d_params_mem;
d_params_mem = PARAMETERS * sizeof(fp);
int d_com_mem;
d_com_mem = 3 * sizeof(fp);
#ifdef DEBUG
for (int i = 0; i < EQUATIONS; i++)
printf("initvalu %d %f\n", i, initvalu[i]);
for (int i = 0; i < PARAMETERS; i++)
printf("params %d %f\n", i, parameter[i]);
printf("\n");
#endif
hipMemcpy(d_initvalu, initvalu, d_initvalu_mem, hipMemcpyHostToDevice);
hipMemcpy(d_params, parameter, d_params_mem, hipMemcpyHostToDevice);
threads.x = NUMBER_THREADS;
threads.y = 1;
blocks.x = 2;
blocks.y = 1;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(kernel, blocks, threads, 0, 0,
timeinst,
d_initvalu,
d_finavalu,
d_params,
d_com);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
hipMemcpy(finavalu, d_finavalu, d_finavalu_mem, hipMemcpyDeviceToHost);
hipMemcpy(com, d_com, d_com_mem, hipMemcpyDeviceToHost);
#ifdef DEBUG
for (int i = 0; i < EQUATIONS; i++)
printf("finavalu %d %f\n", i, finavalu[i]);
for (int i = 0; i < 3; i++)
printf("%f ", com[i]);
printf("\n");
#endif
//=====================================================================
// FINAL KERNEL
//=====================================================================
initvalu_offset_ecc = 0;
initvalu_offset_Dyad = 46;
initvalu_offset_SL = 61;
initvalu_offset_Cyt = 76;
kernel_fin(
initvalu,
initvalu_offset_ecc,
initvalu_offset_Dyad,
initvalu_offset_SL,
initvalu_offset_Cyt,
parameter,
finavalu,
com[0],
com[1],
com[2]);
//=====================================================================
// COMPENSATION FOR NANs and INFs
//=====================================================================
for(i=0; i<EQUATIONS; i++){
if (isnan(finavalu[i])){
finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
}
else if (isinf(finavalu[i])){
finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
}
}
return time;
}
|
52bfe1bcb5b8bbaf8df925c14611b9881135a467.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int i, j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
/* Initialize old values array */
for (i = 1; i <= tpoints; i++)
oldval[i] = values[i];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
__global__ void update(float *values_d, float *oldval_d, float *newval_d, int nsteps, int tpoints)
{
int i;
int id = blockIdx.x * blockDim.x + threadIdx.x + 1;
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
/* Update values for each time step */
for (i = 1; i<= nsteps; i++) {
/* Update points along line for this time step */
/* global endpoints */
if (id > 0 && id <= tpoints) {
if ((id == 1) || (id == tpoints))
newval_d[id] = 0.0;
else
newval_d[id] = (2.0 * values_d[id]) - oldval_d[id] + (sqtau * (-2.0)*values_d[id]); // do_math()
}
/* Update old values with new values */
if (id > 0 && id <= tpoints) {
oldval_d[id] = values_d[id];
values_d[id] = newval_d[id];
}
}
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
printf("Initializing points on the line...\n");
init_line();
printf("Updating all points for all time steps...\n");
float *values_d, *oldval_d, *newval_d;
int size = (MAXPOINTS + 2) * sizeof(float);
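/* Allocate device copies of the three arrays and copy the host data over */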
hipMalloc(&values_d, size);
hipMemcpy(values_d, values, size, hipMemcpyHostToDevice);
hipMalloc(&oldval_d, size);
hipMemcpy(oldval_d, oldval, size, hipMemcpyHostToDevice);
hipMalloc(&newval_d, size);
hipMemcpy(newval_d, newval, size, hipMemcpyHostToDevice);
int tile = 256;
dim3 dimGrid(tile);
dim3 dimBlock(tpoints / tile + 1);
hipLaunchKernelGGL(update, dim3(dimGrid), dim3(dimBlock), 0, 0, values_d, oldval_d, newval_d, nsteps, tpoints);
hipDeviceSynchronize();
hipMemcpy(values, values_d, size, hipMemcpyDeviceToHost);
hipFree(values_d);
hipFree(oldval_d);
hipFree(newval_d);
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
|
52bfe1bcb5b8bbaf8df925c14611b9881135a467.cu
|
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int i, j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
/* Initialize old values array */
for (i = 1; i <= tpoints; i++)
oldval[i] = values[i];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
__global__ void update(float *values_d, float *oldval_d, float *newval_d, int nsteps, int tpoints)
{
int i;
int id = blockIdx.x * blockDim.x + threadIdx.x + 1;
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
/* Update values for each time step */
for (i = 1; i<= nsteps; i++) {
/* Update points along line for this time step */
/* global endpoints */
if (id > 0 && id <= tpoints) {
if ((id == 1) || (id == tpoints))
newval_d[id] = 0.0;
else
newval_d[id] = (2.0 * values_d[id]) - oldval_d[id] + (sqtau * (-2.0)*values_d[id]); // do_math()
}
/* Update old values with new values */
if (id > 0 && id <= tpoints) {
oldval_d[id] = values_d[id];
values_d[id] = newval_d[id];
}
}
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
printf("Initializing points on the line...\n");
init_line();
printf("Updating all points for all time steps...\n");
float *values_d, *oldval_d, *newval_d;
int size = (MAXPOINTS + 2) * sizeof(float);
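/* Allocate device copies of the three arrays and copy the host data over */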
cudaMalloc(&values_d, size);
cudaMemcpy(values_d, values, size, cudaMemcpyHostToDevice);
cudaMalloc(&oldval_d, size);
cudaMemcpy(oldval_d, oldval, size, cudaMemcpyHostToDevice);
cudaMalloc(&newval_d, size);
cudaMemcpy(newval_d, newval, size, cudaMemcpyHostToDevice);
int tile = 256;
dim3 dimGrid(tile);
dim3 dimBlock(tpoints / tile + 1);
update <<<dimGrid, dimBlock>>> (values_d, oldval_d, newval_d, nsteps, tpoints);
cudaDeviceSynchronize();
cudaMemcpy(values, values_d, size, cudaMemcpyDeviceToHost);
cudaFree(values_d);
cudaFree(oldval_d);
cudaFree(newval_d);
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
|
cc5546833a24f313984bab361013d2946142a980.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_gtScalar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
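// Round the matrix extents up to multiples of the block dimensions so the grid covers every element.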
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vec_gtScalar, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
hipDeviceSynchronize();
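// Warm-up launches so the timed loop below is not skewed by first-launch overhead.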
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vec_gtScalar, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
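// Timed loop: 1000 kernel launches measured with steady_clock (no device synchronization before the clock is stopped).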
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vec_gtScalar, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
cc5546833a24f313984bab361013d2946142a980.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_gtScalar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
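// Round the matrix extents up to multiples of the block dimensions so the grid covers every element.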
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_gtScalar<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
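// Warm-up launches so the timed loop below is not skewed by first-launch overhead.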
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_gtScalar<<<gridBlock,threadBlock>>>(n,result,x,y);
}
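// Timed loop: 1000 kernel launches measured with steady_clock (no device synchronization before the clock is stopped).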
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_gtScalar<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e1a3a303e5c727a45634a2283b0f003d1a71c1aa.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "matrix.h"
#include "nv_wavenet.cuh"
#include "nv_wavenet_util.cuh"
#include "nv_wavenet_reference.h"
#include <assert.h>
#include <stdio.h>
#include <vector>
Matrix* createMatrix(int r, int c) {
float mean = 0.0;
float scale = 0.5 / r;
Matrix* m = new Matrix(r,c,false);
m->randomize(mean,scale);
return m;
}
template <typename T_weight, typename T_data, int R, int S, int A>
void runTest(int num_layers, int max_dilation, int batch_size, int num_iterations, int samples_per_iteration, int impl, bool inputsFromDevice=false, bool weightsFromDevice=false) {
float mean = 0.0;
float scale = 0.5 / R;
// Just encode one-hot vector as an integer
std::vector<int> yInPrev(batch_size);
std::vector<int> yInCur(batch_size);
for (int b=0; b<batch_size; b++) {
yInPrev[b] = rand() % A;
yInCur[b] = rand() % A;
}
std::vector<int> yOut(batch_size);
Matrix outputSelectors(batch_size,samples_per_iteration);
outputSelectors.randomize(0.5,1.0);
Matrix embeddingsPrev(R,A,false);
Matrix embeddingsCur(R,A,false);
embeddingsPrev.randomize(mean,scale);
embeddingsCur.randomize(mean,scale);
std::vector<Matrix*> Wprev(num_layers);
std::vector<Matrix*> Wcur(num_layers);
std::vector<Matrix*> Bh(num_layers);
std::vector<Matrix*> Wres(num_layers);
std::vector<Matrix*> Bres(num_layers);
std::vector<Matrix*> Wskip(num_layers);
std::vector<Matrix*> Bskip(num_layers);
std::vector<Matrix*> skipOut(num_layers+1);
// Retain results for dilated inputs
std::vector<std::vector<Matrix*>> Xt(samples_per_iteration);
for (int sample=0; sample<samples_per_iteration; sample++) {
Xt[sample].resize(num_layers+1);
}
for (int l=0; l<num_layers; l++) {
// Weights
Wprev[l] = createMatrix(2*R,R);
Wcur[l] = createMatrix(2*R,R);
Bh[l] = createMatrix(2*R,1);
Wres[l] = createMatrix(R,R);
Bres[l] = createMatrix(R,1);
Wskip[l] = createMatrix(S,R);
Bskip[l] = createMatrix(S,1);
// Activations
skipOut[l] = createMatrix(S,batch_size);
}
for (int sample=0; sample<samples_per_iteration; sample++) {
for (int layer=0; layer<num_layers+1; layer++) {
Xt[sample][layer] = createMatrix(R, batch_size);
}
}
Matrix WskipOut(A,S,false);
WskipOut.randomize(mean,scale);
Matrix BskipOut(A,1,false);
BskipOut.randomize(mean, scale);
Matrix Wout(A,A,false);
Wout.randomize(mean,scale);
Matrix Bout(A,1,false);
Bout.randomize(mean,scale);
Matrix skipOutFinal(A,batch_size,false);
Matrix out(A,batch_size,false);
Matrix p(A,batch_size,false);
Matrix zero(S,batch_size,false);
for (int row = 0; row < S; row++) {
for (int col = 0; col < batch_size; col++) {
zero.set(row,col,0.f);
}
}
nvWavenetReference ref(num_layers, batch_size, samples_per_iteration, R, S, A, max_dilation);
nvWavenetInfer<T_weight,T_data,R,S,A>* infer = new nvWavenetInfer<T_weight,T_data,R,S,A>(num_layers, max_dilation, batch_size, samples_per_iteration, impl);
ref.setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
ref.setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
ref.setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
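// When requested, stage the weights through temporary device buffers to exercise the device-pointer setter path.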
if (weightsFromDevice) {
float* d_embeddingsPrev;
float* d_embeddingsCur;
gpuErrChk(hipMalloc(&d_embeddingsPrev, R*A*sizeof(float)));
gpuErrChk(hipMemcpy(d_embeddingsPrev, embeddingsPrev.data(), R*A*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_embeddingsCur, R*A*sizeof(float)));
gpuErrChk(hipMemcpy(d_embeddingsCur, embeddingsCur.data(), R*A*sizeof(float), hipMemcpyHostToDevice));
infer->setEmbeddings(d_embeddingsPrev, d_embeddingsCur);
gpuErrChk(hipFree(d_embeddingsPrev));
gpuErrChk(hipFree(d_embeddingsCur));
float* d_Wprev;
float* d_Wcur;
float* d_Bh;
float* d_Wres;
float* d_Bres;
float* d_Wskip;
float* d_Bskip;
for (int l=0; l<num_layers; l++) {
gpuErrChk(hipMalloc(&d_Wprev, 2*R*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wprev, Wprev[l]->data(), 2*R*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wcur, 2*R*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wcur, Wcur[l]->data(), 2*R*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bh, 2*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bh, Bh[l]->data(), 2*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wres, R*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wres, Wres[l]->data(), R*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bres, R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bres, Bres[l]->data(), R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wskip, S*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wskip, Wskip[l]->data(), S*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bskip, S*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bskip, Bskip[l]->data(), S*sizeof(float), hipMemcpyHostToDevice));
infer->setLayerWeights(l, d_Wprev, d_Wcur, d_Bh, d_Wres, d_Bres, d_Wskip, d_Bskip);
gpuErrChk(hipFree(d_Wprev));
gpuErrChk(hipFree(d_Wcur));
gpuErrChk(hipFree(d_Bh));
gpuErrChk(hipFree(d_Wres));
gpuErrChk(hipFree(d_Bres));
gpuErrChk(hipFree(d_Wskip));
gpuErrChk(hipFree(d_Bskip));
}
float* d_WskipOut;
float* d_BskipOut;
float* d_Wout;
float* d_Bout;
gpuErrChk(hipMalloc(&d_WskipOut, A*S*sizeof(float)));
gpuErrChk(hipMemcpy(d_WskipOut, WskipOut.data(), A*S*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_BskipOut, A*sizeof(float)));
gpuErrChk(hipMemcpy(d_BskipOut, BskipOut.data(), A*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wout, A*A*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wout, Wout.data(), A*A*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bout, A*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bout, Bout.data(), A*sizeof(float), hipMemcpyHostToDevice));
infer->setOutWeights(d_WskipOut, d_BskipOut, d_Wout, d_Bout);
gpuErrChk(hipFree(d_WskipOut));
gpuErrChk(hipFree(d_BskipOut));
gpuErrChk(hipFree(d_Wout));
gpuErrChk(hipFree(d_Bout));
}
else {
infer->setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
infer->setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
infer->setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
}
Matrix zeroMatrix(R,batch_size,false);
for (int row=0; row<R; row++) {
for (int col=0; col<batch_size; col++) {
zeroMatrix.set(row,col,0.f);
}
}
Matrix Lh(2*R,samples_per_iteration*num_layers*batch_size);
assert(Lh.data());
Lh.randomize(mean,scale);
ref.setInputs(Lh.data(), outputSelectors.data());
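// When requested, stage the conditioning inputs and output selectors through device memory as well.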
if (inputsFromDevice) {
float* d_Lh;
gpuErrChk(hipMalloc(&d_Lh, 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float)));
float* d_outputSelectors;
gpuErrChk(hipMalloc(&d_outputSelectors,samples_per_iteration*batch_size*sizeof(float)));
gpuErrChk(hipMemcpy(d_Lh, Lh.data(), 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(d_outputSelectors, outputSelectors.data(), samples_per_iteration*batch_size*sizeof(float), hipMemcpyHostToDevice));
infer->setInputs(d_Lh, d_outputSelectors);
gpuErrChk(hipFree(d_Lh));
gpuErrChk(hipFree(d_outputSelectors));
}
else {
infer->setInputs(Lh.data(), outputSelectors.data());
}
for (int i=0; i<num_iterations; i++) {
printf("Iteration: %d\n", i);
// Run reference implementation
int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1;
int* refYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
int* mcYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
ref.run(samples_per_iteration, batch_size, refYout);
assert(infer->run_chunks(7, [](int*, int, int){}, samples_per_iteration, batch_size, mcYout, batch_size_per_block));
gpuErrChk(hipDeviceSynchronize());
// Check results
for (int l=0; l<num_layers; l++) {
printf("Checking layer %d\n", l);
Matrix refXout(R,batch_size);
Matrix refSkipOut(S, batch_size);
ref.getXtOut(l, refXout.data());
ref.getSkipOut(l, refSkipOut.data());
Matrix mcXout(R,batch_size,false);
Matrix mcSkipOut(S,batch_size,false);
infer->getXtOut(l, mcXout.data());
infer->getSkipOut(l, mcSkipOut.data());
matrix_compare("Xout", refXout, mcXout, 1.e-3);
matrix_compare("skipOut", refSkipOut, mcSkipOut, 1.e-2, true);
}
Matrix refSkipOutFinal(A,batch_size);
ref.getZs(refSkipOutFinal.data());
Matrix mcSkipOutFinal(A,batch_size,false);
infer->getZs(mcSkipOutFinal.data());
matrix_compare("Zs", refSkipOutFinal, mcSkipOutFinal, 1.e-4, true);
Matrix refOut(A,batch_size);
ref.getZa(refOut.data());
Matrix mcOut(A,batch_size,false);
infer->getZa(mcOut.data());
matrix_compare("Za", refOut, mcOut, 1.e-4);
Matrix refP(A,batch_size);
ref.getP(refP.data());
Matrix mcP(A,batch_size,false);
infer->getP(mcP.data());
matrix_compare("p",refP,mcP,1.e-3);
printf("Comparing yOut\n");
for (int i=0; i<samples_per_iteration*batch_size; i++) {
assert(refYout[i] == mcYout[i]);
}
free(mcYout);
free(refYout);
printf("SUCCESS!\n");
}
// Clean up
delete infer;
for (int l=0; l<num_layers; l++) {
delete Wprev[l];
delete Wcur[l];
delete Bh[l];
delete Wres[l];
delete Bres[l];
delete Wskip[l];
delete Bskip[l];
for (int sample=0; sample<samples_per_iteration;sample++) {
delete Xt[sample][l];
}
delete skipOut[l];
}
}
int main(int argc, char* argv[]) {
int num_layers = 20;
int batch_size = 16;
if (argc > 1) num_layers = atoi(argv[1]);
if (argc > 2) batch_size = atoi(argv[2]);
// How many samples to generate each time we invoke the kernel
const int SAMPLES_PER_ITERATION = 8;
const int MAX_DILATION = SAMPLES_PER_ITERATION;
srand(3);
printf("Testing R=32, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
printf("Testing R=64, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, true, false);
printf(" Testing Dual-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, false, true);
printf(" Testing Persistent\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, true, true);
printf("Testing R=64, S=256\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
printf("Testing R=128, S=256\n");
printf(" Testing Persistent\n");
runTest<float,float,128,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
}
|
e1a3a303e5c727a45634a2283b0f003d1a71c1aa.cu
|
/******************************************************************************
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "matrix.h"
#include "nv_wavenet.cuh"
#include "nv_wavenet_util.cuh"
#include "nv_wavenet_reference.h"
#include <assert.h>
#include <stdio.h>
#include <vector>
Matrix* createMatrix(int r, int c) {
float mean = 0.0;
float scale = 0.5 / r;
Matrix* m = new Matrix(r,c,false);
m->randomize(mean,scale);
return m;
}
template <typename T_weight, typename T_data, int R, int S, int A>
void runTest(int num_layers, int max_dilation, int batch_size, int num_iterations, int samples_per_iteration, int impl, bool inputsFromDevice=false, bool weightsFromDevice=false) {
float mean = 0.0;
float scale = 0.5 / R;
// Just encode one-hot vector as an integer
std::vector<int> yInPrev(batch_size);
std::vector<int> yInCur(batch_size);
for (int b=0; b<batch_size; b++) {
yInPrev[b] = rand() % A;
yInCur[b] = rand() % A;
}
std::vector<int> yOut(batch_size);
Matrix outputSelectors(batch_size,samples_per_iteration);
outputSelectors.randomize(0.5,1.0);
Matrix embeddingsPrev(R,A,false);
Matrix embeddingsCur(R,A,false);
embeddingsPrev.randomize(mean,scale);
embeddingsCur.randomize(mean,scale);
std::vector<Matrix*> Wprev(num_layers);
std::vector<Matrix*> Wcur(num_layers);
std::vector<Matrix*> Bh(num_layers);
std::vector<Matrix*> Wres(num_layers);
std::vector<Matrix*> Bres(num_layers);
std::vector<Matrix*> Wskip(num_layers);
std::vector<Matrix*> Bskip(num_layers);
std::vector<Matrix*> skipOut(num_layers+1);
// Retain results for dilated inputs
std::vector<std::vector<Matrix*>> Xt(samples_per_iteration);
for (int sample=0; sample<samples_per_iteration; sample++) {
Xt[sample].resize(num_layers+1);
}
for (int l=0; l<num_layers; l++) {
// Weights
Wprev[l] = createMatrix(2*R,R);
Wcur[l] = createMatrix(2*R,R);
Bh[l] = createMatrix(2*R,1);
Wres[l] = createMatrix(R,R);
Bres[l] = createMatrix(R,1);
Wskip[l] = createMatrix(S,R);
Bskip[l] = createMatrix(S,1);
// Activations
skipOut[l] = createMatrix(S,batch_size);
}
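// Allocate num_layers+1 activation buffers per sample: the input to each layer plus the final layer output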
for (int sample=0; sample<samples_per_iteration; sample++) {
for (int layer=0; layer<num_layers+1; layer++) {
Xt[sample][layer] = createMatrix(R, batch_size);
}
}
Matrix WskipOut(A,S,false);
WskipOut.randomize(mean,scale);
Matrix BskipOut(A,1,false);
BskipOut.randomize(mean, scale);
Matrix Wout(A,A,false);
Wout.randomize(mean,scale);
Matrix Bout(A,1,false);
Bout.randomize(mean,scale);
Matrix skipOutFinal(A,batch_size,false);
Matrix out(A,batch_size,false);
Matrix p(A,batch_size,false);
Matrix zero(S,batch_size,false);
for (int row = 0; row < S; row++) {
for (int col = 0; col < batch_size; col++) {
zero.set(row,col,0.f);
}
}
nvWavenetReference ref(num_layers, batch_size, samples_per_iteration, R, S, A, max_dilation);
nvWavenetInfer<T_weight,T_data,R,S,A>* infer = new nvWavenetInfer<T_weight,T_data,R,S,A>(num_layers, max_dilation, batch_size, samples_per_iteration, impl);
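// 'ref' is the host-side reference implementation; 'infer' is the GPU implementation under test.
// 'impl' selects the kernel variant (1 = single-block, 2 = dual-block, 3 = persistent), matching the labels printed in main().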
ref.setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
ref.setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
ref.setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
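// Optionally stage the weights through temporary device buffers to exercise the device-pointer path of the setters;
// the buffers are freed right after each set*() call, so the setters are expected to copy out of them.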
if (weightsFromDevice) {
float* d_embeddingsPrev;
float* d_embeddingsCur;
gpuErrChk(cudaMalloc(&d_embeddingsPrev, R*A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_embeddingsPrev, embeddingsPrev.data(), R*A*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_embeddingsCur, R*A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_embeddingsCur, embeddingsCur.data(), R*A*sizeof(float), cudaMemcpyHostToDevice));
infer->setEmbeddings(d_embeddingsPrev, d_embeddingsCur);
gpuErrChk(cudaFree(d_embeddingsPrev));
gpuErrChk(cudaFree(d_embeddingsCur));
float* d_Wprev;
float* d_Wcur;
float* d_Bh;
float* d_Wres;
float* d_Bres;
float* d_Wskip;
float* d_Bskip;
for (int l=0; l<num_layers; l++) {
gpuErrChk(cudaMalloc(&d_Wprev, 2*R*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wprev, Wprev[l]->data(), 2*R*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wcur, 2*R*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wcur, Wcur[l]->data(), 2*R*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bh, 2*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bh, Bh[l]->data(), 2*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wres, R*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wres, Wres[l]->data(), R*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bres, R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bres, Bres[l]->data(), R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wskip, S*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wskip, Wskip[l]->data(), S*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bskip, S*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bskip, Bskip[l]->data(), S*sizeof(float), cudaMemcpyHostToDevice));
infer->setLayerWeights(l, d_Wprev, d_Wcur, d_Bh, d_Wres, d_Bres, d_Wskip, d_Bskip);
gpuErrChk(cudaFree(d_Wprev));
gpuErrChk(cudaFree(d_Wcur));
gpuErrChk(cudaFree(d_Bh));
gpuErrChk(cudaFree(d_Wres));
gpuErrChk(cudaFree(d_Bres));
gpuErrChk(cudaFree(d_Wskip));
gpuErrChk(cudaFree(d_Bskip));
}
float* d_WskipOut;
float* d_BskipOut;
float* d_Wout;
float* d_Bout;
gpuErrChk(cudaMalloc(&d_WskipOut, A*S*sizeof(float)));
gpuErrChk(cudaMemcpy(d_WskipOut, WskipOut.data(), A*S*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_BskipOut, A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_BskipOut, BskipOut.data(), A*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wout, A*A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wout, Wout.data(), A*A*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bout, A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bout, Bout.data(), A*sizeof(float), cudaMemcpyHostToDevice));
infer->setOutWeights(d_WskipOut, d_BskipOut, d_Wout, d_Bout);
gpuErrChk(cudaFree(d_WskipOut));
gpuErrChk(cudaFree(d_BskipOut));
gpuErrChk(cudaFree(d_Wout));
gpuErrChk(cudaFree(d_Bout));
}
else {
infer->setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
infer->setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
infer->setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
}
Matrix zeroMatrix(R,batch_size,false);
for (int row=0; row<R; row++) {
for (int col=0; col<batch_size; col++) {
zeroMatrix.set(row,col,0.f);
}
}
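// Lh supplies the conditioning features: 2*R values per layer, per sample and per batch lane, matching the 2R-wide gated (filter/gate) activations.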
Matrix Lh(2*R,samples_per_iteration*num_layers*batch_size);
assert(Lh.data());
Lh.randomize(mean,scale);
ref.setInputs(Lh.data(), outputSelectors.data());
if (inputsFromDevice) {
float* d_Lh;
gpuErrChk(cudaMalloc(&d_Lh, 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float)));
float* d_outputSelectors;
gpuErrChk(cudaMalloc(&d_outputSelectors,samples_per_iteration*batch_size*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Lh, Lh.data(), 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(d_outputSelectors, outputSelectors.data(), samples_per_iteration*batch_size*sizeof(float), cudaMemcpyHostToDevice));
infer->setInputs(d_Lh, d_outputSelectors);
gpuErrChk(cudaFree(d_Lh));
gpuErrChk(cudaFree(d_outputSelectors));
}
else {
infer->setInputs(Lh.data(), outputSelectors.data());
}
for (int i=0; i<num_iterations; i++) {
printf("Iteration: %d\n", i);
// Run reference implementation
int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1;
int* refYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
int* mcYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
ref.run(samples_per_iteration, batch_size, refYout);
bool run_ok = infer->run_chunks(7, [](int*, int, int){}, samples_per_iteration, batch_size, mcYout, batch_size_per_block);
assert(run_ok); (void)run_ok; // keep the GPU launch outside assert() so it still executes when NDEBUG is defined
gpuErrChk(cudaDeviceSynchronize());
// Check results
for (int l=0; l<num_layers; l++) {
printf("Checking layer %d\n", l);
Matrix refXout(R,batch_size);
Matrix refSkipOut(S, batch_size);
ref.getXtOut(l, refXout.data());
ref.getSkipOut(l, refSkipOut.data());
Matrix mcXout(R,batch_size,false);
Matrix mcSkipOut(S,batch_size,false);
infer->getXtOut(l, mcXout.data());
infer->getSkipOut(l, mcSkipOut.data());
matrix_compare("Xout", refXout, mcXout, 1.e-3);
matrix_compare("skipOut", refSkipOut, mcSkipOut, 1.e-2, true);
}
Matrix refSkipOutFinal(A,batch_size);
ref.getZs(refSkipOutFinal.data());
Matrix mcSkipOutFinal(A,batch_size,false);
infer->getZs(mcSkipOutFinal.data());
matrix_compare("Zs", refSkipOutFinal, mcSkipOutFinal, 1.e-4, true);
Matrix refOut(A,batch_size);
ref.getZa(refOut.data());
Matrix mcOut(A,batch_size,false);
infer->getZa(mcOut.data());
matrix_compare("Za", refOut, mcOut, 1.e-4);
Matrix refP(A,batch_size);
ref.getP(refP.data());
Matrix mcP(A,batch_size,false);
infer->getP(mcP.data());
matrix_compare("p",refP,mcP,1.e-3);
printf("Comparing yOut\n");
for (int i=0; i<samples_per_iteration*batch_size; i++) {
assert(refYout[i] == mcYout[i]);
}
free(mcYout);
free(refYout);
printf("SUCCESS!\n");
}
// Clean up
delete infer;
for (int l=0; l<num_layers; l++) {
delete Wprev[l];
delete Wcur[l];
delete Bh[l];
delete Wres[l];
delete Bres[l];
delete Wskip[l];
delete Bskip[l];
for (int sample=0; sample<samples_per_iteration;sample++) {
delete Xt[sample][l];
}
delete skipOut[l];
  }
  // also free the final per-sample activation buffer (index num_layers), which the loop above does not reach
  for (int sample=0; sample<samples_per_iteration; sample++) {
    delete Xt[sample][num_layers];
  }
}
int main(int argc, char* argv[]) {
int num_layers = 20;
int batch_size = 16;
if (argc > 1) num_layers = atoi(argv[1]);
if (argc > 2) batch_size = atoi(argv[2]);
// How many samples to generate each time we invoke the kernel
const int SAMPLES_PER_ITERATION = 8;
const int MAX_DILATION = SAMPLES_PER_ITERATION;
srand(3);
printf("Testing R=32, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
printf("Testing R=64, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, true, false);
printf(" Testing Dual-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, false, true);
printf(" Testing Persistent\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, true, true);
printf("Testing R=64, S=256\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
printf("Testing R=128, S=256\n");
printf(" Testing Persistent\n");
runTest<float,float,128,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
}
|
0a960e0163b302e47fda874f09a25a8fcf4865e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VecGeom/base/Global.h"
#include "VecGeom/volumes/PlacedVolume.h"
#include "VecGeom/base/SOA3D.h"
#include "VecGeom/navigation/NewSimpleNavigator.h"
#include "VecGeom/navigation/NavigationState.h"
#include "VecGeom/navigation/NavStatePool.h"
#ifdef VECGEOM_ENABLE_CUDA
#include "VecGeom/base/Stopwatch.h"
#include "VecGeom/backend/cuda/Backend.h"
#include "VecGeom/management/CudaManager.h"
#include "VecGeom/backend/cuda/Interface.h"
#endif
namespace vecgeom {
inline namespace cuda {
__global__ void NavigationKernel(void *gpu_ptr /* a pointer to buffer of current navigation states */,
void *gpu_out_ptr /* a pointer to buffer for next states */, int depth,
VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const SOA3D<Precision> directions, Precision const *pSteps, const int n,
Precision *const steps)
{
using vecgeom::cuda::NavigationState;
using vecgeom::cuda::NavStatePool;
auto nav = NewSimpleNavigator<>::Instance(); // pointer to a navigator
Precision step;
unsigned tid = ThreadIndex();
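// Grid-stride loop: each thread handles track tid, then advances by the total thread count until all n tracks are processed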
while (tid < n) {
//.. get the navigationstate for this thread/lane
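// States are packed back-to-back in a raw byte buffer; stride by the depth-dependent, alignment-aware instance size to locate this thread's slot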
NavigationState *inState =
reinterpret_cast<NavigationState *>((char *)gpu_ptr + tid * NavigationState::SizeOfInstanceAlignAware(depth));
NavigationState *outState =
reinterpret_cast<NavigationState *>((char *)gpu_out_ptr + tid * NavigationState::SizeOfInstanceAlignAware(depth));
//.. do the actual navigation on the GPU
// nav.LocatePoint(volume, positions[tid], *inState, true);
nav->FindNextBoundaryAndStep(positions[tid], directions[tid], *inState, *outState, pSteps[tid], step);
steps[tid] = step;
// repeat
tid += ThreadOffset();
}
}
} // end of namespace cuda
// Should this function be moved to NavigationBenchmarker.cpp?
Precision runNavigationCuda(void *gpu_ptr, void *gpu_out_ptr, int depth, const cxx::VPlacedVolume *const volume,
unsigned npoints, Precision const *const posX, Precision const *const posY,
Precision const *const posZ, Precision const *const dirX, Precision const *const dirY,
Precision const *const dirZ, Precision const *const maxSteps, Precision *const propSteps)
{
// transfer geometry to GPU
using CudaVolume = cuda::VPlacedVolume const *;
using CudaSOA3D = cuda::SOA3D<Precision>;
using cxx::CudaManager;
// build a list of GPU volume pointers - needed?
// copy points to the GPU
cxx::DevicePtr<Precision> posXGpu;
posXGpu.Allocate(npoints);
cxx::DevicePtr<Precision> posYGpu;
posYGpu.Allocate(npoints);
cxx::DevicePtr<Precision> posZGpu;
posZGpu.Allocate(npoints);
posXGpu.ToDevice(posX, npoints);
posYGpu.ToDevice(posY, npoints);
posZGpu.ToDevice(posZ, npoints);
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, npoints);
// copy directions to the GPU
cxx::DevicePtr<Precision> dirXGpu;
dirXGpu.Allocate(npoints);
cxx::DevicePtr<Precision> dirYGpu;
dirYGpu.Allocate(npoints);
cxx::DevicePtr<Precision> dirZGpu;
dirZGpu.Allocate(npoints);
dirXGpu.ToDevice(dirX, npoints);
dirYGpu.ToDevice(dirY, npoints);
dirZGpu.ToDevice(dirZ, npoints);
CudaSOA3D directionGpu = CudaSOA3D(dirXGpu, dirYGpu, dirZGpu, npoints);
cxx::DevicePtr<Precision> maxStepsGpu;
maxStepsGpu.Allocate(npoints);
maxStepsGpu.ToDevice(maxSteps, npoints);
// allocate space for kernel output
Precision *propStepsGpu = cxx::AllocateOnGpu<Precision>(npoints * sizeof(Precision));
// launch the kernel on the GPU
vecgeom::cuda::LaunchParameters launch(npoints);
vecgeom::cuda::Stopwatch timer;
timer.Start();
hipLaunchKernelGGL(( vecgeom::cuda::NavigationKernel), dim3(1), dim3(1), 0, 0,
gpu_ptr, gpu_out_ptr, depth, CudaManager::Instance().world_gpu(),
positionGpu, directionGpu, maxStepsGpu, 1, propStepsGpu );
hipDeviceSynchronize();
Precision elapsedWarmup = timer.Stop();
printf("GPU config <<<1,1>>> - warm-up time: %f ms\n", 1000. * elapsedWarmup);
timer.Start();
hipLaunchKernelGGL(( vecgeom::cuda::NavigationKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0,
gpu_ptr, gpu_out_ptr, depth, CudaManager::Instance().world_gpu(), positionGpu, directionGpu, maxStepsGpu, npoints,
propStepsGpu);
hipDeviceSynchronize();
Precision elapsedCuda = timer.Stop();
printf("GPU config <<<%i,%i>>> - navigation time: %f ms\n", launch.grid_size.x, launch.block_size.x, 1000. * elapsedCuda);
cxx::CopyFromGpu(propStepsGpu, propSteps, npoints * sizeof(Precision));
// cleanup
cxx::FreeFromGpu(propStepsGpu);
posXGpu.Deallocate();
posYGpu.Deallocate();
posZGpu.Deallocate();
dirXGpu.Deallocate();
dirYGpu.Deallocate();
dirZGpu.Deallocate();
return elapsedCuda;
}
} // global namespace
|
0a960e0163b302e47fda874f09a25a8fcf4865e4.cu
|
#include "VecGeom/base/Global.h"
#include "VecGeom/volumes/PlacedVolume.h"
#include "VecGeom/base/SOA3D.h"
#include "VecGeom/navigation/NewSimpleNavigator.h"
#include "VecGeom/navigation/NavigationState.h"
#include "VecGeom/navigation/NavStatePool.h"
#ifdef VECGEOM_ENABLE_CUDA
#include "VecGeom/base/Stopwatch.h"
#include "VecGeom/backend/cuda/Backend.h"
#include "VecGeom/management/CudaManager.h"
#include "VecGeom/backend/cuda/Interface.h"
#endif
namespace vecgeom {
inline namespace cuda {
__global__ void NavigationKernel(void *gpu_ptr /* a pointer to buffer of current navigation states */,
void *gpu_out_ptr /* a pointer to buffer for next states */, int depth,
VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const SOA3D<Precision> directions, Precision const *pSteps, const int n,
Precision *const steps)
{
using vecgeom::cuda::NavigationState;
using vecgeom::cuda::NavStatePool;
auto nav = NewSimpleNavigator<>::Instance(); // pointer to a navigator
Precision step;
unsigned tid = ThreadIndex();
while (tid < n) {
//.. get the navigationstate for this thread/lane
NavigationState *inState =
reinterpret_cast<NavigationState *>((char *)gpu_ptr + tid * NavigationState::SizeOfInstanceAlignAware(depth));
NavigationState *outState =
reinterpret_cast<NavigationState *>((char *)gpu_out_ptr + tid * NavigationState::SizeOfInstanceAlignAware(depth));
//.. do the actual navigation on the GPU
// nav.LocatePoint(volume, positions[tid], *inState, true);
nav->FindNextBoundaryAndStep(positions[tid], directions[tid], *inState, *outState, pSteps[tid], step);
steps[tid] = step;
// repeat
tid += ThreadOffset();
}
}
} // end of namespace cuda
// Should this function be moved to NavigationBenchmarker.cpp?
Precision runNavigationCuda(void *gpu_ptr, void *gpu_out_ptr, int depth, const cxx::VPlacedVolume *const volume,
unsigned npoints, Precision const *const posX, Precision const *const posY,
Precision const *const posZ, Precision const *const dirX, Precision const *const dirY,
Precision const *const dirZ, Precision const *const maxSteps, Precision *const propSteps)
{
// transfer geometry to GPU
using CudaVolume = cuda::VPlacedVolume const *;
using CudaSOA3D = cuda::SOA3D<Precision>;
using cxx::CudaManager;
// build a list of GPU volume pointers - needed?
// copy points to the GPU
cxx::DevicePtr<Precision> posXGpu;
posXGpu.Allocate(npoints);
cxx::DevicePtr<Precision> posYGpu;
posYGpu.Allocate(npoints);
cxx::DevicePtr<Precision> posZGpu;
posZGpu.Allocate(npoints);
posXGpu.ToDevice(posX, npoints);
posYGpu.ToDevice(posY, npoints);
posZGpu.ToDevice(posZ, npoints);
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, npoints);
// copy directions to the GPU
cxx::DevicePtr<Precision> dirXGpu;
dirXGpu.Allocate(npoints);
cxx::DevicePtr<Precision> dirYGpu;
dirYGpu.Allocate(npoints);
cxx::DevicePtr<Precision> dirZGpu;
dirZGpu.Allocate(npoints);
dirXGpu.ToDevice(dirX, npoints);
dirYGpu.ToDevice(dirY, npoints);
dirZGpu.ToDevice(dirZ, npoints);
CudaSOA3D directionGpu = CudaSOA3D(dirXGpu, dirYGpu, dirZGpu, npoints);
cxx::DevicePtr<Precision> maxStepsGpu;
maxStepsGpu.Allocate(npoints);
maxStepsGpu.ToDevice(maxSteps, npoints);
// allocate space for kernel output
Precision *propStepsGpu = cxx::AllocateOnGpu<Precision>(npoints * sizeof(Precision));
// launch the kernel on the GPU
vecgeom::cuda::LaunchParameters launch(npoints);
vecgeom::cuda::Stopwatch timer;
timer.Start();
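// Warm-up launch: a single <<<1,1>>> run over one track absorbs one-time setup costs so the timed full-size launch below is not skewed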
vecgeom::cuda::NavigationKernel<<<1, 1>>>(
gpu_ptr, gpu_out_ptr, depth, CudaManager::Instance().world_gpu(),
positionGpu, directionGpu, maxStepsGpu, 1, propStepsGpu );
cudaDeviceSynchronize();
Precision elapsedWarmup = timer.Stop();
printf("GPU config <<<1,1>>> - warm-up time: %f ms\n", 1000. * elapsedWarmup);
timer.Start();
vecgeom::cuda::NavigationKernel<<<launch.grid_size, launch.block_size>>>(
gpu_ptr, gpu_out_ptr, depth, CudaManager::Instance().world_gpu(), positionGpu, directionGpu, maxStepsGpu, npoints,
propStepsGpu);
cudaDeviceSynchronize();
Precision elapsedCuda = timer.Stop();
printf("GPU config <<<%i,%i>>> - navigation time: %f ms\n", launch.grid_size.x, launch.block_size.x, 1000. * elapsedCuda);
cxx::CopyFromGpu(propStepsGpu, propSteps, npoints * sizeof(Precision));
// cleanup
cxx::FreeFromGpu(propStepsGpu);
posXGpu.Deallocate();
posYGpu.Deallocate();
posZGpu.Deallocate();
dirXGpu.Deallocate();
dirYGpu.Deallocate();
dirZGpu.Deallocate();
return elapsedCuda;
}
} // global namespace
|
dc8d81899010d56af5fce75c10ff7040c8efa64a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>
#include "opencv2/cudaarithm.hpp"
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f;
else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
else return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
if (-0.5 <= x && x<0.5) return 1.0;
return 0;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
if (-1<=x && x<0) return x+1;
if (0<=x && x<=1) return 1-x;
return 0;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
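// With antialiasing on a downsample, ax/ay shrink the filter argument and rx/ry widen the sampling window,
// so the kernel support scales with the downsampling factor and source pixels are averaged rather than skipped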
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
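// fx, fy are source-over-destination scale ratios: a value > 1 means downsampling along that axis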
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
hipLaunchKernelGGL(( NearestNeighborKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
hipLaunchKernelGGL(( InterpolationKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported resample type";
}
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
|
dc8d81899010d56af5fce75c10ff7040c8efa64a.cu
|
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>
#include "opencv2/cudaarithm.hpp"
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f;
else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
else return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
if (-0.5 <= x && x<0.5) return 1.0;
return 0;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
if (-1<=x && x<0) return x+1;
if (0<=x && x<=1) return 1-x;
return 0;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
NearestNeighborKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
InterpolationKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported resample type";
}
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
|
10c50833785bc86fc409d4009186d3c409448998.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/hip/UpSample.cuh>
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_nearest3d_backward.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#endif
namespace at::native {
namespace {
#define MAX_THREADS 512
// Define a typedef to dispatch to nearest_neighbor_compute_source_index or
// nearest_neighbor_exact_compute_source_index
typedef int (*nn_compute_source_index_fn_t)(const float, int, int);
// Define a typedef to dispatch to nearest_neighbor_bw_compute_source_index or
// nearest_neighbor_exact_bw_compute_source_index
typedef int (*nn_bw_compute_source_index_fn_t)(const float, int, int);
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_out_frame(
const scalar_t* input,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* output,
float depth_scale,
float height_scale,
float width_scale) {
int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
int src_z = nn_compute_source_index_fn(depth_scale, dst_z, src_dim_d);
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
int src_y = nn_compute_source_index_fn(height_scale, dst_y, src_dim_h);
int dst_x = dst_idx % dst_dim_w;
int src_x = nn_compute_source_index_fn(width_scale, dst_x, src_dim_w);
int src_idx = c * src_c_stride + src_z * src_dim_h * src_dim_w +
src_y * src_dim_w + src_x;
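// The spatial mapping is identical for every batch element, so walk the batch by striding both flat indices over a full channel block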
for (int b = 0; b < dim_b; b++) {
output[dst_idx] = input[src_idx];
src_idx += dim_c * src_c_stride;
dst_idx += dim_c * dst_c_stride;
}
}
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
// Backward operation
template <typename scalar_t, typename accscalar_t, nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_backward_out_frame(
const scalar_t* grad_o,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* grad_i,
float depth_scale,
float height_scale,
float width_scale) {
int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
// note that we do not want to clamp src_z to src_dim_z, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_z = nn_bw_compute_source_index_fn(depth_scale, dst_z, src_dim_d);
int src_z_up = nn_bw_compute_source_index_fn(depth_scale, dst_z+1, src_dim_d);
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
// note that we do not want to clamp src_y to src_dim_y, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_y = nn_bw_compute_source_index_fn(height_scale, dst_y, src_dim_h);
int src_y_up = nn_bw_compute_source_index_fn(height_scale, dst_y+1, src_dim_h);
int dst_x = dst_idx % dst_dim_w;
// note that we do not want to clamp src_x to src_dim_w, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_x = nn_bw_compute_source_index_fn(width_scale, dst_x, src_dim_w);
int src_x_up = nn_bw_compute_source_index_fn(width_scale, dst_x+1, src_dim_w);
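// Accumulate the gradient from every output location that read this input element in the forward pass;
// when scale_factor < 1 the [src, src_up) window can be empty and the gradient stays zero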
for (int b = 0; b < dim_b; b++) {
accscalar_t grad = 0;
for (int z = src_z; z < src_z_up; z++) {
for (int y = src_y; y < src_y_up; y++) {
for (int x = src_x; x < src_x_up; x++) {
int src_idx = b * dim_c * src_c_stride + c * src_c_stride +
z * src_dim_h * src_dim_w + y * src_dim_w + x;
grad += grad_o[src_idx];
}
}
}
grad_i[dst_idx] = grad;
dst_idx += dim_c * dst_c_stride;
}
}
template<nn_compute_source_index_fn_t nn_compute_source_index_fn>
static void upsample_nearest3d_out_cuda_template(
const Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
// TODO: remove this when the cuda kernel is updated to support the channels_last memory format.
// This is a temporary hack to prevent a silent correctness issue when calling this kernel
// with tensors in channels_last format.
auto output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options());
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_.size(0);
int channels = input_.size(1);
int input_depth = input_.size(2);
int input_height = input_.size(3);
int input_width = input_.size(4);
Tensor input = input_.contiguous();
if (input.numel() == 0) {
return;
}
// upsample_nearest3d meta call makes sure `nbatch != 0`
unsigned int n = output.numel() / nbatch;
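// n is the number of output elements in a single batch item; the kernel launches one thread per such element and loops over the batch internally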
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{ceil_div(n, bdim.x)};
// safe check for int32 indexing; implicitly restrict launch config for kernel
TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte,input.scalar_type(), "upsample_nearest3d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.data_ptr<scalar_t>();
auto odata = output_c.data_ptr<scalar_t>();
const float depth_scale = compute_scales_value<float>(scales_d, input_depth, output_depth);
const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height);
const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width);
hipLaunchKernelGGL(( upsample_nearest3d_out_frame<scalar_t, nn_compute_source_index_fn>)
, dim3(gdim), dim3(bdim), 0, stream,
idata,
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
odata,
depth_scale,
height_scale,
width_scale);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
if (!output.is_contiguous()) {
output.copy_(output_c);
}
}
template<nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn>
static void upsample_nearest3d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
__func__,
{grad_output_arg, grad_input_arg});
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_size[0];
int channels = input_size[1];
int input_depth = input_size[2];
int input_height = input_size[3];
int input_width = input_size[4];
Tensor grad_output = grad_output_.contiguous();
if (grad_input.numel() == 0) {
return;
}
// upsample_nearest3d meta call makes sure `nbatch != 0`
unsigned int n = grad_input.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{ceil_div(n, bdim.x)};
// safe check for int32 indexing; implicitly restrict launch config for kernel
TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest3d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
float depth_scale = compute_scales_value_backwards<float>(scales_d, output_depth, input_depth);
float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height);
float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width);
hipLaunchKernelGGL(( upsample_nearest3d_backward_out_frame<scalar_t, accscalar_t, nn_bw_compute_source_index_fn>)
, dim3(gdim), dim3(bdim), 0, stream,
odata,
nbatch,
channels,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
idata,
depth_scale,
height_scale,
width_scale);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_nearest3d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& output) {
upsample_nearest3d_out_cuda_template<nearest_neighbor_compute_source_index>(
output, input, output_size, scales_d, scales_h, scales_w);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact3d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& output) {
upsample_nearest3d_out_cuda_template<nearest_neighbor_exact_compute_source_index>(output, input, output_size, scales_d, scales_h, scales_w);
}
TORCH_IMPL_FUNC(upsample_nearest3d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& grad_input) {
upsample_nearest3d_backward_out_cuda_template<nearest_neighbor_bw_compute_source_index>(
grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact3d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& grad_input) {
upsample_nearest3d_backward_out_cuda_template<nearest_neighbor_exact_bw_compute_source_index>(
grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
} // namespace at::native
|
10c50833785bc86fc409d4009186d3c409448998.cu
|
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/cuda/UpSample.cuh>
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_nearest3d_backward.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#endif
namespace at::native {
namespace {
#define MAX_THREADS 512
// Define a typedef to dispatch to nearest_neighbor_compute_source_index or
// nearest_neighbor_exact_compute_source_index
typedef int (*nn_compute_source_index_fn_t)(const float, int, int);
// Define a typedef to dispatch to nearest_neighbor_bw_compute_source_index or
// nearest_neighbor_exact_bw_compute_source_index
typedef int (*nn_bw_compute_source_index_fn_t)(const float, int, int);
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_out_frame(
const scalar_t* input,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* output,
float depth_scale,
float height_scale,
float width_scale) {
int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
int src_z = nn_compute_source_index_fn(depth_scale, dst_z, src_dim_d);
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
int src_y = nn_compute_source_index_fn(height_scale, dst_y, src_dim_h);
int dst_x = dst_idx % dst_dim_w;
int src_x = nn_compute_source_index_fn(width_scale, dst_x, src_dim_w);
int src_idx = c * src_c_stride + src_z * src_dim_h * src_dim_w +
src_y * src_dim_w + src_x;
for (int b = 0; b < dim_b; b++) {
output[dst_idx] = input[src_idx];
src_idx += dim_c * src_c_stride;
dst_idx += dim_c * dst_c_stride;
}
}
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
// Backward operation
template <typename scalar_t, typename accscalar_t, nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_backward_out_frame(
const scalar_t* grad_o,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* grad_i,
float depth_scale,
float height_scale,
float width_scale) {
int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
// note that we do not want to clamp src_z to src_dim_z, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_z = nn_bw_compute_source_index_fn(depth_scale, dst_z, src_dim_d);
int src_z_up = nn_bw_compute_source_index_fn(depth_scale, dst_z+1, src_dim_d);
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
// note that we do not want to clamp src_y to src_dim_y, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_y = nn_bw_compute_source_index_fn(height_scale, dst_y, src_dim_h);
int src_y_up = nn_bw_compute_source_index_fn(height_scale, dst_y+1, src_dim_h);
int dst_x = dst_idx % dst_dim_w;
// note that we do not want to clamp src_x to src_dim_w, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_x = nn_bw_compute_source_index_fn(width_scale, dst_x, src_dim_w);
int src_x_up = nn_bw_compute_source_index_fn(width_scale, dst_x+1, src_dim_w);
for (int b = 0; b < dim_b; b++) {
accscalar_t grad = 0;
for (int z = src_z; z < src_z_up; z++) {
for (int y = src_y; y < src_y_up; y++) {
for (int x = src_x; x < src_x_up; x++) {
int src_idx = b * dim_c * src_c_stride + c * src_c_stride +
z * src_dim_h * src_dim_w + y * src_dim_w + x;
grad += grad_o[src_idx];
}
}
}
grad_i[dst_idx] = grad;
dst_idx += dim_c * dst_c_stride;
}
}
template<nn_compute_source_index_fn_t nn_compute_source_index_fn>
static void upsample_nearest3d_out_cuda_template(
const Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
// TODO: remove this when the cuda kernel is updated to support the channels_last memory format.
// This is a temporary hack to prevent a silent correctness issue when calling this kernel
// with tensors in channels_last format.
auto output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options());
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_.size(0);
int channels = input_.size(1);
int input_depth = input_.size(2);
int input_height = input_.size(3);
int input_width = input_.size(4);
Tensor input = input_.contiguous();
if (input.numel() == 0) {
return;
}
// upsample_nearest3d meta call makes sure `nbatch != 0`
unsigned int n = output.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{ceil_div(n, bdim.x)};
// safe check for int32 indexing; implicitly restrict launch config for kernel
TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte,input.scalar_type(), "upsample_nearest3d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.data_ptr<scalar_t>();
auto odata = output_c.data_ptr<scalar_t>();
const float depth_scale = compute_scales_value<float>(scales_d, input_depth, output_depth);
const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height);
const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width);
upsample_nearest3d_out_frame<scalar_t, nn_compute_source_index_fn>
<<<gdim, bdim, 0, stream>>>(
idata,
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
odata,
depth_scale,
height_scale,
width_scale);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
if (!output.is_contiguous()) {
output.copy_(output_c);
}
}
template<nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn>
static void upsample_nearest3d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
__func__,
{grad_output_arg, grad_input_arg});
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_size[0];
int channels = input_size[1];
int input_depth = input_size[2];
int input_height = input_size[3];
int input_width = input_size[4];
Tensor grad_output = grad_output_.contiguous();
if (grad_input.numel() == 0) {
return;
}
// upsample_nearest3d meta call makes sure `nbatch != 0`
unsigned int n = grad_input.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{ceil_div(n, bdim.x)};
// safe check for int32 indexing; implicitly restrict launch config for kernel
TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest3d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
float depth_scale = compute_scales_value_backwards<float>(scales_d, output_depth, input_depth);
float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height);
float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width);
upsample_nearest3d_backward_out_frame<scalar_t, accscalar_t, nn_bw_compute_source_index_fn>
<<<gdim, bdim, 0, stream>>>(
odata,
nbatch,
channels,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
idata,
depth_scale,
height_scale,
width_scale);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_nearest3d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& output) {
upsample_nearest3d_out_cuda_template<nearest_neighbor_compute_source_index>(
output, input, output_size, scales_d, scales_h, scales_w);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact3d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& output) {
upsample_nearest3d_out_cuda_template<nearest_neighbor_exact_compute_source_index>(output, input, output_size, scales_d, scales_h, scales_w);
}
TORCH_IMPL_FUNC(upsample_nearest3d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& grad_input) {
upsample_nearest3d_backward_out_cuda_template<nearest_neighbor_bw_compute_source_index>(
grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
TORCH_IMPL_FUNC(_upsample_nearest_exact3d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_d,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& grad_input) {
upsample_nearest3d_backward_out_cuda_template<nearest_neighbor_exact_bw_compute_source_index>(
grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
} // namespace at::native
|
tsne.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../src_prims/utils.h"
#include "distances.h"
#include "exact_kernels.h"
#include "tsne/tsne.h"
#include "utils.h"
#include "barnes_hut.h"
#include "exact_tsne.h"
namespace ML {
/**
* @brief Dimensionality reduction via TSNE using either Barnes Hut O(NlogN) or brute force O(N^2).
* @input param handle: The GPU handle.
* @input param X: The dataset you want to apply TSNE on.
* @output param Y: The final embedding. Will overwrite this internally.
* @input param n: Number of rows in data X.
* @input param p: Number of columns in data X.
* @input param dim: Number of output dimensions for embeddings Y.
* @input param n_neighbors: Number of nearest neighbors used.
* @input param theta: Float between 0 and 1. Tradeoff for speed (0) vs accuracy (1) for Barnes Hut only.
* @input param epssq: A tiny jitter to promote numerical stability.
* @input param perplexity: How many nearest neighbors are used during the construction of Pij.
* @input param perplexity_max_iter: Number of iterations used to construct Pij.
* @input param perplexity_tol: The small tolerance used for Pij to ensure numerical stability.
 * @input param early_exaggeration: How much early pressure to apply so that clusters in TSNE spread out more.
* @input param exaggeration_iter: How many iterations you want the early pressure to run for.
* @input param min_gain: Rounds up small gradient updates.
* @input param pre_learning_rate: The learning rate during the exaggeration phase.
* @input param post_learning_rate: The learning rate after the exaggeration phase.
* @input param max_iter: The maximum number of iterations TSNE should run for.
* @input param min_grad_norm: The smallest gradient norm TSNE should terminate on.
* @input param pre_momentum: The momentum used during the exaggeration phase.
* @input param post_momentum: The momentum used after the exaggeration phase.
 * @input param random_state: Set this to -1 for pure random initializations or >= 0 for reproducible outputs.
* @input param verbose: Whether to print error messages or not.
* @input param intialize_embeddings: Whether to overwrite the current Y vector with random noise.
* @input param barnes_hut: Whether to use the fast Barnes Hut or use the slower exact version.
*/
void TSNE_fit(const cumlHandle &handle, const float *X, float *Y, const int n,
const int p, const int dim, int n_neighbors, const float theta,
const float epssq, float perplexity,
const int perplexity_max_iter, const float perplexity_tol,
const float early_exaggeration, const int exaggeration_iter,
const float min_gain, const float pre_learning_rate,
const float post_learning_rate, const int max_iter,
const float min_grad_norm, const float pre_momentum,
const float post_momentum, const long long random_state,
const bool verbose, const bool intialize_embeddings,
bool barnes_hut) {
ASSERT(n > 0 && p > 0 && dim > 0 && n_neighbors > 0 && X != NULL && Y != NULL,
"Wrong input args");
if (dim > 2 and barnes_hut) {
barnes_hut = false;
printf(
"[Warn] Barnes Hut only works for dim == 2. Switching to exact "
"solution.\n");
}
if (n_neighbors > n) n_neighbors = n;
if (n_neighbors > 1023) {
printf("[Warn] FAISS only supports maximum n_neighbors = 1023.\n");
n_neighbors = 1023;
}
// Perplexity must be less than number of datapoints
// "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
if (perplexity > n) perplexity = n;
if (verbose) {
printf("[Info] Data size = (%d, %d) with dim = %d perplexity = %f\n", n, p,
dim, perplexity);
if (perplexity < 5 or perplexity > 50)
printf(
"[Warn] Perplexity should be within ranges (5, 50). Your results "
"might be a bit strange...\n");
if (n_neighbors < perplexity * 3.0f)
printf(
"[Warn] # of Nearest Neighbors should be at least 3 * perplexity. "
"Your results might be a bit strange...\n");
}
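  // Editorial note: the checks above only warn and never modify the values;
  // e.g. perplexity = 30 expects n_neighbors >= 90, and a perplexity outside
  // (5, 50) merely prints a warning.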
auto d_alloc = handle.getDeviceAllocator();
hipStream_t stream = handle.getStream();
START_TIMER;
//---------------------------------------------------
// Get distances
if (verbose) printf("[Info] Getting distances.\n");
float *distances =
(float *)d_alloc->allocate(sizeof(float) * n * n_neighbors, stream);
long *indices =
(long *)d_alloc->allocate(sizeof(long) * n * n_neighbors, stream);
TSNE::get_distances(X, n, p, indices, distances, n_neighbors, stream);
//---------------------------------------------------
END_TIMER(DistancesTime);
START_TIMER;
//---------------------------------------------------
// Normalize distances
if (verbose)
printf("[Info] Now normalizing distances so exp(D) doesn't explode.\n");
TSNE::normalize_distances(n, distances, n_neighbors, stream);
//---------------------------------------------------
END_TIMER(NormalizeTime);
START_TIMER;
//---------------------------------------------------
// Optimal perplexity
if (verbose)
printf("[Info] Searching for optimal perplexity via bisection search.\n");
float *P =
(float *)d_alloc->allocate(sizeof(float) * n * n_neighbors, stream);
const float P_sum =
TSNE::perplexity_search(distances, P, perplexity, perplexity_max_iter,
perplexity_tol, n, n_neighbors, handle);
d_alloc->deallocate(distances, sizeof(float) * n * n_neighbors, stream);
if (verbose) printf("[Info] Perplexity sum = %f\n", P_sum);
//---------------------------------------------------
END_TIMER(PerplexityTime);
START_TIMER;
//---------------------------------------------------
// Convert data to COO layout
MLCommon::Sparse::COO<float> COO_Matrix;
TSNE::symmetrize_perplexity(P, indices, n, n_neighbors, P_sum,
early_exaggeration, &COO_Matrix, stream, handle);
d_alloc->deallocate(P, sizeof(float) * n * n_neighbors, stream);
d_alloc->deallocate(indices, sizeof(long) * n * n_neighbors, stream);
const int NNZ = COO_Matrix.nnz;
float *VAL = COO_Matrix.vals;
const int *COL = COO_Matrix.cols;
const int *ROW = COO_Matrix.rows;
//---------------------------------------------------
END_TIMER(SymmetrizeTime);
if (barnes_hut) {
TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, theta, epssq,
early_exaggeration, exaggeration_iter, min_gain,
pre_learning_rate, post_learning_rate, max_iter,
min_grad_norm, pre_momentum, post_momentum, random_state,
verbose);
} else {
TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, dim, early_exaggeration,
exaggeration_iter, min_gain, pre_learning_rate,
post_learning_rate, max_iter, min_grad_norm, pre_momentum,
post_momentum, random_state, verbose,
intialize_embeddings);
}
COO_Matrix.destroy();
}
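// Editorial usage sketch (not part of the original file): one way a caller
// might invoke TSNE_fit for a 2-D Barnes Hut embedding. The helper name
// run_tsne_example and every literal hyperparameter below are hypothetical
// illustrations; X and Y are assumed to be caller-managed device buffers of
// size n*p and n*2 respectively.
static inline void run_tsne_example(const cumlHandle &handle, const float *X,
                                    float *Y, int n, int p) {
  TSNE_fit(handle, X, Y, n, p,
           /*dim=*/2, /*n_neighbors=*/90, /*theta=*/0.5f, /*epssq=*/0.0025f,
           /*perplexity=*/30.0f, /*perplexity_max_iter=*/100,
           /*perplexity_tol=*/1e-5f, /*early_exaggeration=*/12.0f,
           /*exaggeration_iter=*/250, /*min_gain=*/0.01f,
           /*pre_learning_rate=*/200.0f, /*post_learning_rate=*/500.0f,
           /*max_iter=*/1000, /*min_grad_norm=*/1e-7f, /*pre_momentum=*/0.5f,
           /*post_momentum=*/0.8f, /*random_state=*/-1, /*verbose=*/false,
           /*intialize_embeddings=*/true, /*barnes_hut=*/true);
}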
} // namespace ML
|
tsne.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../src_prims/utils.h"
#include "distances.h"
#include "exact_kernels.h"
#include "tsne/tsne.h"
#include "utils.h"
#include "barnes_hut.h"
#include "exact_tsne.h"
namespace ML {
/**
* @brief Dimensionality reduction via TSNE using either Barnes Hut O(NlogN) or brute force O(N^2).
* @input param handle: The GPU handle.
* @input param X: The dataset you want to apply TSNE on.
* @output param Y: The final embedding. Will overwrite this internally.
* @input param n: Number of rows in data X.
* @input param p: Number of columns in data X.
* @input param dim: Number of output dimensions for embeddings Y.
* @input param n_neighbors: Number of nearest neighbors used.
* @input param theta: Float between 0 and 1. Tradeoff for speed (0) vs accuracy (1) for Barnes Hut only.
* @input param epssq: A tiny jitter to promote numerical stability.
* @input param perplexity: How many nearest neighbors are used during the construction of Pij.
* @input param perplexity_max_iter: Number of iterations used to construct Pij.
* @input param perplexity_tol: The small tolerance used for Pij to ensure numerical stability.
 * @input param early_exaggeration: How much early pressure to apply so that clusters in TSNE spread out more.
* @input param exaggeration_iter: How many iterations you want the early pressure to run for.
* @input param min_gain: Rounds up small gradient updates.
* @input param pre_learning_rate: The learning rate during the exaggeration phase.
* @input param post_learning_rate: The learning rate after the exaggeration phase.
* @input param max_iter: The maximum number of iterations TSNE should run for.
* @input param min_grad_norm: The smallest gradient norm TSNE should terminate on.
* @input param pre_momentum: The momentum used during the exaggeration phase.
* @input param post_momentum: The momentum used after the exaggeration phase.
 * @input param random_state: Set this to -1 for pure random initializations or >= 0 for reproducible outputs.
* @input param verbose: Whether to print error messages or not.
* @input param intialize_embeddings: Whether to overwrite the current Y vector with random noise.
* @input param barnes_hut: Whether to use the fast Barnes Hut or use the slower exact version.
*/
void TSNE_fit(const cumlHandle &handle, const float *X, float *Y, const int n,
const int p, const int dim, int n_neighbors, const float theta,
const float epssq, float perplexity,
const int perplexity_max_iter, const float perplexity_tol,
const float early_exaggeration, const int exaggeration_iter,
const float min_gain, const float pre_learning_rate,
const float post_learning_rate, const int max_iter,
const float min_grad_norm, const float pre_momentum,
const float post_momentum, const long long random_state,
const bool verbose, const bool intialize_embeddings,
bool barnes_hut) {
ASSERT(n > 0 && p > 0 && dim > 0 && n_neighbors > 0 && X != NULL && Y != NULL,
"Wrong input args");
if (dim > 2 and barnes_hut) {
barnes_hut = false;
printf(
"[Warn] Barnes Hut only works for dim == 2. Switching to exact "
"solution.\n");
}
if (n_neighbors > n) n_neighbors = n;
if (n_neighbors > 1023) {
printf("[Warn] FAISS only supports maximum n_neighbors = 1023.\n");
n_neighbors = 1023;
}
// Perplexity must be less than number of datapoints
// "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
if (perplexity > n) perplexity = n;
if (verbose) {
printf("[Info] Data size = (%d, %d) with dim = %d perplexity = %f\n", n, p,
dim, perplexity);
if (perplexity < 5 or perplexity > 50)
printf(
"[Warn] Perplexity should be within ranges (5, 50). Your results "
"might be a bit strange...\n");
if (n_neighbors < perplexity * 3.0f)
printf(
"[Warn] # of Nearest Neighbors should be at least 3 * perplexity. "
"Your results might be a bit strange...\n");
}
auto d_alloc = handle.getDeviceAllocator();
cudaStream_t stream = handle.getStream();
START_TIMER;
//---------------------------------------------------
// Get distances
if (verbose) printf("[Info] Getting distances.\n");
float *distances =
(float *)d_alloc->allocate(sizeof(float) * n * n_neighbors, stream);
long *indices =
(long *)d_alloc->allocate(sizeof(long) * n * n_neighbors, stream);
TSNE::get_distances(X, n, p, indices, distances, n_neighbors, stream);
//---------------------------------------------------
END_TIMER(DistancesTime);
START_TIMER;
//---------------------------------------------------
// Normalize distances
if (verbose)
printf("[Info] Now normalizing distances so exp(D) doesn't explode.\n");
TSNE::normalize_distances(n, distances, n_neighbors, stream);
//---------------------------------------------------
END_TIMER(NormalizeTime);
START_TIMER;
//---------------------------------------------------
// Optimal perplexity
if (verbose)
printf("[Info] Searching for optimal perplexity via bisection search.\n");
float *P =
(float *)d_alloc->allocate(sizeof(float) * n * n_neighbors, stream);
const float P_sum =
TSNE::perplexity_search(distances, P, perplexity, perplexity_max_iter,
perplexity_tol, n, n_neighbors, handle);
d_alloc->deallocate(distances, sizeof(float) * n * n_neighbors, stream);
if (verbose) printf("[Info] Perplexity sum = %f\n", P_sum);
//---------------------------------------------------
END_TIMER(PerplexityTime);
START_TIMER;
//---------------------------------------------------
// Convert data to COO layout
MLCommon::Sparse::COO<float> COO_Matrix;
TSNE::symmetrize_perplexity(P, indices, n, n_neighbors, P_sum,
early_exaggeration, &COO_Matrix, stream, handle);
d_alloc->deallocate(P, sizeof(float) * n * n_neighbors, stream);
d_alloc->deallocate(indices, sizeof(long) * n * n_neighbors, stream);
const int NNZ = COO_Matrix.nnz;
float *VAL = COO_Matrix.vals;
const int *COL = COO_Matrix.cols;
const int *ROW = COO_Matrix.rows;
//---------------------------------------------------
END_TIMER(SymmetrizeTime);
if (barnes_hut) {
TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, theta, epssq,
early_exaggeration, exaggeration_iter, min_gain,
pre_learning_rate, post_learning_rate, max_iter,
min_grad_norm, pre_momentum, post_momentum, random_state,
verbose);
} else {
TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, dim, early_exaggeration,
exaggeration_iter, min_gain, pre_learning_rate,
post_learning_rate, max_iter, min_grad_norm, pre_momentum,
post_momentum, random_state, verbose,
intialize_embeddings);
}
COO_Matrix.destroy();
}
} // namespace ML
|
6564eb380d56b0fe5b6c17347ebcd28bfc0ec4aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ConvexHull.h"
/* Three points are a counter-clockwise turn if ccw > 0, clockwise if
ccw < 0, and collinear if ccw = 0 because ccw is a determinant that
gives the signed area of the triangle formed by p1, p2 and p3.
*/
__device__ float ccw(point_t* p1, point_t* p2, point_t* p3)
{
return (p2->x - p1->x)*(p3->y - p1->y) - (p2->y - p1->y)*(p3->x - p1->x);
}
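/* Editorial worked example: for p1 = (0,0), p2 = (1,0), p3 = (0,1) the
   determinant above is (1-0)*(1-0) - (0-0)*(0-0) = 1 > 0, so the turn is
   counter-clockwise; swapping p2 and p3 flips the sign to -1 (clockwise), and
   three collinear points give exactly 0. */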
__global__ void lowerHullonGPU(point_t* points, int npoints, point_t* out_hull1, int* out_hullsize1)
{
//Calculate global index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//Number of elements handled by each thread
int elements_thread = npoints / (blockDim.x * gridDim.x);
//each thread has a different offset
int offset = idx*elements_thread;
//initialize the count to zero
out_hullsize1[idx] = 0;
// lower hull
for (int i = offset; i < offset+elements_thread; ++i)
{
/* while L contains at least two points and the sequence of last two points
of L and the point P[i] does not make a counter-clockwise turn:
remove the last point from L, append P[i] to L
*/
while (out_hullsize1[idx] >= 2 && ccw(&out_hull1[offset + out_hullsize1[idx] - 2], &out_hull1[offset + out_hullsize1[idx] - 1], &points[i]) <= 0)
{
--out_hullsize1[idx];
}
out_hull1[offset + (out_hullsize1[idx]++)] = points[i];
}
//out_hullsize1[idx]++;
}
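/* Editorial note on the partitioning above: elements_thread is an integer
   division, so with the 1024 x 256 launch used in runOnGPU (262,144 threads)
   and, say, npoints = 1,048,576, each thread scans the 4 consecutive points
   [4*idx, 4*idx + 4); if npoints is not a multiple of the thread count, the
   trailing remainder points are silently skipped. */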
__global__ void upperHullonGPU(point_t* points, int npoints, point_t* out_hull2, int* out_hullsize2)
{
//Calculate global index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//Number of elements handled by each thread
int elements_thread = npoints / (blockDim.x * gridDim.x);
//Each thread has a different offset
int offset = idx*elements_thread;
//initialize the count to zero
out_hullsize2[idx] = 0;
//out_hull2[0] = points[npoints - 1]; //first point for upper hull
// upper hull
// remove the last point of each list (it's the same as the first
// point of the other list, so start from npoints-2)
/* while U contains at least two points and the sequence of last two points
of U and the point P[i] does not make a counter-clockwise turn:
remove the last point from U, append P[i] to U
*/
//t=k+1 to begin the upper hull - make a turn by considering the immediate point
for (int i = npoints-offset-1; i >= npoints-offset-elements_thread; --i)
{
while (out_hullsize2[idx] >= 2 && ccw(&out_hull2[offset + out_hullsize2[idx] - 2], &out_hull2[offset + out_hullsize2[idx] - 1], &points[i]) <= 0)
{
--out_hullsize2[idx];
}
out_hull2[offset + (out_hullsize2[idx]++)] = points[i];
}
//out_hullsize2[idx]++;
}
__global__ void mergeLowerHull(point_t *hull_part, int *part_size, int *i, int *j, int npoints)
{
//Calculate the global index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Number of threads
int num_threads = blockDim.x*gridDim.x;
//Number of elements seen by each thread, but size of individual convex hulls may be smaller
int elements_thread = npoints / num_threads;
if (idx < num_threads - 1)
{
//Consider two sub-hulls in each thread
int offset = idx*elements_thread;
int next_offset = (idx + 1)*elements_thread;
//Loop conditions
bool condition1 = true, condition2 = true;
//Points to construct tangent
point_t *a, *b;
//For lower hull
a = &hull_part[offset + part_size[idx] - 1];//right most part of left hull
b = &hull_part[next_offset];//left most part of right hull
//Construct tangent
while (condition1 || condition2)
{
condition1 = false;
condition2 = false;
while (ccw(b, a, (a - 1)) > 0)
{
a = (a - 1);
i[idx]++;
condition1 = true;
}
while (ccw(a, b, (b + 1)) <= 0)
{
b = (b + 1);
j[idx]++;
condition2 = true;
}
}
//printf("idx = %d and i = %d and j = %d\n", idx, i[idx], j[idx]);
}
}
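/* Editorial note: for thread idx, i[idx] counts how many points are dropped
   from the tail of sub-hull idx and j[idx] how many are skipped at the head of
   sub-hull idx+1 while walking the lower tangent; runOnGPU later reads these
   counts back as mergeLowerEnd / mergeLowerBegin when stitching the per-thread
   hulls together. */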
__global__ void mergeUpperHull(point_t *hull_part, int *part_size, int *i, int *j, int npoints)
{
//Calculate the global index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
//Number of elements seen by each thread, but size of individual convex hulls is smaller
int elements_thread = npoints / num_threads;
if (idx < num_threads - 1)
{
//Consider two sub-hulls in each thread
int offset = idx*elements_thread;
int next_offset = (idx + 1)*elements_thread;
//Loop conditions
bool condition1 = true, condition2 = true;
//Points to construct tangent
point_t *a, *b;
//For upper hull
a = &hull_part[offset + part_size[idx] - 1];//left most part of right hull
b = &hull_part[next_offset];//right most part of left hull
//Construct tangent
while (condition1 || condition2)
{
condition1 = false;
condition2 = false;
while (ccw(b, a, (a - 1)) > 0)
{
a = (a - 1);
i[idx]++;
condition1 = true;
}
while (ccw(a, b, (b + 1)) <= 0)
{
b = (b + 1);
j[idx]++;
condition2 = true;
}
}
//printf("idx = %d and i = %d and j = %d\n", idx, i[idx], j[idx]);
}
}
// Selection sort used when depth gets too big or the number of elements drops
// below a threshold.
__device__ void selection_sort(point_t *points, int left, int right)
{
for (int i = left; i <= right; ++i)
{
point_t min_val = points[i];
int min_idx = i;
// Find the smallest value in the range [left, right].
for (int j = i + 1; j <= right; ++j)
{
point_t temp = points[j];
if ((temp.x < min_val.x) || ((temp.x == min_val.x) && (temp.y < min_val.y)))
{
min_idx = j;
min_val = temp;
}
}
// Swap the values.
if (i != min_idx)
{
points[min_idx] = points[i];
points[i] = min_val;
}
}
}
// Very basic quicksort algorithm, recursively launching the next level.
__global__ void quickSortOnGPU(point_t *points, int left, int right, int depth)
{
	// If we're too deep or there are few elements left, fall back to a simple selection sort...
if (depth >= MAX_DEPTH || right - left <= INSERTION_SORT)
{
selection_sort(points, left, right);
return;
}
point_t *lptr = points + left;
point_t *rptr = points + right;
point_t pivot = points[(left + right) / 2];
// Do the partitioning.
while (lptr <= rptr)
{
// Find the next left- and right-hand values to swap
point_t lval = *lptr;
point_t rval = *rptr;
// Move the left pointer as long as the pointed element is smaller than the pivot.
while ((lval.x < pivot.x) || ((lval.x == pivot.x) && (lval.y < pivot.y)))
{
lptr++;
lval = *lptr;
}
// Move the right pointer as long as the pointed element is larger than the pivot.
while ((rval.x > pivot.x) || ((rval.x == pivot.x) && (rval.y > pivot.y)))
{
rptr--;
rval = *rptr;
}
// If the swap points are valid, do the swap!
if (lptr <= rptr)
{
*lptr++ = rval;
*rptr-- = lval;
}
}
// Now the recursive part
int nright = rptr - points;
int nleft = lptr - points;
// Launch a new block to sort the left part.
if (left < (rptr - points))
{
hipStream_t s;
hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
hipLaunchKernelGGL(( quickSortOnGPU) , dim3(1), dim3(1), 0, s , points, left, nright, depth + 1);
hipStreamDestroy(s);
}
// Launch a new block to sort the right part.
if ((lptr - points) < right)
{
hipStream_t s1;
hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
hipLaunchKernelGGL(( quickSortOnGPU) , dim3(1), dim3(1), 0, s1 , points, nleft, right, depth + 1);
hipStreamDestroy(s1);
}
}
// Call the quicksort kernel from the host.
void runOnGPU(point_t *points, int npoints, point_t *out_hull, int *out_hullsize)
{
// Get device properties
int device_count = 0, device = -1;
hipGetDeviceCount(&device_count);
for (int i = 0; i < device_count; ++i)
{
hipDeviceProp_t properties; //instance of the structure
hipGetDeviceProperties(&properties, i);
if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5))
{
device = i;
cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl;
break;
}
cout << "GPU " << i << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl;
}
if (device == -1)
{
cerr << "Quicksort requires GPU devices with compute SM 3.5 or higher. Exiting..." << std::endl;
exit(EXIT_SUCCESS);
}
hipSetDevice(device);//Set the device to run computations
// Allocate GPU memory.
point_t *dev_points;
hipMalloc((void **)&dev_points, npoints * sizeof(point_t));
// Copy data to device memory
hipMemcpy(dev_points, points, npoints * sizeof(point_t), hipMemcpyHostToDevice);
// Prepare Cuda Dynamic Program for the maximum depth of MAX_DEPTH.
hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH);
// Launch on device
int left = 0;
int right = npoints - 1;
//cout << "Launching kernel on the GPU" << endl;
// Launch CUDA kernel to sort the points
hipLaunchKernelGGL(( quickSortOnGPU) , dim3(1), dim3(1) , 0, 0, dev_points, left, right, 0);
hipDeviceSynchronize(); // Blocks until the device has completed all preceding requested tasks
/*hipMemcpy(points, dev_points, npoints * sizeof(point_t), hipMemcpyDeviceToHost);
printf("The sorted points are:");
for (int i = 0; i < npoints; i++)
{
printf("%d and %d\n", points[i].x, points[i].y);
}*/
// Kernel parameters
int threads_block = 256;
int num_blocks = 1024;
int num_threads = threads_block*num_blocks;
// Convex hull parameters
int *out_hullSizeLower, *out_hullSizeUpper, *dev_out_hullSizeLower, *dev_out_hullSizeUpper;
int *mergeLowerEnd, *mergeLowerBegin, *dev_mergeLowerEnd, *dev_mergeLowerBegin;
int *mergeUpperEnd, *mergeUpperBegin, *dev_mergeUpperEnd, *dev_mergeUpperBegin;
point_t *out_hullLower, *out_hullUpper, *dev_out_hullLower, *dev_out_hullUpper;
//allocate memory on CPU
//out_hullLower = new point_t[SIZE];
//out_hullUpper = new point_t[SIZE];
out_hullLower = (point_t*)calloc(SIZE,sizeof(point_t));
out_hullUpper = (point_t*)calloc(SIZE,sizeof(point_t));
out_hullSizeLower = new int[num_threads];
out_hullSizeUpper = new int[num_threads];
mergeLowerEnd = (int*)calloc(num_threads - 1, sizeof(int));
mergeLowerBegin = (int*)calloc(num_threads - 1, sizeof(int));
mergeUpperEnd = (int*)calloc(num_threads - 1, sizeof(int));
mergeUpperBegin = (int*)calloc(num_threads - 1, sizeof(int));
//allocate memory on GPU
hipMalloc((void **)&dev_out_hullLower, SIZE * sizeof(point_t));
hipMalloc((void **)&dev_out_hullUpper, SIZE * sizeof(point_t));
hipMalloc((void **)&dev_out_hullSizeLower, num_threads*sizeof(int));
hipMalloc((void **)&dev_out_hullSizeUpper, num_threads*sizeof(int));
hipMalloc((void **)&dev_mergeLowerEnd, (num_threads - 1)*sizeof(int));
hipMalloc((void **)&dev_mergeLowerBegin, (num_threads - 1)*sizeof(int));
hipMalloc((void **)&dev_mergeUpperEnd, (num_threads - 1)*sizeof(int));
hipMalloc((void **)&dev_mergeUpperBegin, (num_threads - 1)*sizeof(int));
//Create events to measure time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//Get starting tick
hipEventRecord(start);
//copy host memory to device memory
hipMemcpy(dev_out_hullLower, out_hullLower, SIZE * sizeof(point_t), hipMemcpyHostToDevice);
hipMemcpy(dev_out_hullUpper, out_hullUpper, SIZE * sizeof(point_t), hipMemcpyHostToDevice);
hipMemcpy(dev_out_hullSizeLower, out_hullSizeLower, num_threads*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_out_hullSizeUpper, out_hullSizeUpper, num_threads*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_mergeLowerEnd, mergeLowerEnd, (num_threads-1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_mergeLowerBegin, mergeLowerBegin, (num_threads-1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_mergeUpperEnd, mergeUpperEnd, (num_threads - 1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_mergeUpperBegin, mergeUpperBegin, (num_threads - 1)*sizeof(int), hipMemcpyHostToDevice);
//Create streams
hipStream_t st1, st2;
hipStreamCreateWithFlags(&st1, hipStreamNonBlocking);
hipStreamCreateWithFlags(&st2, hipStreamNonBlocking);
// call the cuda kernels to compute convex hull
hipLaunchKernelGGL(( lowerHullonGPU) , dim3(num_blocks), dim3(threads_block), 0, st1 , dev_points, npoints, dev_out_hullLower, dev_out_hullSizeLower);
hipLaunchKernelGGL(( upperHullonGPU) , dim3(num_blocks), dim3(threads_block), 0, st2 , dev_points, npoints, dev_out_hullUpper, dev_out_hullSizeUpper);
hipDeviceSynchronize(); // Blocks until the device has completed all preceding requested tasks
hipMemcpy(out_hullLower, dev_out_hullLower, npoints * sizeof(point_t), hipMemcpyDeviceToHost);
hipMemcpy(out_hullUpper, dev_out_hullUpper, npoints * sizeof(point_t), hipMemcpyDeviceToHost);
/*cout << "The lower hull computed on GPU is: " << endl;
for (int i = 0; i < npoints; i++)
{
cout << out_hullLower[i].x << "\t" << out_hullLower[i].y << endl;
}
cout << "The upper hull computed on GPU is: " << endl;
for (int i = 0; i < npoints; i++)
{
cout << out_hullUpper[i].x << "\t" << out_hullUpper[i].y << endl;
}*/
hipLaunchKernelGGL(( mergeLowerHull) , dim3(num_blocks), dim3(threads_block), 0, st1 , dev_out_hullLower, dev_out_hullSizeLower, dev_mergeLowerEnd, dev_mergeLowerBegin, npoints);
hipLaunchKernelGGL(( mergeUpperHull) , dim3(num_blocks), dim3(threads_block), 0, st2 , dev_out_hullUpper, dev_out_hullSizeUpper, dev_mergeUpperEnd, dev_mergeUpperBegin, npoints);
hipDeviceSynchronize();
//Destroy streams
hipStreamDestroy(st1);
hipStreamDestroy(st2);
//Copy device memory to host memory
hipMemcpy(out_hullSizeLower, dev_out_hullSizeLower, num_threads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(out_hullSizeUpper, dev_out_hullSizeUpper, num_threads*sizeof(int), hipMemcpyDeviceToHost);
/*for (int i = 0; i < num_threads; i++)
{
cout << "The size of the Lower hull is: " << out_hullSizeLower[i] << endl;
cout << "The size of the Upper hull is: " << out_hullSizeUpper[i] << endl;
}*/
/*hipMemcpy(out_hullLower, dev_out_hullLower, *out_hullSizeLower * sizeof(point_t), hipMemcpyDeviceToHost);
hipMemcpy(out_hullUpper, dev_out_hullUpper, *out_hullSizeUpper * sizeof(point_t), hipMemcpyDeviceToHost);*/
hipMemcpy(mergeLowerEnd, dev_mergeLowerEnd, (num_threads - 1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mergeLowerBegin, dev_mergeLowerBegin, (num_threads - 1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mergeUpperEnd, dev_mergeUpperEnd, (num_threads - 1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mergeUpperBegin, dev_mergeUpperBegin, (num_threads - 1)*sizeof(int), hipMemcpyDeviceToHost);
/*for (int i = 0; i < num_threads - 1; i++)
{
cout << "mergeLowerEnd = " << mergeLowerEnd[i] << endl;
cout << "mergeLowerBegin = " << mergeLowerBegin[i] << endl;
cout << "mergeUpperEnd = " << mergeUpperEnd[i] << endl;
cout << "mergeUpperBegin = " << mergeUpperBegin[i] << endl;
}*/
//Write merged hull into an array
int k = 0;
int offset = npoints / num_threads;
//Writing lower hull
for (int i = 0; i < num_threads; i++)
{
if (i == 0)
{
for (int j = 0; j < (out_hullSizeLower[i] - mergeLowerEnd[i]); j++)
{
out_hull[k] = out_hullLower[j];
k++;
}
}
else if (i > 0 && i < num_threads-1)
{
for (int j = mergeLowerBegin[i-1]; j < (out_hullSizeLower[i] - mergeLowerEnd[i]); j++)
{
out_hull[k] = out_hullLower[i*offset + j];
k++;
}
}
else
{
for (int j = mergeLowerBegin[i-1]; j < out_hullSizeLower[i]; j++)
{
out_hull[k] = out_hullLower[i*offset + j];
k++;
}
}
}
cout << "The lower convex hull computed on GPU is: " << endl;
for (int i = 0; i < k; i++)
{
cout << out_hull[i].x << "\t" << out_hull[i].y << endl;
}
//Writing upper hull
for (int i = 0; i < num_threads; i++)
{
if (i == 0)
{
for (int j = 0; j < (out_hullSizeUpper[i] - mergeUpperEnd[i]); j++)
{
out_hull[k] = out_hullUpper[j];
k++;
}
}
else if (i > 0 && i < num_threads - 1)
{
for (int j = mergeUpperBegin[i-1]; j < (out_hullSizeUpper[i] - mergeUpperEnd[i]); j++)
{
out_hull[k] = out_hullUpper[i*offset + j];
k++;
}
}
else
{
for (int j = mergeUpperBegin[i-1]; j < out_hullSizeUpper[i]; j++)
{
out_hull[k] = out_hullUpper[i*offset + j];
k++;
}
}
}
cout << "The value of k is: " << k << endl;
cout << "The hull computed on GPU is: " << endl;
for (int i = 0; i < k; i++)
{
cout << out_hull[i].x << "\t" << out_hull[i].y << endl;
}
point_t *final_out_hull;
final_out_hull = new point_t[SIZE];
int final_hull_size;
quickSortOnCPU(out_hull, 0, k - 1);
convex_hull(out_hull, k, final_out_hull, &final_hull_size);
//get ending tick
hipEventRecord(stop);
hipEventSynchronize(stop);
//cout << "The size of final convex hull is: " << final_hull_size << endl;
//calculate elapsed time in ms
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "The convex hull computed on CPU+GPU is: " << endl;
for (int i = 0; i < final_hull_size; i++)
{
cout << final_out_hull[i].x << "\t" << final_out_hull[i].y << endl;
}
cout << "The elapsed time is: " << milliseconds << " ms" << endl;
// Free device memory
hipFree(dev_points);
hipFree(dev_out_hullLower);
hipFree(dev_out_hullUpper);
hipFree(dev_out_hullSizeLower);
hipFree(dev_out_hullSizeUpper);
hipFree(dev_mergeLowerEnd);
hipFree(dev_mergeLowerBegin);
hipFree(dev_mergeUpperEnd);
hipFree(dev_mergeUpperBegin);
//Free host memory
	delete[] final_out_hull;
	delete[] out_hullSizeLower;
	delete[] out_hullSizeUpper;
free(out_hullLower);
free(out_hullUpper);
free(mergeLowerEnd);
free(mergeLowerBegin);
free(mergeUpperEnd);
free(mergeUpperBegin);
// Reset the device
hipDeviceReset();
}
|
6564eb380d56b0fe5b6c17347ebcd28bfc0ec4aa.cu
|
#include "ConvexHull.h"
/* Three points are a counter-clockwise turn if ccw > 0, clockwise if
ccw < 0, and collinear if ccw = 0 because ccw is a determinant that
gives the signed area of the triangle formed by p1, p2 and p3.
*/
__device__ float ccw(point_t* p1, point_t* p2, point_t* p3)
{
return (p2->x - p1->x)*(p3->y - p1->y) - (p2->y - p1->y)*(p3->x - p1->x);
}
__global__ void lowerHullonGPU(point_t* points, int npoints, point_t* out_hull1, int* out_hullsize1)
{
//Calculate global index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//Number of elements handled by each thread
int elements_thread = npoints / (blockDim.x * gridDim.x);
//each thread has a different offset
int offset = idx*elements_thread;
//initialize the count to zero
out_hullsize1[idx] = 0;
// lower hull
for (int i = offset; i < offset+elements_thread; ++i)
{
/* while L contains at least two points and the sequence of last two points
of L and the point P[i] does not make a counter-clockwise turn:
remove the last point from L, append P[i] to L
*/
while (out_hullsize1[idx] >= 2 && ccw(&out_hull1[offset + out_hullsize1[idx] - 2], &out_hull1[offset + out_hullsize1[idx] - 1], &points[i]) <= 0)
{
--out_hullsize1[idx];
}
out_hull1[offset + (out_hullsize1[idx]++)] = points[i];
}
//out_hullsize1[idx]++;
}
__global__ void upperHullonGPU(point_t* points, int npoints, point_t* out_hull2, int* out_hullsize2)
{
//Calculate global index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//Number of elements handled by each thread
int elements_thread = npoints / (blockDim.x * gridDim.x);
//Each thread has a different offset
int offset = idx*elements_thread;
//initialize the count to zero
out_hullsize2[idx] = 0;
//out_hull2[0] = points[npoints - 1]; //first point for upper hull
// upper hull
// remove the last point of each list (it's the same as the first
// point of the other list, so start from npoints-2)
/* while U contains at least two points and the sequence of last two points
of U and the point P[i] does not make a counter-clockwise turn:
remove the last point from U, append P[i] to U
*/
//t=k+1 to begin the upper hull - make a turn by considering the immediate point
for (int i = npoints-offset-1; i >= npoints-offset-elements_thread; --i)
{
while (out_hullsize2[idx] >= 2 && ccw(&out_hull2[offset + out_hullsize2[idx] - 2], &out_hull2[offset + out_hullsize2[idx] - 1], &points[i]) <= 0)
{
--out_hullsize2[idx];
}
out_hull2[offset + (out_hullsize2[idx]++)] = points[i];
}
//out_hullsize2[idx]++;
}
__global__ void mergeLowerHull(point_t *hull_part, int *part_size, int *i, int *j, int npoints)
{
//Calculate the global index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Number of threads
int num_threads = blockDim.x*gridDim.x;
//Number of elements seen by each thread, but size of individual convex hulls may be smaller
int elements_thread = npoints / num_threads;
if (idx < num_threads - 1)
{
//Consider two sub-hulls in each thread
int offset = idx*elements_thread;
int next_offset = (idx + 1)*elements_thread;
//Loop conditions
bool condition1 = true, condition2 = true;
//Points to construct tangent
point_t *a, *b;
//For lower hull
a = &hull_part[offset + part_size[idx] - 1];//right most part of left hull
b = &hull_part[next_offset];//left most part of right hull
//Construct tangent
while (condition1 || condition2)
{
condition1 = false;
condition2 = false;
while (ccw(b, a, (a - 1)) > 0)
{
a = (a - 1);
i[idx]++;
condition1 = true;
}
while (ccw(a, b, (b + 1)) <= 0)
{
b = (b + 1);
j[idx]++;
condition2 = true;
}
}
//printf("idx = %d and i = %d and j = %d\n", idx, i[idx], j[idx]);
}
}
__global__ void mergeUpperHull(point_t *hull_part, int *part_size, int *i, int *j, int npoints)
{
//Calculate the global index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
//Number of elements seen by each thread, but size of individual convex hulls is smaller
int elements_thread = npoints / num_threads;
if (idx < num_threads - 1)
{
//Consider two sub-hulls in each thread
int offset = idx*elements_thread;
int next_offset = (idx + 1)*elements_thread;
//Loop conditions
bool condition1 = true, condition2 = true;
//Points to construct tangent
point_t *a, *b;
//For upper hull
a = &hull_part[offset + part_size[idx] - 1];//left most part of right hull
b = &hull_part[next_offset];//right most part of left hull
//Construct tangent
while (condition1 || condition2)
{
condition1 = false;
condition2 = false;
while (ccw(b, a, (a - 1)) > 0)
{
a = (a - 1);
i[idx]++;
condition1 = true;
}
while (ccw(a, b, (b + 1)) <= 0)
{
b = (b + 1);
j[idx]++;
condition2 = true;
}
}
//printf("idx = %d and i = %d and j = %d\n", idx, i[idx], j[idx]);
}
}
// Selection sort used when depth gets too big or the number of elements drops
// below a threshold.
__device__ void selection_sort(point_t *points, int left, int right)
{
for (int i = left; i <= right; ++i)
{
point_t min_val = points[i];
int min_idx = i;
// Find the smallest value in the range [left, right].
for (int j = i + 1; j <= right; ++j)
{
point_t temp = points[j];
if ((temp.x < min_val.x) || ((temp.x == min_val.x) && (temp.y < min_val.y)))
{
min_idx = j;
min_val = temp;
}
}
// Swap the values.
if (i != min_idx)
{
points[min_idx] = points[i];
points[i] = min_val;
}
}
}
// Very basic quicksort algorithm, recursively launching the next level.
__global__ void quickSortOnGPU(point_t *points, int left, int right, int depth)
{
	// If we're too deep or there are few elements left, fall back to a simple selection sort...
if (depth >= MAX_DEPTH || right - left <= INSERTION_SORT)
{
selection_sort(points, left, right);
return;
}
point_t *lptr = points + left;
point_t *rptr = points + right;
point_t pivot = points[(left + right) / 2];
// Do the partitioning.
while (lptr <= rptr)
{
// Find the next left- and right-hand values to swap
point_t lval = *lptr;
point_t rval = *rptr;
// Move the left pointer as long as the pointed element is smaller than the pivot.
while ((lval.x < pivot.x) || ((lval.x == pivot.x) && (lval.y < pivot.y)))
{
lptr++;
lval = *lptr;
}
// Move the right pointer as long as the pointed element is larger than the pivot.
while ((rval.x > pivot.x) || ((rval.x == pivot.x) && (rval.y > pivot.y)))
{
rptr--;
rval = *rptr;
}
// If the swap points are valid, do the swap!
if (lptr <= rptr)
{
*lptr++ = rval;
*rptr-- = lval;
}
}
// Now the recursive part
int nright = rptr - points;
int nleft = lptr - points;
// Launch a new block to sort the left part.
if (left < (rptr - points))
{
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
quickSortOnGPU <<< 1, 1, 0, s >>>(points, left, nright, depth + 1);
cudaStreamDestroy(s);
}
// Launch a new block to sort the right part.
if ((lptr - points) < right)
{
cudaStream_t s1;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
quickSortOnGPU <<< 1, 1, 0, s1 >>>(points, nleft, right, depth + 1);
cudaStreamDestroy(s1);
}
}
// Call the quicksort kernel from the host.
void runOnGPU(point_t *points, int npoints, point_t *out_hull, int *out_hullsize)
{
// Get device properties
int device_count = 0, device = -1;
cudaGetDeviceCount(&device_count);
for (int i = 0; i < device_count; ++i)
{
cudaDeviceProp properties; //instance of the structure
cudaGetDeviceProperties(&properties, i);
if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5))
{
device = i;
cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl;
break;
}
cout << "GPU " << i << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl;
}
if (device == -1)
{
cerr << "Quicksort requires GPU devices with compute SM 3.5 or higher. Exiting..." << std::endl;
exit(EXIT_SUCCESS);
}
cudaSetDevice(device);//Set the device to run computations
// Allocate GPU memory.
point_t *dev_points;
cudaMalloc((void **)&dev_points, npoints * sizeof(point_t));
// Copy data to device memory
cudaMemcpy(dev_points, points, npoints * sizeof(point_t), cudaMemcpyHostToDevice);
// Prepare Cuda Dynamic Program for the maximum depth of MAX_DEPTH.
cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH);
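	// Editorial note: the device-side launches inside quickSortOnGPU use CUDA
	// Dynamic Parallelism, which also requires building with relocatable device
	// code (e.g. nvcc -rdc=true) and linking cudadevrt in addition to the
	// SM 3.5 check above; cudaLimitDevRuntimeSyncDepth bounds how deeply nested
	// launches may call cudaDeviceSynchronize().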
// Launch on device
int left = 0;
int right = npoints - 1;
//cout << "Launching kernel on the GPU" << endl;
// Launch CUDA kernel to sort the points
quickSortOnGPU <<< 1, 1 >>> (dev_points, left, right, 0);
cudaDeviceSynchronize(); // Blocks until the device has completed all preceding requested tasks
/*cudaMemcpy(points, dev_points, npoints * sizeof(point_t), cudaMemcpyDeviceToHost);
printf("The sorted points are:");
for (int i = 0; i < npoints; i++)
{
printf("%d and %d\n", points[i].x, points[i].y);
}*/
// Kernel parameters
int threads_block = 256;
int num_blocks = 1024;
int num_threads = threads_block*num_blocks;
// Convex hull parameters
int *out_hullSizeLower, *out_hullSizeUpper, *dev_out_hullSizeLower, *dev_out_hullSizeUpper;
int *mergeLowerEnd, *mergeLowerBegin, *dev_mergeLowerEnd, *dev_mergeLowerBegin;
int *mergeUpperEnd, *mergeUpperBegin, *dev_mergeUpperEnd, *dev_mergeUpperBegin;
point_t *out_hullLower, *out_hullUpper, *dev_out_hullLower, *dev_out_hullUpper;
//allocate memory on CPU
//out_hullLower = new point_t[SIZE];
//out_hullUpper = new point_t[SIZE];
out_hullLower = (point_t*)calloc(SIZE,sizeof(point_t));
out_hullUpper = (point_t*)calloc(SIZE,sizeof(point_t));
out_hullSizeLower = new int[num_threads];
out_hullSizeUpper = new int[num_threads];
mergeLowerEnd = (int*)calloc(num_threads - 1, sizeof(int));
mergeLowerBegin = (int*)calloc(num_threads - 1, sizeof(int));
mergeUpperEnd = (int*)calloc(num_threads - 1, sizeof(int));
mergeUpperBegin = (int*)calloc(num_threads - 1, sizeof(int));
//allocate memory on GPU
cudaMalloc((void **)&dev_out_hullLower, SIZE * sizeof(point_t));
cudaMalloc((void **)&dev_out_hullUpper, SIZE * sizeof(point_t));
cudaMalloc((void **)&dev_out_hullSizeLower, num_threads*sizeof(int));
cudaMalloc((void **)&dev_out_hullSizeUpper, num_threads*sizeof(int));
cudaMalloc((void **)&dev_mergeLowerEnd, (num_threads - 1)*sizeof(int));
cudaMalloc((void **)&dev_mergeLowerBegin, (num_threads - 1)*sizeof(int));
cudaMalloc((void **)&dev_mergeUpperEnd, (num_threads - 1)*sizeof(int));
cudaMalloc((void **)&dev_mergeUpperBegin, (num_threads - 1)*sizeof(int));
//Create events to measure time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Get starting tick
cudaEventRecord(start);
//copy host memory to device memory
cudaMemcpy(dev_out_hullLower, out_hullLower, SIZE * sizeof(point_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_out_hullUpper, out_hullUpper, SIZE * sizeof(point_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_out_hullSizeLower, out_hullSizeLower, num_threads*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_out_hullSizeUpper, out_hullSizeUpper, num_threads*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mergeLowerEnd, mergeLowerEnd, (num_threads-1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mergeLowerBegin, mergeLowerBegin, (num_threads-1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mergeUpperEnd, mergeUpperEnd, (num_threads - 1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mergeUpperBegin, mergeUpperBegin, (num_threads - 1)*sizeof(int), cudaMemcpyHostToDevice);
//Create streams
cudaStream_t st1, st2;
cudaStreamCreateWithFlags(&st1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&st2, cudaStreamNonBlocking);
// call the cuda kernels to compute convex hull
lowerHullonGPU <<< num_blocks, threads_block, 0, st1 >>> (dev_points, npoints, dev_out_hullLower, dev_out_hullSizeLower);
upperHullonGPU <<< num_blocks, threads_block, 0, st2 >>> (dev_points, npoints, dev_out_hullUpper, dev_out_hullSizeUpper);
cudaDeviceSynchronize(); // Blocks until the device has completed all preceding requested tasks
cudaMemcpy(out_hullLower, dev_out_hullLower, npoints * sizeof(point_t), cudaMemcpyDeviceToHost);
cudaMemcpy(out_hullUpper, dev_out_hullUpper, npoints * sizeof(point_t), cudaMemcpyDeviceToHost);
/*cout << "The lower hull computed on GPU is: " << endl;
for (int i = 0; i < npoints; i++)
{
cout << out_hullLower[i].x << "\t" << out_hullLower[i].y << endl;
}
cout << "The upper hull computed on GPU is: " << endl;
for (int i = 0; i < npoints; i++)
{
cout << out_hullUpper[i].x << "\t" << out_hullUpper[i].y << endl;
}*/
mergeLowerHull <<< num_blocks, threads_block, 0, st1 >>> (dev_out_hullLower, dev_out_hullSizeLower, dev_mergeLowerEnd, dev_mergeLowerBegin, npoints);
mergeUpperHull <<< num_blocks, threads_block, 0, st2 >>> (dev_out_hullUpper, dev_out_hullSizeUpper, dev_mergeUpperEnd, dev_mergeUpperBegin, npoints);
cudaDeviceSynchronize();
//Destroy streams
cudaStreamDestroy(st1);
cudaStreamDestroy(st2);
//Copy device memory to host memory
cudaMemcpy(out_hullSizeLower, dev_out_hullSizeLower, num_threads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(out_hullSizeUpper, dev_out_hullSizeUpper, num_threads*sizeof(int), cudaMemcpyDeviceToHost);
/*for (int i = 0; i < num_threads; i++)
{
cout << "The size of the Lower hull is: " << out_hullSizeLower[i] << endl;
cout << "The size of the Upper hull is: " << out_hullSizeUpper[i] << endl;
}*/
/*cudaMemcpy(out_hullLower, dev_out_hullLower, *out_hullSizeLower * sizeof(point_t), cudaMemcpyDeviceToHost);
cudaMemcpy(out_hullUpper, dev_out_hullUpper, *out_hullSizeUpper * sizeof(point_t), cudaMemcpyDeviceToHost);*/
cudaMemcpy(mergeLowerEnd, dev_mergeLowerEnd, (num_threads - 1)*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mergeLowerBegin, dev_mergeLowerBegin, (num_threads - 1)*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mergeUpperEnd, dev_mergeUpperEnd, (num_threads - 1)*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mergeUpperBegin, dev_mergeUpperBegin, (num_threads - 1)*sizeof(int), cudaMemcpyDeviceToHost);
/*for (int i = 0; i < num_threads - 1; i++)
{
cout << "mergeLowerEnd = " << mergeLowerEnd[i] << endl;
cout << "mergeLowerBegin = " << mergeLowerBegin[i] << endl;
cout << "mergeUpperEnd = " << mergeUpperEnd[i] << endl;
cout << "mergeUpperBegin = " << mergeUpperBegin[i] << endl;
}*/
//Write merged hull into an array
int k = 0;
int offset = npoints / num_threads;
//Writing lower hull
for (int i = 0; i < num_threads; i++)
{
if (i == 0)
{
for (int j = 0; j < (out_hullSizeLower[i] - mergeLowerEnd[i]); j++)
{
out_hull[k] = out_hullLower[j];
k++;
}
}
else if (i > 0 && i < num_threads-1)
{
for (int j = mergeLowerBegin[i-1]; j < (out_hullSizeLower[i] - mergeLowerEnd[i]); j++)
{
out_hull[k] = out_hullLower[i*offset + j];
k++;
}
}
else
{
for (int j = mergeLowerBegin[i-1]; j < out_hullSizeLower[i]; j++)
{
out_hull[k] = out_hullLower[i*offset + j];
k++;
}
}
}
cout << "The lower convex hull computed on GPU is: " << endl;
for (int i = 0; i < k; i++)
{
cout << out_hull[i].x << "\t" << out_hull[i].y << endl;
}
//Writing upper hull
for (int i = 0; i < num_threads; i++)
{
if (i == 0)
{
for (int j = 0; j < (out_hullSizeUpper[i] - mergeUpperEnd[i]); j++)
{
out_hull[k] = out_hullUpper[j];
k++;
}
}
else if (i > 0 && i < num_threads - 1)
{
for (int j = mergeUpperBegin[i-1]; j < (out_hullSizeUpper[i] - mergeUpperEnd[i]); j++)
{
out_hull[k] = out_hullUpper[i*offset + j];
k++;
}
}
else
{
for (int j = mergeUpperBegin[i-1]; j < out_hullSizeUpper[i]; j++)
{
out_hull[k] = out_hullUpper[i*offset + j];
k++;
}
}
}
cout << "The value of k is: " << k << endl;
cout << "The hull computed on GPU is: " << endl;
for (int i = 0; i < k; i++)
{
cout << out_hull[i].x << "\t" << out_hull[i].y << endl;
}
point_t *final_out_hull;
final_out_hull = new point_t[SIZE];
int final_hull_size;
quickSortOnCPU(out_hull, 0, k - 1);
convex_hull(out_hull, k, final_out_hull, &final_hull_size);
//get ending tick
cudaEventRecord(stop);
cudaEventSynchronize(stop);
//cout << "The size of final convex hull is: " << final_hull_size << endl;
//calculate elapsed time in ms
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "The convex hull computed on CPU+GPU is: " << endl;
for (int i = 0; i < final_hull_size; i++)
{
cout << final_out_hull[i].x << "\t" << final_out_hull[i].y << endl;
}
cout << "The elapsed time is: " << milliseconds << " ms" << endl;
// Free device memory
cudaFree(dev_points);
cudaFree(dev_out_hullLower);
cudaFree(dev_out_hullUpper);
cudaFree(dev_out_hullSizeLower);
cudaFree(dev_out_hullSizeUpper);
cudaFree(dev_mergeLowerEnd);
cudaFree(dev_mergeLowerBegin);
cudaFree(dev_mergeUpperEnd);
cudaFree(dev_mergeUpperBegin);
//Free host memory
	delete[] final_out_hull;
	delete[] out_hullSizeLower;
	delete[] out_hullSizeUpper;
free(out_hullLower);
free(out_hullUpper);
free(mergeLowerEnd);
free(mergeLowerBegin);
free(mergeUpperEnd);
free(mergeUpperBegin);
// Reset the device
cudaDeviceReset();
}
|
2338a312d4f2f3d1a0cf194b8b9896cd46d687d2.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include "../common/api_entry.h" // XGBAPIThreadLocalEntry
#include "../common/threading_utils.h"
#include "../data/device_adapter.cuh"
#include "../data/proxy_dmatrix.h"
#include "c_api_error.h"
#include "c_api_utils.h"
#include "xgboost/c_api.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/learner.h"
namespace xgboost {
void XGBBuildInfoDevice(Json *p_info) {
auto &info = *p_info;
info["USE_ROCM"] = true;
std::vector<Json> v{Json{Integer{THRUST_MAJOR_VERSION}}, Json{Integer{THRUST_MINOR_VERSION}},
Json{Integer{THRUST_SUBMINOR_VERSION}}};
info["THRUST_VERSION"] = v;
v = {Json{Integer{dh::CUDAVersion().first}}, Json{Integer{dh::CUDAVersion().second}}};
info["TORCH_HIP_VERSION"] = v;
#if defined(XGBOOST_USE_NCCL)
info["USE_NCCL"] = Boolean{true};
v = {Json{Integer{NCCL_MAJOR}}, Json{Integer{NCCL_MINOR}}, Json{Integer{NCCL_PATCH}}};
info["NCCL_VERSION"] = v;
#else
info["USE_NCCL"] = Boolean{false};
#endif
#if defined(XGBOOST_USE_RMM)
info["USE_RMM"] = Boolean{true};
v = {Json{Integer{RMM_VERSION_MAJOR}}, Json{Integer{RMM_VERSION_MINOR}},
Json{Integer{RMM_VERSION_PATCH}}};
info["RMM_VERSION"] = v;
#else
info["USE_RMM"] = Boolean{false};
#endif
}
void XGBoostAPIGuard::SetGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
hipGetDevice(&device_id_);
}
void XGBoostAPIGuard::RestoreGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
hipSetDevice(device_id_);
}
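// Editorial note: SetGPUAttribute and RestoreGPUAttribute appear to act as a
// save/restore pair around C API calls, so the caller's current device
// selection is left untouched even if XGBoost switches devices internally;
// since both calls swallow errors, the guard degrades to a no-op on CPU-only
// machines.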
} // namespace xgboost
using namespace xgboost; // NOLINT
XGB_DLL int XGDMatrixCreateFromCudaColumnar(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
xgboost_CHECK_C_ARG_PTR(c_json_config);
xgboost_CHECK_C_ARG_PTR(data);
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CudfAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
XGB_DLL int XGDMatrixCreateFromCudaArrayInterface(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CupyAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
int InplacePreidctCuda(BoosterHandle handle, char const *c_array_interface,
char const *c_json_config, std::shared_ptr<DMatrix> p_m,
xgboost::bst_ulong const **out_shape, xgboost::bst_ulong *out_dim,
const float **out_result) {
API_BEGIN();
CHECK_HANDLE();
if (!p_m) {
p_m.reset(new data::DMatrixProxy);
}
auto proxy = dynamic_cast<data::DMatrixProxy *>(p_m.get());
CHECK(proxy) << "Invalid input type for inplace predict.";
proxy->SetCUDAArray(c_array_interface);
auto config = Json::Load(StringView{c_json_config});
CHECK_EQ(get<Integer const>(config["cache_id"]), 0) << "Cache ID is not supported yet";
auto *learner = static_cast<Learner *>(handle);
HostDeviceVector<float> *p_predt{nullptr};
auto type = PredictionType(RequiredArg<Integer>(config, "type", __func__));
float missing = GetMissing(config);
learner->InplacePredict(p_m, type, missing, &p_predt,
RequiredArg<Integer>(config, "iteration_begin", __func__),
RequiredArg<Integer>(config, "iteration_end", __func__));
CHECK(p_predt);
CHECK(p_predt->DeviceCanRead() && !p_predt->HostCanRead());
auto &shape = learner->GetThreadLocal().prediction_shape;
size_t n_samples = p_m->Info().num_row_;
auto chunksize = n_samples == 0 ? 0 : p_predt->Size() / n_samples;
bool strict_shape = RequiredArg<Boolean>(config, "strict_shape", __func__);
xgboost_CHECK_C_ARG_PTR(out_result);
xgboost_CHECK_C_ARG_PTR(out_shape);
xgboost_CHECK_C_ARG_PTR(out_dim);
CalcPredictShape(strict_shape, type, n_samples, p_m->Info().num_col_, chunksize,
learner->Groups(), learner->BoostedRounds(), &shape, out_dim);
*out_shape = dmlc::BeginPtr(shape);
*out_result = p_predt->ConstDevicePointer();
API_END();
}
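// Editorial note: the JSON config consumed by InplacePreidctCuda reads at least
// the keys "cache_id", "type", "iteration_begin", "iteration_end" and
// "strict_shape" (see the RequiredArg calls above), plus whatever GetMissing()
// extracts for the missing-value placeholder; callers are expected to supply
// all of them.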
XGB_DLL int XGBoosterPredictFromCudaColumnar(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim,
const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
xgboost_CHECK_C_ARG_PTR(c_json_config);
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
return InplacePreidctCuda(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
XGB_DLL int XGBoosterPredictFromCudaArray(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim, const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
xgboost_CHECK_C_ARG_PTR(out_result);
return InplacePreidctCuda(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
|
2338a312d4f2f3d1a0cf194b8b9896cd46d687d2.cu
|
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include "../common/api_entry.h" // XGBAPIThreadLocalEntry
#include "../common/threading_utils.h"
#include "../data/device_adapter.cuh"
#include "../data/proxy_dmatrix.h"
#include "c_api_error.h"
#include "c_api_utils.h"
#include "xgboost/c_api.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/learner.h"
namespace xgboost {
void XGBBuildInfoDevice(Json *p_info) {
auto &info = *p_info;
info["USE_CUDA"] = true;
std::vector<Json> v{Json{Integer{THRUST_MAJOR_VERSION}}, Json{Integer{THRUST_MINOR_VERSION}},
Json{Integer{THRUST_SUBMINOR_VERSION}}};
info["THRUST_VERSION"] = v;
v = {Json{Integer{dh::CUDAVersion().first}}, Json{Integer{dh::CUDAVersion().second}}};
info["CUDA_VERSION"] = v;
#if defined(XGBOOST_USE_NCCL)
info["USE_NCCL"] = Boolean{true};
v = {Json{Integer{NCCL_MAJOR}}, Json{Integer{NCCL_MINOR}}, Json{Integer{NCCL_PATCH}}};
info["NCCL_VERSION"] = v;
#else
info["USE_NCCL"] = Boolean{false};
#endif
#if defined(XGBOOST_USE_RMM)
info["USE_RMM"] = Boolean{true};
v = {Json{Integer{RMM_VERSION_MAJOR}}, Json{Integer{RMM_VERSION_MINOR}},
Json{Integer{RMM_VERSION_PATCH}}};
info["RMM_VERSION"] = v;
#else
info["USE_RMM"] = Boolean{false};
#endif
}
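// The resulting JSON looks roughly like (values are placeholders, depending on
// the build flags):
//   {"USE_CUDA": true, "THRUST_VERSION": [major, minor, subminor],
//    "CUDA_VERSION": [major, minor], "USE_NCCL": ..., "USE_RMM": ...}
// with the NCCL/RMM version triples present only when those flags are enabled.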
void XGBoostAPIGuard::SetGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
cudaGetDevice(&device_id_);
}
void XGBoostAPIGuard::RestoreGPUAttribute() {
// Not calling `safe_cuda` to avoid unnecessary exception handling overhead.
// If errors, do nothing, assuming running on CPU only machine.
cudaSetDevice(device_id_);
}
} // namespace xgboost
using namespace xgboost; // NOLINT
XGB_DLL int XGDMatrixCreateFromCudaColumnar(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
xgboost_CHECK_C_ARG_PTR(c_json_config);
xgboost_CHECK_C_ARG_PTR(data);
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CudfAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
XGB_DLL int XGDMatrixCreateFromCudaArrayInterface(char const *data,
char const* c_json_config,
DMatrixHandle *out) {
API_BEGIN();
std::string json_str{data};
auto config = Json::Load(StringView{c_json_config});
float missing = GetMissing(config);
auto n_threads = OptionalArg<Integer, std::int64_t>(config, "nthread", 0);
data::CupyAdapter adapter(json_str);
*out =
new std::shared_ptr<DMatrix>(DMatrix::Create(&adapter, missing, n_threads));
API_END();
}
int InplacePreidctCuda(BoosterHandle handle, char const *c_array_interface,
char const *c_json_config, std::shared_ptr<DMatrix> p_m,
xgboost::bst_ulong const **out_shape, xgboost::bst_ulong *out_dim,
const float **out_result) {
API_BEGIN();
CHECK_HANDLE();
if (!p_m) {
p_m.reset(new data::DMatrixProxy);
}
auto proxy = dynamic_cast<data::DMatrixProxy *>(p_m.get());
CHECK(proxy) << "Invalid input type for inplace predict.";
proxy->SetCUDAArray(c_array_interface);
auto config = Json::Load(StringView{c_json_config});
CHECK_EQ(get<Integer const>(config["cache_id"]), 0) << "Cache ID is not supported yet";
auto *learner = static_cast<Learner *>(handle);
HostDeviceVector<float> *p_predt{nullptr};
auto type = PredictionType(RequiredArg<Integer>(config, "type", __func__));
float missing = GetMissing(config);
learner->InplacePredict(p_m, type, missing, &p_predt,
RequiredArg<Integer>(config, "iteration_begin", __func__),
RequiredArg<Integer>(config, "iteration_end", __func__));
CHECK(p_predt);
CHECK(p_predt->DeviceCanRead() && !p_predt->HostCanRead());
auto &shape = learner->GetThreadLocal().prediction_shape;
size_t n_samples = p_m->Info().num_row_;
auto chunksize = n_samples == 0 ? 0 : p_predt->Size() / n_samples;
bool strict_shape = RequiredArg<Boolean>(config, "strict_shape", __func__);
xgboost_CHECK_C_ARG_PTR(out_result);
xgboost_CHECK_C_ARG_PTR(out_shape);
xgboost_CHECK_C_ARG_PTR(out_dim);
CalcPredictShape(strict_shape, type, n_samples, p_m->Info().num_col_, chunksize,
learner->Groups(), learner->BoostedRounds(), &shape, out_dim);
*out_shape = dmlc::BeginPtr(shape);
*out_result = p_predt->ConstDevicePointer();
API_END();
}
XGB_DLL int XGBoosterPredictFromCudaColumnar(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim,
const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
xgboost_CHECK_C_ARG_PTR(c_json_config);
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
return InplacePreidctCuda(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
XGB_DLL int XGBoosterPredictFromCudaArray(BoosterHandle handle, char const *c_json_strs,
char const *c_json_config, DMatrixHandle m,
xgboost::bst_ulong const **out_shape,
xgboost::bst_ulong *out_dim, const float **out_result) {
std::shared_ptr<DMatrix> p_m{nullptr};
if (m) {
p_m = *static_cast<std::shared_ptr<DMatrix> *>(m);
}
xgboost_CHECK_C_ARG_PTR(out_result);
return InplacePreidctCuda(handle, c_json_strs, c_json_config, p_m, out_shape, out_dim,
out_result);
}
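/*
 * Minimal usage sketch for XGBoosterPredictFromCudaArray, assuming `booster`
 * is a trained BoosterHandle and `cuda_array_json` is a __cuda_array_interface__
 * JSON string for data already resident on the GPU; the variable names and the
 * literal config values below are illustrative assumptions only. The config
 * keys mirror the RequiredArg<> lookups in InplacePreidctCuda above.
 *
 *   char const *config =
 *       "{\"type\": 0, \"iteration_begin\": 0, \"iteration_end\": 0,"
 *       " \"strict_shape\": false, \"cache_id\": 0, \"missing\": 0.0}";
 *   xgboost::bst_ulong const *shape = nullptr;
 *   xgboost::bst_ulong dim = 0;
 *   float const *result = nullptr;  // device pointer on success
 *   int rc = XGBoosterPredictFromCudaArray(booster, cuda_array_json, config,
 *                                          nullptr, &shape, &dim, &result);
 *   // rc == 0 on success; the result buffer remains owned by the library, so
 *   // copy it off the device before the next prediction call on this thread.
 */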
|
c674fc749cb8b12bd35cf77bb619243cfef4e6e0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepBDGPU.cuh"
#include "hoomd/VectorMath.h"
#include "hoomd/HOOMDMath.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/RNGIdentifiers.h"
using namespace hoomd;
#include <assert.h>
/*! \file TwoStepBDGPU.cu
\brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU.
*/
//! Takes one full timestep forward in the Brownian dynamics integration on a group of particles
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param nwork Number of group members to process on this GPU
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertia of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
\param offset Offset of this GPU into group indices
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_brownian_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int nwork,
const Scalar4 *d_net_force,
const Scalar3 *d_gamma_r,
Scalar4 *d_orientation,
Scalar4 *d_torque,
const Scalar3 *d_inertia,
Scalar4 *d_angmom,
const Scalar *d_gamma,
const unsigned int n_types,
const bool use_lambda,
const Scalar lambda,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const bool aniso,
const Scalar deltaT,
unsigned int D,
const bool d_noiseless_t,
const bool d_noiseless_r,
const unsigned int offset)
{
extern __shared__ char s_data[];
Scalar3 *s_gammas_r = (Scalar3 *)s_data;
Scalar *s_gammas = (Scalar *)(s_gammas_r + n_types);
if (!use_lambda)
{
// read in the gamma (1 dimensional array), stored in s_gammas[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// read in the gamma_r values, stored in s_gammas_r[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x];
}
__syncthreads();
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int local_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (local_idx < nwork)
{
const unsigned int group_idx = local_idx + offset;
// determine the particle to work on
unsigned int idx = d_group_members[group_idx];
Scalar4 postype = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
int3 image = d_image[idx];
// read in the tag of our particle.
unsigned int ptag = d_tag[idx];
// compute the random force
RandomGenerator rng(RNGIdentifier::TwoStepBD, seed, ptag, timestep);
UniformDistribution<Scalar> uniform(Scalar(-1), Scalar(1));
Scalar rx = uniform(rng);
Scalar ry = uniform(rng);
Scalar rz = uniform(rng);
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// determine gamma from diameter
gamma = lambda*d_diameter[idx];
}
else
{
// determine gamma from type
unsigned int typ = __scalar_as_int(postype.w);
gamma = s_gammas[typ];
}
// compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 for the uniform distribution on (-1,1);
// it is not the dimensionality of the system)
Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT);
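// worked check: for r ~ Uniform(-1,1), <r^2> = 1/3, so <Fr_x^2> = coeff^2/3
// = 2*gamma*T/deltaT per component, the expected Brownian fluctuation over one
// timestep.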
if (d_noiseless_t)
coeff = Scalar(0.0);
Scalar Fr_x = rx*coeff;
Scalar Fr_y = ry*coeff;
Scalar Fr_z = rz*coeff;
if (D < 3)
Fr_z = Scalar(0.0);
// update position
postype.x += (net_force.x + Fr_x) * deltaT / gamma;
postype.y += (net_force.y + Fr_y) * deltaT / gamma;
postype.z += (net_force.z + Fr_z) * deltaT / gamma;
// particles may have been moved slightly outside the box by the above steps, wrap them back into place
box.wrap(postype, image);
// draw a new random velocity for particle j
Scalar mass = vel.w;
Scalar sigma = fast::sqrt(T/mass);
NormalDistribution<Scalar> normal(sigma);
vel.x = normal(rng);
vel.y = normal(rng);
if (D > 2)
vel.z = normal(rng);
else
vel.z = 0;
// write out data
d_pos[idx] = postype;
d_vel[idx] = vel;
d_image[idx] = image;
// rotational random force and orientation quaternion updates
if (aniso)
{
unsigned int type_r = __scalar_as_int(d_pos[idx].w);
// gamma_r is stored in the second half of s_gammas a.k.a s_gammas_r
Scalar3 gamma_r = s_gammas_r[type_r];
if (gamma_r.x > 0 || gamma_r.y > 0 || gamma_r.z > 0)
{
vec3<Scalar> p_vec;
quat<Scalar> q(d_orientation[idx]);
vec3<Scalar> t(d_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// check if the shape is degenerate
bool x_zero, y_zero, z_zero;
x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON);
Scalar3 sigma_r = make_scalar3(fast::sqrt(Scalar(2.0)*gamma_r.x*T/deltaT),
fast::sqrt(Scalar(2.0)*gamma_r.y*T/deltaT),
fast::sqrt(Scalar(2.0)*gamma_r.z*T/deltaT));
if (d_noiseless_r)
sigma_r = make_scalar3(0,0,0);
// original Gaussian random torque
// Gaussian random distribution is preferred in terms of preserving the exact math
vec3<Scalar> bf_torque;
bf_torque.x = NormalDistribution<Scalar>(sigma_r.x)(rng);
bf_torque.y = NormalDistribution<Scalar>(sigma_r.y)(rng);
bf_torque.z = NormalDistribution<Scalar>(sigma_r.z)(rng);
if (x_zero) bf_torque.x = 0;
if (y_zero) bf_torque.y = 0;
if (z_zero) bf_torque.z = 0;
// use the damping by gamma_r and rotate back to lab frame
// For Future Updates: take special care when gamma_r is anisotropic
bf_torque = rotate(q, bf_torque);
if (D < 3)
{
bf_torque.x = 0;
bf_torque.y = 0;
t.x = 0;
t.y = 0;
}
// do the integration for quaternion
q += Scalar(0.5) * deltaT * ((t + bf_torque) / vec3<Scalar>(gamma_r)) * q ;
q = q * (Scalar(1.0) / slow::sqrt(norm2(q)));
d_orientation[idx] = quat_to_scalar4(q);
// draw a new random ang_mom for particle j in body frame
p_vec.x = NormalDistribution<Scalar>(fast::sqrt(T * I.x))(rng);
p_vec.y = NormalDistribution<Scalar>(fast::sqrt(T * I.y))(rng);
p_vec.z = NormalDistribution<Scalar>(fast::sqrt(T * I.z))(rng);
if (x_zero) p_vec.x = 0;
if (y_zero) p_vec.y = 0;
if (z_zero) p_vec.z = 0;
// !! Note this ang_mom isn't well-behaved in 2D,
// !! because it may have an effective non-zero ang_mom in x,y
// store ang_mom quaternion
quat<Scalar> p = Scalar(2.0) * q * p_vec;
d_angmom[idx] = quat_to_scalar4(p);
}
}
}
}
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertia of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param langevin_args Collected arguments for gpu_brownian_step_one_kernel()
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
This is just a driver for gpu_brownian_step_one_kernel(), see it for details.
*/
hipError_t gpu_brownian_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim& box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const Scalar3 *d_gamma_r,
Scalar4 *d_orientation,
Scalar4 *d_torque,
const Scalar3 *d_inertia,
Scalar4 *d_angmom,
const langevin_step_two_args& langevin_args,
const bool aniso,
const Scalar deltaT,
const unsigned int D,
const bool d_noiseless_t,
const bool d_noiseless_r,
const GPUPartition& gpu_partition
)
{
unsigned int run_block_size = 256;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_brownian_step_one_kernel), dim3(grid), dim3(threads), (unsigned int)(sizeof(Scalar)*langevin_args.n_types + sizeof(Scalar3)*langevin_args.n_types), 0,
d_pos,
d_vel,
d_image,
box,
d_diameter,
d_tag,
d_group_members,
nwork,
d_net_force,
d_gamma_r,
d_orientation,
d_torque,
d_inertia,
d_angmom,
langevin_args.d_gamma,
langevin_args.n_types,
langevin_args.use_lambda,
langevin_args.lambda,
langevin_args.timestep,
langevin_args.seed,
langevin_args.T,
aniso,
deltaT,
D,
d_noiseless_t,
d_noiseless_r,
range.first);
}
return hipSuccess;
}
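// A minimal launch sketch for the kernel above, assuming `args` carries the
// same fields as langevin_step_two_args (the names here are illustrative, not
// part of the HOOMD-blue API). The dynamic shared memory must hold one Scalar
// gamma plus one Scalar3 gamma_r per particle type, exactly as the driver
// sizes it:
//
//     unsigned int block = 256;
//     dim3 grid(nwork / block + 1, 1, 1);
//     size_t shmem = sizeof(Scalar) * args.n_types
//                  + sizeof(Scalar3) * args.n_types;
//     hipLaunchKernelGGL(gpu_brownian_step_one_kernel, grid, dim3(block),
//                        (unsigned int)shmem, 0,
//                        /* same argument list as in gpu_brownian_step_one */);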
|
c674fc749cb8b12bd35cf77bb619243cfef4e6e0.cu
|
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepBDGPU.cuh"
#include "hoomd/VectorMath.h"
#include "hoomd/HOOMDMath.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/RNGIdentifiers.h"
using namespace hoomd;
#include <assert.h>
/*! \file TwoStepBDGPU.cu
\brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU.
*/
//! Takes one full timestep forward in the Brownian dynamics integration on a group of particles
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param nwork Number of group members to process on this GPU
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertia of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
\param offset Offset of this GPU into group indices
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_brownian_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int nwork,
const Scalar4 *d_net_force,
const Scalar3 *d_gamma_r,
Scalar4 *d_orientation,
Scalar4 *d_torque,
const Scalar3 *d_inertia,
Scalar4 *d_angmom,
const Scalar *d_gamma,
const unsigned int n_types,
const bool use_lambda,
const Scalar lambda,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const bool aniso,
const Scalar deltaT,
unsigned int D,
const bool d_noiseless_t,
const bool d_noiseless_r,
const unsigned int offset)
{
extern __shared__ char s_data[];
Scalar3 *s_gammas_r = (Scalar3 *)s_data;
Scalar *s_gammas = (Scalar *)(s_gammas_r + n_types);
if (!use_lambda)
{
// read in the gamma (1 dimensional array), stored in s_gammas[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// read in the gamma_r values, stored in s_gammas_r[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x];
}
__syncthreads();
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int local_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (local_idx < nwork)
{
const unsigned int group_idx = local_idx + offset;
// determine the particle to work on
unsigned int idx = d_group_members[group_idx];
Scalar4 postype = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
int3 image = d_image[idx];
// read in the tag of our particle.
unsigned int ptag = d_tag[idx];
// compute the random force
RandomGenerator rng(RNGIdentifier::TwoStepBD, seed, ptag, timestep);
UniformDistribution<Scalar> uniform(Scalar(-1), Scalar(1));
Scalar rx = uniform(rng);
Scalar ry = uniform(rng);
Scalar rz = uniform(rng);
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// determine gamma from diameter
gamma = lambda*d_diameter[idx];
}
else
{
// determine gamma from type
unsigned int typ = __scalar_as_int(postype.w);
gamma = s_gammas[typ];
}
// compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 for the uniform distribution on (-1,1);
// it is not the dimensionality of the system)
Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT);
if (d_noiseless_t)
coeff = Scalar(0.0);
Scalar Fr_x = rx*coeff;
Scalar Fr_y = ry*coeff;
Scalar Fr_z = rz*coeff;
if (D < 3)
Fr_z = Scalar(0.0);
// update position
postype.x += (net_force.x + Fr_x) * deltaT / gamma;
postype.y += (net_force.y + Fr_y) * deltaT / gamma;
postype.z += (net_force.z + Fr_z) * deltaT / gamma;
// particles may have been moved slightly outside the box by the above steps, wrap them back into place
box.wrap(postype, image);
// draw a new random velocity for particle j
Scalar mass = vel.w;
Scalar sigma = fast::sqrt(T/mass);
NormalDistribution<Scalar> normal(sigma);
vel.x = normal(rng);
vel.y = normal(rng);
if (D > 2)
vel.z = normal(rng);
else
vel.z = 0;
// write out data
d_pos[idx] = postype;
d_vel[idx] = vel;
d_image[idx] = image;
// rotational random force and orientation quaternion updates
if (aniso)
{
unsigned int type_r = __scalar_as_int(d_pos[idx].w);
// gamma_r is stored in the second half of s_gammas a.k.a s_gammas_r
Scalar3 gamma_r = s_gammas_r[type_r];
if (gamma_r.x > 0 || gamma_r.y > 0 || gamma_r.z > 0)
{
vec3<Scalar> p_vec;
quat<Scalar> q(d_orientation[idx]);
vec3<Scalar> t(d_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// check if the shape is degenerate
bool x_zero, y_zero, z_zero;
x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON);
Scalar3 sigma_r = make_scalar3(fast::sqrt(Scalar(2.0)*gamma_r.x*T/deltaT),
fast::sqrt(Scalar(2.0)*gamma_r.y*T/deltaT),
fast::sqrt(Scalar(2.0)*gamma_r.z*T/deltaT));
if (d_noiseless_r)
sigma_r = make_scalar3(0,0,0);
// original Gaussian random torque
// Gaussian random distribution is preferred in terms of preserving the exact math
vec3<Scalar> bf_torque;
bf_torque.x = NormalDistribution<Scalar>(sigma_r.x)(rng);
bf_torque.y = NormalDistribution<Scalar>(sigma_r.y)(rng);
bf_torque.z = NormalDistribution<Scalar>(sigma_r.z)(rng);
if (x_zero) bf_torque.x = 0;
if (y_zero) bf_torque.y = 0;
if (z_zero) bf_torque.z = 0;
// use the damping by gamma_r and rotate back to lab frame
// For Future Updates: take special care when gamma_r is anisotropic
bf_torque = rotate(q, bf_torque);
if (D < 3)
{
bf_torque.x = 0;
bf_torque.y = 0;
t.x = 0;
t.y = 0;
}
// do the integration for quaternion
q += Scalar(0.5) * deltaT * ((t + bf_torque) / vec3<Scalar>(gamma_r)) * q ;
q = q * (Scalar(1.0) / slow::sqrt(norm2(q)));
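// note: the explicit Euler update two lines above does not preserve |q| == 1,
// which is why q is renormalized here before being stored.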
d_orientation[idx] = quat_to_scalar4(q);
// draw a new random ang_mom for particle j in body frame
p_vec.x = NormalDistribution<Scalar>(fast::sqrt(T * I.x))(rng);
p_vec.y = NormalDistribution<Scalar>(fast::sqrt(T * I.y))(rng);
p_vec.z = NormalDistribution<Scalar>(fast::sqrt(T * I.z))(rng);
if (x_zero) p_vec.x = 0;
if (y_zero) p_vec.y = 0;
if (z_zero) p_vec.z = 0;
// !! Note this ang_mom isn't well-behaved in 2D,
// !! because it may have an effective non-zero ang_mom in x,y
// store ang_mom quaternion
quat<Scalar> p = Scalar(2.0) * q * p_vec;
d_angmom[idx] = quat_to_scalar4(p);
}
}
}
}
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertia of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param langevin_args Collected arguments for gpu_brownian_step_one_kernel()
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
This is just a driver for gpu_brownian_step_one_kernel(), see it for details.
*/
cudaError_t gpu_brownian_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim& box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const Scalar3 *d_gamma_r,
Scalar4 *d_orientation,
Scalar4 *d_torque,
const Scalar3 *d_inertia,
Scalar4 *d_angmom,
const langevin_step_two_args& langevin_args,
const bool aniso,
const Scalar deltaT,
const unsigned int D,
const bool d_noiseless_t,
const bool d_noiseless_r,
const GPUPartition& gpu_partition
)
{
unsigned int run_block_size = 256;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
gpu_brownian_step_one_kernel<<< grid, threads, (unsigned int)(sizeof(Scalar)*langevin_args.n_types + sizeof(Scalar3)*langevin_args.n_types)>>>
(d_pos,
d_vel,
d_image,
box,
d_diameter,
d_tag,
d_group_members,
nwork,
d_net_force,
d_gamma_r,
d_orientation,
d_torque,
d_inertia,
d_angmom,
langevin_args.d_gamma,
langevin_args.n_types,
langevin_args.use_lambda,
langevin_args.lambda,
langevin_args.timestep,
langevin_args.seed,
langevin_args.T,
aniso,
deltaT,
D,
d_noiseless_t,
d_noiseless_r,
range.first);
}
return cudaSuccess;
}
|
b3b6c46515de5f4d974de3058ef5749108b020ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
//int Xcoord(int i, int XDim){
// int B = BLOCKSIZE*BLOCKSIZE;
// return (i%B)%BLOCKSIZE+((i/B)%(XDim/BLOCKSIZE))*BLOCKSIZE;
//}
//int Ycoord(int i, int XDim){
// int B = BLOCKSIZE*BLOCKSIZE;
// return (i%B)/BLOCKSIZE+((i/B)/(XDim/BLOCKSIZE))*BLOCKSIZE;
//}
//texture
//texture<float,1,hipReadModeElementType> texRef_f1;
texture<float,2,hipReadModeElementType> texRef_f1A;
texture<float,2,hipReadModeElementType> texRef_f2A;
texture<float,2,hipReadModeElementType> texRef_f3A;
texture<float,2,hipReadModeElementType> texRef_f4A;
texture<float,2,hipReadModeElementType> texRef_f5A;
texture<float,2,hipReadModeElementType> texRef_f6A;
texture<float,2,hipReadModeElementType> texRef_f7A;
texture<float,2,hipReadModeElementType> texRef_f8A;
texture<float,2,hipReadModeElementType> texRef_f1B;
texture<float,2,hipReadModeElementType> texRef_f2B;
texture<float,2,hipReadModeElementType> texRef_f3B;
texture<float,2,hipReadModeElementType> texRef_f4B;
texture<float,2,hipReadModeElementType> texRef_f5B;
texture<float,2,hipReadModeElementType> texRef_f6B;
texture<float,2,hipReadModeElementType> texRef_f7B;
texture<float,2,hipReadModeElementType> texRef_f8B;
#include <sys/time.h>
#include <time.h>
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
__global__ void test(float *f0A, float *f1A, float *f2A,
float *f3A, float *f4A, float *f5A,
float *f6A, float *f7A, float *f8A,
float *f1B, float *f2B,
float *f3B, float *f4B, float *f5B,
float *f6B, float *f7B, float *f8B, //int pitch)
int n, int *image, float omega, float uMax, int pitch)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
// int i = x+y*blockDim.x*gridDim.x;
// j = threadIdx.y*blockDim.x+threadIdx.x;//local block index (for shared mem)
// i = j+blockDim.x*blockDim.y*gridDim.x*blockIdx.y;
// i += blockDim.x*blockDim.y*blockIdx.x;//global memory index
//f0A[i] = f;
float f1,f2,f3,f4,f5,f6,f7,f8;
// f1=tex2D(texRef_f1,x+1,y );
// f2=tex2D(texRef_f2,x-1,y );
// f3=tex2D(texRef_f3,x ,y+1);
// f4=tex2D(texRef_f4,x ,y-1);
// f5=tex2D(texRef_f5,x+1,y+1);
// f7=tex2D(texRef_f7,x-1,y-1);
// f6=tex2D(texRef_f6,x-1,y+1);
// f8=tex2D(texRef_f8,x+1,y-1);
//if(image[i] == 0){
// f1 = tex2D(texRef_f1,x-1,y );
// f2 = tex2D(texRef_f2,x ,y-1);
// f3 = tex2D(texRef_f3,x+1,y );
// f4 = tex2D(texRef_f4,x ,y+1);
// f5 = tex2D(texRef_f5,x-1,y-1);
// f6 = tex2D(texRef_f6,x+1,y-1);
// f7 = tex2D(texRef_f7,x+1,y+1);
// f8 = tex2D(texRef_f8,x-1,y+1);
// f1 = f1A[x-1+y*pitch];
// f2 = f2A[x +(y-1)*pitch];
// f3 = f3A[x+1+y*pitch];
// f4 = f4A[x +(y+1)*pitch];
// f5 = f5A[x-1+(y-1)*pitch];
// f6 = f6A[x+1+(y-1)*pitch];
// f7 = f7A[x+1+(y+1)*pitch];
// f8 = f8A[x-1+(y+1)*pitch];
// f1=f1A[x+1+(y )*pitch];
// f2=f2A[x-1+(y )*pitch];
// f3=f3A[x +(y+1)*pitch];
// f4=f4A[x +(y-1)*pitch];
// f5=f5A[x+1+(y+1)*pitch];
// f7=f7A[x-1+(y-1)*pitch];
// f6=f6A[x-1+(y+1)*pitch];
// f8=f8A[x+1+(y-1)*pitch];
f1=f1A[x+y*pitch];
f2=f2A[x+y*pitch];
f3=f3A[x+y*pitch];
f4=f4A[x+y*pitch];
f5=f5A[x+y*pitch];
f6=f6A[x+y*pitch];
f7=f7A[x+y*pitch];
f8=f8A[x+y*pitch];
// f1B[x+y*pitch]=f1+1;
// f3B[x+y*pitch]=f3+1;
// f2B[x+y*pitch]=f2+1;
// f4B[x+y*pitch]=f4+1;
// f5B[x+y*pitch]=f5+1;
// f6B[x+y*pitch]=f6+1;
// f7B[x+y*pitch]=f7+1;
// f8B[x+y*pitch]=f8+1;
//}
// f1=f1A[x+y*pitch];
// f3=f3A[x+y*pitch];
// f2=f2A[x+y*pitch];
// f4=f4A[x+y*pitch];
// f5=f5A[x+y*pitch];
// f7=f7A[x+y*pitch];
// f6=f6A[x+y*pitch];
// f8=f8A[x+y*pitch];
f1B[x+y*pitch]=f1;
f2B[x+y*pitch]=f2;
f3B[x+y*pitch]=f3;
f4B[x+y*pitch]=f4;
f5B[x+y*pitch]=f5;
f6B[x+y*pitch]=f6;
f7B[x+y*pitch]=f7;
f8B[x+y*pitch]=f8;
}
__device__ void collide(float &f0, float &f1, float &f2,
float &f3, float &f4, float &f5,
float &f6, float &f7, float &f8, float rho, float u, float v, float omega)
{
float m1,m2,m4,m6,m7,m8;
m1 =-4.f*f0 - f1 - f2 - f3 - f4+ 2.f*f5+ 2.f*f6+ 2.f*f7+ 2.f*f8-(-2.0f*rho+3.0f*(u*u+v*v));
m2 = 4.f*f0 -2.f*f1 -2.f*f2 -2.f*f3 -2.f*f4+ f5+ f6+ f7+ f8-(rho-3.0f*(u*u+v*v)); //ep
m4 = -2.f*f1 + 2.f*f3 + f5 - f6 - f7+ f8-(-u);//qx_eq
m6 = -2.f*f2 + 2.f*f4+ f5+ f6 - f7 - f8-(-v);//qy_eq
m7 = f1 - f2+ f3 - f4 -(u*u-v*v);//pxx_eq
m8 = f5 - f6+ f7 - f8-(u*v);//pxy_eq
f0=f0-(-m1+m2)*0.11111111f;//(-4.f*(m1)/36.0f+4.f *(m2)/36.0f);
f1=f1-(-m1-2.0f*(m2+m4)+m7*omega*9.0f)*0.027777777f;
f2=f2-(-m1-2.f*m2-6.f*m6-m7*omega*9.0f)*0.027777777f;
f3=f3-(-m1-2.f*m2+6.f*m4+m7*omega*9.0f)*0.027777777f;
f4=f4-(-m1-2.f*m2+6.f*m6-m7*omega*9.0f)*0.027777777f;
f5=f5-(2.f*m1+m2+3.f*m4+3.f*m6+m8*omega*9.0f)*0.027777777f;
f6=f6-(2.f*m1+m2-3.f*m4+3.f*m6-m8*omega*9.0f)*0.027777777f;
f7=f7-(2.f*m1+m2-3.f*m4-3.f*m6+m8*omega*9.0f)*0.027777777f;
f8=f8-(2.f*m1+m2+3.f*m4-3.f*m6-m8*omega*9.0f)*0.027777777f;
// f0=f0-(-m1+m2)/9.0f;//(-4.f*(m1)/36.0f+4.f *(m2)/36.0f);
// f1=f1-(-m1-2.0f*(m2+m4)+m7*omega*9.0f)/36.0f;
// f2=f2-(-m1-2.f*m2-6.f*m6-m7*omega*9.0f)/36.0f;
// f3=f3-(-m1-2.f*m2+6.f*m4+m7*omega*9.0f)/36.0f;
// f4=f4-(-m1-2.f*m2+6.f*m6-m7*omega*9.0f)/36.0f;
// f5=f5-(2.f*m1+m2+3.f*m4+3.f*m6+m8*omega*9.0f)/36.0f;
// f6=f6-(2.f*m1+m2-3.f*m4+3.f*m6-m8*omega*9.0f)/36.0f;
// f7=f7-(2.f*m1+m2-3.f*m4-3.f*m6+m8*omega*9.0f)/36.0f;
// f8=f8-(2.f*m1+m2+3.f*m4-3.f*m6-m8*omega*9.0f)/36.0f;
// float feq;
// float usqr = u*u+v*v;
// feq = 4.0f/9.0f*(rho-1.5f*usqr);
// f0 = f0-omega*(f0-feq);
// feq = 1.0f/9.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// f1 = f1-omega*(f1-feq);
// feq = 1.0f/9.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// f2 = f2-omega*(f2-feq);
// feq = 1.0f/9.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// f3 = f3-omega*(f3-feq);
// feq = 1.0f/9.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f4 = f4-omega*(f4-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// f5 = f5-omega*(f5-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// f6 = f6-omega*(f6-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f7 = f7-omega*(f7-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// f8 = f8-omega*(f8-feq);
}
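// Note: collide() relaxes the six non-conserved D2Q9 moments (energy e,
// energy-square ep, heat fluxes qx/qy, stresses pxx/pxy) toward the equilibria
// named in the comments above; density and momentum are left unchanged. The
// commented-out block inside the function is an alternative
// single-relaxation-time (BGK) form kept for reference.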
__global__ void mrt_d_textAB(float *f0A, float *f1A, float *f2A,
float *f3A, float *f4A, float *f5A,
float *f6A, float *f7A, float *f8A,
float *f1B, float *f2B,
float *f3B, float *f4B, float *f5B,
float *f6B, float *f7B, float *f8B,
int n, int *image, float omega, float uMax, int pitch)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int i = x+y*blockDim.x*gridDim.x;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8;
//f0 = f0A[x+y*pitch];
// f2 = f4A[x +(y+1)*pitch];
// f4 = f2A[x +(y-1)*pitch];
// f1 = tex2D(texRef_f3A,x+1,y );
// f3 = tex2D(texRef_f1A,x-1,y );
// f5 = tex2D(texRef_f7A,x+1,y+1);
// f7 = tex2D(texRef_f5A,x-1,y-1);
// f6 = tex2D(texRef_f8A,x-1,y+1);
// f8 = tex2D(texRef_f6A,x+1,y-1);
// f1=tex2D(texRef_f3A,x+1,y );
// f3=tex2D(texRef_f1A,x-1,y );
// f2=tex2D(texRef_f4A,x ,y+1);
// f4=tex2D(texRef_f2A,x ,y-1);
// f5=tex2D(texRef_f7A,x+1,y+1);
// f7=tex2D(texRef_f5A,x-1,y-1);
// f6=tex2D(texRef_f8A,x-1,y+1);
// f8=tex2D(texRef_f6A,x+1,y-1);
//
f1 = f3A[x+1+(y )*pitch];
f3 = f1A[x-1+(y )*pitch];
f2 = f4A[x +(y+1)*pitch];
f4 = f2A[x +(y-1)*pitch];
f5 = f7A[x+1+(y+1)*pitch];
f7 = f5A[x-1+(y-1)*pitch];
f6 = f8A[x-1+(y+1)*pitch];
f8 = f6A[x+1+(y-1)*pitch];
// f0A[x+y*pitch] = f0A[x+y*pitch];
// f1B[x+y*pitch] = f1A[x+(y)*pitch];
// f3B[x+y*pitch] = f2A[x+(y)*pitch];
// f2B[x+y*pitch] = f3A[x+(y)*pitch];
// f4B[x+y*pitch] = f4A[x+(y)*pitch];
// f5B[x+y*pitch] = f5A[x+(y)*pitch];
// f7B[x+y*pitch] = f6A[x+(y)*pitch];
// f6B[x+y*pitch] = f7A[x+(y)*pitch];
// f8B[x+y*pitch] = f8A[x+(y)*pitch];
//f0A[x+y*pitch] = f0;
f2B[x+y*pitch] = f2;
f4B[x+y*pitch] = f4;
f1B[x+y*pitch] = f1;
f3B[x+y*pitch] = f3;
f5B[x+y*pitch] = f5;
f7B[x+y*pitch] = f7;
f6B[x+y*pitch] = f6;
f8B[x+y*pitch] = f8;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8;
float u,v,rho;//,feq,usqr;
// f0 = f0A[x +y*pitch];
// f2 = f2A[x +(y-1)*pitch];
// f4 = f4A[x +(y+1)*pitch];
// f1 = tex2D(texRef_f1A,x-1,y );
// f3 = tex2D(texRef_f3A,x+1,y );
// f5 = tex2D(texRef_f5A,x-1,y-1);
// f6 = tex2D(texRef_f6A,x+1,y-1);
// f7 = tex2D(texRef_f7A,x+1,y+1);
// f8 = tex2D(texRef_f8A,x-1,y+1);
// f1 = tex2D(texRef_f1A,x-1,y );
// f2 = tex2D(texRef_f2A,x ,y-1);
// f3 = tex2D(texRef_f3A,x+1,y );
// f4 = tex2D(texRef_f4A,x ,y+1);
// f5 = tex2D(texRef_f5A,x-1,y-1);
// f6 = tex2D(texRef_f6A,x+1,y-1);
// f7 = tex2D(texRef_f7A,x+1,y+1);
// f8 = tex2D(texRef_f8A,x-1,y+1);
// f0 = f0A[x +y*pitch];
f0 = f0A[x +y*pitch];
f1 = f1A[x-1+y*pitch];
f2 = f2A[x +(y-1)*pitch];
f3 = f3A[x+1+y*pitch];
f4 = f4A[x +(y+1)*pitch];
f5 = f5A[x-1+(y-1)*pitch];
f6 = f6A[x+1+(y-1)*pitch];
f7 = f7A[x+1+(y+1)*pitch];
f8 = f8A[x-1+(y+1)*pitch];
// f0 = f0A[x +y*pitch];
// f1 = f1A[x+y*pitch];
// f2 = f2A[x+y*pitch];
// f3 = f3A[x+y*pitch];
// f4 = f4A[x+y*pitch];
// f5 = f5A[x+y*pitch];
// f6 = f6A[x+y*pitch];
// f7 = f7A[x+y*pitch];
// f8 = f8A[x+y*pitch];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8;
u = f1-f3+f5-f6-f7+f8;
v = f2-f4+f5+f6-f7-f8;
if(im == 2){
u = 0.0f;
v = uMax;
rho = u+(f0+f2+f4+2.0f*f3+2.0f*f6+2.0f*f7);
//f1 = f3+4.0f*u/6.0f;
f1 = f3+u*0.66666667f;
f5 = f7-0.5f*(f2-f4)+v*0.5f+u*0.166666667f;
f8 = f6+0.5f*(f2-f4)-v*0.5f+u*0.166666667f;
}
else if(im == 3){//north
u = uMax;
v = 0.0f;
rho = -v+(f0+f1+f3+2.0f*f6+2.0f*f2+2.0f*f5);
//f4 = f2-4.0f*v/6.0f;
f4 = f2-v*0.66666667f;
f7 = f5+0.5f*(f1-f3)-u*0.5f+v*0.166666667f;
f8 = f6-0.5f*(f1-f3)+u*0.5f+v*0.166666667f;
}
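// note: the im==2 and im==3 ("north", moving lid) branches above rebuild the
// unknown incoming distributions from a prescribed wall velocity; the algebra
// appears to follow the standard Zou/He velocity boundary treatment for D2Q9.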
collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,rho,u,v,omega);
f0A[y*pitch+x]=f0;
f1B[y*pitch+x]=f1;
f2B[y*pitch+x]=f2;
f3B[y*pitch+x]=f3;
f4B[y*pitch+x]=f4;
f5B[y*pitch+x]=f5;
f6B[y*pitch+x]=f6;
f7B[y*pitch+x]=f7;
f8B[y*pitch+x]=f8;
}
}
__global__ void mrt_d_textBA(float *f0A, float *f1A, float *f2A,
float *f3A, float *f4A, float *f5A,
float *f6A, float *f7A, float *f8A,
float *f1B, float *f2B,
float *f3B, float *f4B, float *f5B,
float *f6B, float *f7B, float *f8B,
int n, int *image, float omega, float uMax, int pitch)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int i = x+y*blockDim.x*gridDim.x;
float u,v,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8;
//f0 = f0A[x+y*pitch];
// f2 = f4A[x +(y+1)*pitch];
// f4 = f2A[x +(y-1)*pitch];
// f1 = tex2D(texRef_f3B,x+1,y );
// f3 = tex2D(texRef_f1B,x-1,y );
// f5 = tex2D(texRef_f7B,x+1,y+1);
// f7 = tex2D(texRef_f5B,x-1,y-1);
// f6 = tex2D(texRef_f8B,x-1,y+1);
// f8 = tex2D(texRef_f6B,x+1,y-1);
//f0A[i] = f0A[i];
// f1=tex2D(texRef_f3B,x+1,y );
// f3=tex2D(texRef_f1B,x-1,y );
// f2=tex2D(texRef_f4B,x ,y+1);
// f4=tex2D(texRef_f2B,x ,y-1);
// f5=tex2D(texRef_f7B,x+1,y+1);
// f7=tex2D(texRef_f5B,x-1,y-1);
// f6=tex2D(texRef_f8B,x-1,y+1);
// f8=tex2D(texRef_f6B,x+1,y-1);
f1 = f3A[x+1+(y )*pitch];
f3 = f1A[x-1+(y )*pitch];
f2 = f4A[x +(y+1)*pitch];
f4 = f2A[x +(y-1)*pitch];
f5 = f7A[x+1+(y+1)*pitch];
f7 = f5A[x-1+(y-1)*pitch];
f6 = f8A[x-1+(y+1)*pitch];
f8 = f6A[x+1+(y-1)*pitch];
// f0A[x+y*pitch] = f0A[x+y*pitch];
// f1B[x+y*pitch] = f1A[x+(y)*pitch];
// f3B[x+y*pitch] = f2A[x+(y)*pitch];
// f2B[x+y*pitch] = f3A[x+(y)*pitch];
// f4B[x+y*pitch] = f4A[x+(y)*pitch];
// f5B[x+y*pitch] = f5A[x+(y)*pitch];
// f7B[x+y*pitch] = f6A[x+(y)*pitch];
// f6B[x+y*pitch] = f7A[x+(y)*pitch];
// f8B[x+y*pitch] = f8A[x+(y)*pitch];
//f0A[x+y*pitch] = f0;
f2B[x+y*pitch] = f2;
f4B[x+y*pitch] = f4;
f1B[x+y*pitch] = f1;
f3B[x+y*pitch] = f3;
f5B[x+y*pitch] = f5;
f7B[x+y*pitch] = f7;
f6B[x+y*pitch] = f6;
f8B[x+y*pitch] = f8;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8;
// f0 = f0A[x +y*pitch];
// f2 = f2B[x +(y-1)*pitch];
// f4 = f4B[x +(y+1)*pitch];
// f1 = tex2D(texRef_f1B,x-1,y );
// f3 = tex2D(texRef_f3B,x+1,y );
// f5 = tex2D(texRef_f5B,x-1,y-1);
// f6 = tex2D(texRef_f6B,x+1,y-1);
// f7 = tex2D(texRef_f7B,x+1,y+1);
// f8 = tex2D(texRef_f8B,x-1,y+1);
// f1 = tex2D(texRef_f1B,x-1,y );
// f2 = tex2D(texRef_f2B,x ,y-1);
// f3 = tex2D(texRef_f3B,x+1,y );
// f4 = tex2D(texRef_f4B,x ,y+1);
// f5 = tex2D(texRef_f5B,x-1,y-1);
// f6 = tex2D(texRef_f6B,x+1,y-1);
// f7 = tex2D(texRef_f7B,x+1,y+1);
// f8 = tex2D(texRef_f8B,x-1,y+1);
// f0 = f0A[x +y*pitch];
f0 = f0A[x +y*pitch];
f1 = f1A[x-1+y*pitch];
f2 = f2A[x +(y-1)*pitch];
f3 = f3A[x+1+y*pitch];
f4 = f4A[x +(y+1)*pitch];
f5 = f5A[x-1+(y-1)*pitch];
f6 = f6A[x+1+(y-1)*pitch];
f7 = f7A[x+1+(y+1)*pitch];
f8 = f8A[x-1+(y+1)*pitch];
// f0 = f0A[x +y*pitch];
// f1 = f1A[x+y*pitch];
// f2 = f2A[x+y*pitch];
// f3 = f3A[x+y*pitch];
// f4 = f4A[x+y*pitch];
// f5 = f5A[x+y*pitch];
// f6 = f6A[x+y*pitch];
// f7 = f7A[x+y*pitch];
// f8 = f8A[x+y*pitch];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8;
u = f1-f3+f5-f6-f7+f8;
v = f2-f4+f5+f6-f7-f8;
if(im == 2){
u = 0.0f;
v = uMax;
rho = u+(f0+f2+f4+2.0f*f3+2.0f*f6+2.0f*f7);
//f1 = f3+4.0f*u/6.0f;
f1 = f3+u*0.66666667f;
f5 = f7-0.5f*(f2-f4)+v*0.5f+u*0.166666667f;
f8 = f6+0.5f*(f2-f4)-v*0.5f+u*0.166666667f;
}
else if(im == 3){//north
u = uMax;
v = 0.0f;
rho = -v+(f0+f1+f3+2.0f*f6+2.0f*f2+2.0f*f5);
//f4 = f2-4.0f*v/6.0f;
f4 = f2-v*0.66666667f;
f7 = f5+0.5f*(f1-f3)-u*0.5f+v*0.166666667f;
f8 = f6-0.5f*(f1-f3)+u*0.5f+v*0.166666667f;
}
collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,rho,u,v,omega);
f0A[y*pitch+x]=f0;
f1B[y*pitch+x]=f1;
f2B[y*pitch+x]=f2;
f3B[y*pitch+x]=f3;
f4B[y*pitch+x]=f4;
f5B[y*pitch+x]=f5;
f6B[y*pitch+x]=f6;
f7B[y*pitch+x]=f7;
f8B[y*pitch+x]=f8;
}
}
__global__ void initialize(float *f0, float *f1, float *f2,
float *f3, float *f4, float *f5,
float *f6, float *f7, float *f8,
int n, int pitch)
{
int i;
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
i = threadIdx.y*blockDim.x+threadIdx.x;
i += blockDim.x*blockDim.y*gridDim.x*blockIdx.y;
i += blockDim.x*blockDim.y*blockIdx.x;
//i = y*blockDim.x+x;
//f1[y*pitch+x] = tex2D(texRef_f1,x,y);
float u,v,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
usqr = u*u+v*v;
feq = 4.0f/9.0f*(rho-1.5f*usqr);
f0[i] = feq;
feq = 1.0f/9.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[y*pitch+x] = feq;
}
int main(int argc, char *argv[])
{
float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h;
float *f0_d, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA;
float *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB;
int *image_d, *image_h;
ofstream output;
output.open ("LBM1_out.dat");
size_t memsize, memsize_int;
size_t pitch;
int i, tMax, n, nBlocks, xDim, yDim;
float Re, omega, uMax, CharLength;
int BLOCKSIZEx = 128;
int BLOCKSIZEy = 1;
xDim = 1024;//32;
yDim = 1024;//32;
tMax = 500;
Re = 500.f;//100.f;
uMax = 0.08f;
CharLength = xDim-2.f;
omega = 1.0f/(3.0f*(uMax*CharLength/Re)+0.5f);
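// note: with Re = uMax*CharLength/nu this is the usual LBM relation
// nu = uMax*CharLength/Re, tau = 3*nu + 0.5, omega = 1/tau.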
cout<<"omega: "<<omega<<endl;
nBlocks = (xDim/BLOCKSIZEx+xDim%BLOCKSIZEx)*(yDim/BLOCKSIZEy+yDim%BLOCKSIZEy);
int B = BLOCKSIZEx*BLOCKSIZEy;
n = nBlocks*B;//block*dimx*dimy
//CUT_DEVICE_INIT(argc,argv);
memsize = n*sizeof(float);
memsize_int = n*sizeof(int);
f0_h = (float *)malloc(memsize);
f1_h = (float *)malloc(memsize);
f2_h = (float *)malloc(memsize);
f3_h = (float *)malloc(memsize);
f4_h = (float *)malloc(memsize);
f5_h = (float *)malloc(memsize);
f6_h = (float *)malloc(memsize);
f7_h = (float *)malloc(memsize);
f8_h = (float *)malloc(memsize);
image_h = (int *)malloc(memsize_int);
//hipMalloc((void **) &f0_d, memsize);
//CUDA_SAFE_CALL(hipMalloc((void **) &f1_d, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f1_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f2_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f3_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f4_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f5_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f6_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f7_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f8_dA, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f1_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f2_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f3_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f4_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f5_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f6_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f7_dB, memsize));
// CUDA_SAFE_CALL(hipMalloc((void **) &f8_dB, memsize));
// pitch = xDim*sizeof(float);
hipMallocPitch((void **) &f0_d , &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f1_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f2_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f3_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f4_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f5_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f6_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f7_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f8_dA, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f1_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f2_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f3_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f4_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f5_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f6_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f7_dB, &pitch, xDim*sizeof(float), yDim);
hipMallocPitch((void **) &f8_dB, &pitch, xDim*sizeof(float), yDim);
//CUDA_SAFE_CALL(hipMalloc((void **) &f2_d, memsize));
//CUDA_SAFE_CALL(hipMalloc((void **) &f3_d, memsize));
//CUDA_SAFE_CALL(hipMalloc((void **) &f4_d, memsize));
//CUDA_SAFE_CALL(hipMalloc((void **) &f5_d, memsize));
//CUDA_SAFE_CALL(hipMalloc((void **) &f6_d, memsize));
//CUDA_SAFE_CALL(hipMalloc((void **) &f7_d, memsize));
//CUDA_SAFE_CALL(hipMalloc((void **) &f8_d, memsize));
hipMalloc((void **) &image_d, memsize_int);
for (i = 0; i < n; i++)
{
int x = i%xDim;
int y = i/xDim;
f0_h[i] = i;
f1_h[i] = n-i;
f2_h[i] = i;
f3_h[i] = i;
f4_h[i] = i;
f5_h[i] = i;
f6_h[i] = i;
f7_h[i] = i;
f8_h[i] = i;
image_h[i] = 0;
if(x < 1) image_h[i] = 1;//DirichletWest
if(x > xDim-2) image_h[i] = 1;//BB
if(y < 1) image_h[i] = 1;//BB
if(y > yDim-2) image_h[i] = 3;//BB
}
//hipMemcpy(f0_d, f0_h, memsize, hipMemcpyHostToDevice);
//CUDA_SAFE_CALL(hipMemcpy(f1_d, f1_h, memsize, hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f1_dA,f1_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f2_dA,f2_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f3_dA,f3_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f4_dA,f4_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f5_dA,f5_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f6_dA,f6_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f7_dA,f7_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f8_dA,f8_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f1_dB,f1_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f2_dB,f2_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f3_dB,f3_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f4_dB,f4_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f5_dB,f5_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f6_dB,f6_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f7_dB,f7_h,memsize,hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(f8_dB,f8_h,memsize,hipMemcpyHostToDevice));
hipMemcpy2D(f0_d ,pitch,f1_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f1_dA,pitch,f1_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f2_dA,pitch,f2_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f3_dA,pitch,f3_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f4_dA,pitch,f4_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f5_dA,pitch,f5_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f6_dA,pitch,f6_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f7_dA,pitch,f7_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f8_dA,pitch,f8_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f1_dB,pitch,f1_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f2_dB,pitch,f2_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f3_dB,pitch,f3_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f4_dB,pitch,f4_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f5_dB,pitch,f5_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f6_dB,pitch,f6_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f7_dB,pitch,f7_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
hipMemcpy2D(f8_dB,pitch,f8_h,xDim*sizeof(float),xDim*sizeof(float),yDim,hipMemcpyHostToDevice);
//CUDA_SAFE_CALL(hipMemcpy(f2_d, f2_h, memsize, hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(f3_d, f3_h, memsize, hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(f4_d, f4_h, memsize, hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(f5_d, f5_h, memsize, hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(f6_d, f6_h, memsize, hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(f7_d, f7_h, memsize, hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(f8_d, f8_h, memsize, hipMemcpyHostToDevice));
hipMemcpy(image_d, image_h, memsize_int, hipMemcpyHostToDevice);
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
//cout<<(int)(pitch/sizeof(float))<<endl;
dim3 threads(BLOCKSIZEx, BLOCKSIZEy);
dim3 grid(xDim/BLOCKSIZEx,yDim/BLOCKSIZEy);
cout<<"nBlocks:"<<nBlocks<<endl;
texRef_f1A.normalized = false;
texRef_f2A.normalized = false;
texRef_f3A.normalized = false;
texRef_f4A.normalized = false;
texRef_f5A.normalized = false;
texRef_f6A.normalized = false;
texRef_f7A.normalized = false;
texRef_f8A.normalized = false;
texRef_f1A.filterMode = hipFilterModePoint;
texRef_f2A.filterMode = hipFilterModePoint;
texRef_f3A.filterMode = hipFilterModePoint;
texRef_f4A.filterMode = hipFilterModePoint;
texRef_f5A.filterMode = hipFilterModePoint;
texRef_f6A.filterMode = hipFilterModePoint;
texRef_f7A.filterMode = hipFilterModePoint;
texRef_f8A.filterMode = hipFilterModePoint;
texRef_f1B.normalized = false;
texRef_f2B.normalized = false;
texRef_f3B.normalized = false;
texRef_f4B.normalized = false;
texRef_f5B.normalized = false;
texRef_f6B.normalized = false;
texRef_f7B.normalized = false;
texRef_f8B.normalized = false;
texRef_f1B.filterMode = hipFilterModePoint;
texRef_f2B.filterMode = hipFilterModePoint;
texRef_f3B.filterMode = hipFilterModePoint;
texRef_f4B.filterMode = hipFilterModePoint;
texRef_f5B.filterMode = hipFilterModePoint;
texRef_f6B.filterMode = hipFilterModePoint;
texRef_f7B.filterMode = hipFilterModePoint;
texRef_f8B.filterMode = hipFilterModePoint;
hipLaunchKernelGGL(( initialize), dim3(grid), dim3(threads), 0, 0, f0_d, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA,
n,(int)(pitch/sizeof(float)));
hipBindTexture2D(0,&texRef_f1A, f1_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f2A, f2_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f3A, f3_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f4A, f4_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f5A, f5_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f6A, f6_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f7A, f7_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f8A, f8_dA,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f1B, f1_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f2B, f2_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f3B, f3_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f4B, f4_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f5B, f5_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f6B, f6_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f7B, f7_dB,&desc,xDim,yDim,pitch);
hipBindTexture2D(0,&texRef_f8B, f8_dB,&desc,xDim,yDim,pitch);
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
for(int t = 0; t<tMax; t=t+2){
//for(int t = 0; t<tMax; t=t+1){
//mrt_d<<<grid, threads>>>(f0_d,f1_d,f2_d,f3_d,f4_d,f5_d,f6_d,f7_d,f8_d,n,image_d,omega,uMax);
//test<<<grid, threads>>>(f0_d,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
hipLaunchKernelGGL(( mrt_d_textAB), dim3(grid), dim3(threads), 0, 0, f0_d,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,
n,image_d,omega,uMax,(int)(pitch/sizeof(float)));
//test<<<grid, threads>>>(f0_d,f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,
hipLaunchKernelGGL(( mrt_d_textBA), dim3(grid), dim3(threads), 0, 0, f0_d,f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,
f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
n,image_d,omega,uMax,(int)(pitch/sizeof(float)));
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
cout<<"Time taken for main kernel: "<<restime<<" ("<<double(xDim*yDim*double(tMax/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<xDim<<","<<yDim<<","<<tMax<<","<<restime<<endl;
hipUnbindTexture(texRef_f1A);
hipUnbindTexture(texRef_f2A);
hipUnbindTexture(texRef_f3A);
hipUnbindTexture(texRef_f4A);
hipUnbindTexture(texRef_f5A);
hipUnbindTexture(texRef_f6A);
hipUnbindTexture(texRef_f7A);
hipUnbindTexture(texRef_f8A);
hipUnbindTexture(texRef_f1B);
hipUnbindTexture(texRef_f2B);
hipUnbindTexture(texRef_f3B);
hipUnbindTexture(texRef_f4B);
hipUnbindTexture(texRef_f5B);
hipUnbindTexture(texRef_f6B);
hipUnbindTexture(texRef_f7B);
hipUnbindTexture(texRef_f8B);
//CUT_CHECK_ERROR("Kernel execution failed");
//hipMemcpy(f0_h, f0_d, memsize, hipMemcpyDeviceToHost);
//CUDA_SAFE_CALL(hipMemcpy(f1_h, f1_d, memsize, hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f1_h,f1_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f2_h,f2_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f3_h,f3_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f4_h,f4_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f5_h,f5_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f6_h,f6_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f7_h,f7_dB,memsize,hipMemcpyDeviceToHost));
// CUDA_SAFE_CALL(hipMemcpy(f8_h,f8_dB,memsize,hipMemcpyDeviceToHost));
hipMemcpy2D(f0_h,xDim*sizeof(float),f0_d ,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f1_h,xDim*sizeof(float),f1_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f2_h,xDim*sizeof(float),f2_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f3_h,xDim*sizeof(float),f3_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f4_h,xDim*sizeof(float),f4_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f5_h,xDim*sizeof(float),f5_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f6_h,xDim*sizeof(float),f6_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f7_h,xDim*sizeof(float),f7_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
hipMemcpy2D(f8_h,xDim*sizeof(float),f8_dB,pitch,xDim*sizeof(float),yDim,hipMemcpyDeviceToHost);
//CUDA_SAFE_CALL(hipMemcpy(f2_h, f2_d, memsize, hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(f3_h, f3_d, memsize, hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(f4_h, f4_d, memsize, hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(f5_h, f5_d, memsize, hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(f6_h, f6_d, memsize, hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(f7_h, f7_d, memsize, hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(f8_h, f8_d, memsize, hipMemcpyDeviceToHost));
output<<"VARIABLES = \"X\",\"Y\",\"u\",\"v\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<xDim<<", J="<<yDim<<"\n";
// for(i = 0; i<n; i++)
// {
int row = 0;
int col = 0;
i = 0;
//int rowB, colB;
//float xcoord, ycoord;
float rho, u, v;
rho = 0;
u = 0;
v = 0;
for(row = 0; row<yDim; row++){
for(col = 0; col<xDim; col++){
i = row*xDim+col;
rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i];
u = f1_h[i]-f3_h[i]+f5_h[i]-f6_h[i]-f7_h[i]+f8_h[i];
v = f2_h[i]-f4_h[i]+f5_h[i]+f6_h[i]-f7_h[i]-f8_h[i];
output<<col<<", "<<row<<", "<<u<<","<<v<<","<<rho<<endl;
}
}
free(f0_h);
free(f1_h);
free(f2_h);
free(f3_h);
free(f4_h);
free(f5_h);
free(f6_h);
free(f7_h);
free(f8_h);
output.close();
hipFree(f0_d);
hipFree(f1_dA);
hipFree(f2_dA);
hipFree(f3_dA);
hipFree(f4_dA);
hipFree(f5_dA);
hipFree(f6_dA);
hipFree(f7_dA);
hipFree(f8_dA);
hipFree(f1_dB);
hipFree(f2_dB);
hipFree(f3_dB);
hipFree(f4_dB);
hipFree(f5_dB);
hipFree(f6_dB);
hipFree(f7_dB);
hipFree(f8_dB);
hipFree(image_d);
return(0);
}
|
b3b6c46515de5f4d974de3058ef5749108b020ba.cu
|
#include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
//int Xcoord(int i, int XDim){
// int B = BLOCKSIZE*BLOCKSIZE;
// return (i%B)%BLOCKSIZE+((i/B)%(XDim/BLOCKSIZE))*BLOCKSIZE;
//}
//int Ycoord(int i, int XDim){
// int B = BLOCKSIZE*BLOCKSIZE;
// return (i%B)/BLOCKSIZE+((i/B)/(XDim/BLOCKSIZE))*BLOCKSIZE;
//}
//texture
//texture<float,1,cudaReadModeElementType> texRef_f1;
texture<float,2,cudaReadModeElementType> texRef_f1A;
texture<float,2,cudaReadModeElementType> texRef_f2A;
texture<float,2,cudaReadModeElementType> texRef_f3A;
texture<float,2,cudaReadModeElementType> texRef_f4A;
texture<float,2,cudaReadModeElementType> texRef_f5A;
texture<float,2,cudaReadModeElementType> texRef_f6A;
texture<float,2,cudaReadModeElementType> texRef_f7A;
texture<float,2,cudaReadModeElementType> texRef_f8A;
texture<float,2,cudaReadModeElementType> texRef_f1B;
texture<float,2,cudaReadModeElementType> texRef_f2B;
texture<float,2,cudaReadModeElementType> texRef_f3B;
texture<float,2,cudaReadModeElementType> texRef_f4B;
texture<float,2,cudaReadModeElementType> texRef_f5B;
texture<float,2,cudaReadModeElementType> texRef_f6B;
texture<float,2,cudaReadModeElementType> texRef_f7B;
texture<float,2,cudaReadModeElementType> texRef_f8B;
#include <sys/time.h>
#include <time.h>
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
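// Debug kernel (its launches are commented out in main): copies each distribution from the
// A buffers straight into the B buffers at the same site, with no streaming or collision.
// The commented-out loads inside the body record the streamed variants that were tried.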
__global__ void test(float *f0A, float *f1A, float *f2A,
float *f3A, float *f4A, float *f5A,
float *f6A, float *f7A, float *f8A,
float *f1B, float *f2B,
float *f3B, float *f4B, float *f5B,
float *f6B, float *f7B, float *f8B, //int pitch)
int n, int *image, float omega, float uMax, int pitch)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
// int i = x+y*blockDim.x*gridDim.x;
// j = threadIdx.y*blockDim.x+threadIdx.x;//local block index (for shared mem)
// i = j+blockDim.x*blockDim.y*gridDim.x*blockIdx.y;
// i += blockDim.x*blockDim.y*blockIdx.x;//global memory index
//f0A[i] = f;
float f1,f2,f3,f4,f5,f6,f7,f8;
// f1=tex2D(texRef_f1,x+1,y );
// f2=tex2D(texRef_f2,x-1,y );
// f3=tex2D(texRef_f3,x ,y+1);
// f4=tex2D(texRef_f4,x ,y-1);
// f5=tex2D(texRef_f5,x+1,y+1);
// f7=tex2D(texRef_f7,x-1,y-1);
// f6=tex2D(texRef_f6,x-1,y+1);
// f8=tex2D(texRef_f8,x+1,y-1);
//if(image[i] == 0){
// f1 = tex2D(texRef_f1,x-1,y );
// f2 = tex2D(texRef_f2,x ,y-1);
// f3 = tex2D(texRef_f3,x+1,y );
// f4 = tex2D(texRef_f4,x ,y+1);
// f5 = tex2D(texRef_f5,x-1,y-1);
// f6 = tex2D(texRef_f6,x+1,y-1);
// f7 = tex2D(texRef_f7,x+1,y+1);
// f8 = tex2D(texRef_f8,x-1,y+1);
// f1 = f1A[x-1+y*pitch];
// f2 = f2A[x +(y-1)*pitch];
// f3 = f3A[x+1+y*pitch];
// f4 = f4A[x +(y+1)*pitch];
// f5 = f5A[x-1+(y-1)*pitch];
// f6 = f6A[x+1+(y-1)*pitch];
// f7 = f7A[x+1+(y+1)*pitch];
// f8 = f8A[x-1+(y+1)*pitch];
// f1=f1A[x+1+(y )*pitch];
// f2=f2A[x-1+(y )*pitch];
// f3=f3A[x +(y+1)*pitch];
// f4=f4A[x +(y-1)*pitch];
// f5=f5A[x+1+(y+1)*pitch];
// f7=f7A[x-1+(y-1)*pitch];
// f6=f6A[x-1+(y+1)*pitch];
// f8=f8A[x+1+(y-1)*pitch];
f1=f1A[x+y*pitch];
f2=f2A[x+y*pitch];
f3=f3A[x+y*pitch];
f4=f4A[x+y*pitch];
f5=f5A[x+y*pitch];
f6=f6A[x+y*pitch];
f7=f7A[x+y*pitch];
f8=f8A[x+y*pitch];
// f1B[x+y*pitch]=f1+1;
// f3B[x+y*pitch]=f3+1;
// f2B[x+y*pitch]=f2+1;
// f4B[x+y*pitch]=f4+1;
// f5B[x+y*pitch]=f5+1;
// f6B[x+y*pitch]=f6+1;
// f7B[x+y*pitch]=f7+1;
// f8B[x+y*pitch]=f8+1;
//}
// f1=f1A[x+y*pitch];
// f3=f3A[x+y*pitch];
// f2=f2A[x+y*pitch];
// f4=f4A[x+y*pitch];
// f5=f5A[x+y*pitch];
// f7=f7A[x+y*pitch];
// f6=f6A[x+y*pitch];
// f8=f8A[x+y*pitch];
f1B[x+y*pitch]=f1;
f2B[x+y*pitch]=f2;
f3B[x+y*pitch]=f3;
f4B[x+y*pitch]=f4;
f5B[x+y*pitch]=f5;
f6B[x+y*pitch]=f6;
f7B[x+y*pitch]=f7;
f8B[x+y*pitch]=f8;
}
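// Moment-space (MRT) collision for the D2Q9 lattice: m1/m2 are the energy moments, m4/m6 the
// heat-flux moments and m7/m8 the stress moments. m7 and m8 are relaxed with omega, while the
// remaining moments appear to use fixed (unit) rates. The commented block below is the
// equivalent single-relaxation-time (BGK) form kept as an alternative.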
__device__ void collide(float &f0, float &f1, float &f2,
float &f3, float &f4, float &f5,
float &f6, float &f7, float &f8, float rho, float u, float v, float omega)
{
float m1,m2,m4,m6,m7,m8;
m1 =-4.f*f0 - f1 - f2 - f3 - f4+ 2.f*f5+ 2.f*f6+ 2.f*f7+ 2.f*f8-(-2.0f*rho+3.0f*(u*u+v*v));
m2 = 4.f*f0 -2.f*f1 -2.f*f2 -2.f*f3 -2.f*f4+ f5+ f6+ f7+ f8-(rho-3.0f*(u*u+v*v)); //ep
m4 = -2.f*f1 + 2.f*f3 + f5 - f6 - f7+ f8-(-u);//qx_eq
m6 = -2.f*f2 + 2.f*f4+ f5+ f6 - f7 - f8-(-v);//qy_eq
m7 = f1 - f2+ f3 - f4 -(u*u-v*v);//pxx_eq
m8 = f5 - f6+ f7 - f8-(u*v);//pxy_eq
f0=f0-(-m1+m2)*0.11111111f;//(-4.f*(m1)/36.0f+4.f *(m2)/36.0f);
f1=f1-(-m1-2.0f*(m2+m4)+m7*omega*9.0f)*0.027777777f;
f2=f2-(-m1-2.f*m2-6.f*m6-m7*omega*9.0f)*0.027777777f;
f3=f3-(-m1-2.f*m2+6.f*m4+m7*omega*9.0f)*0.027777777f;
f4=f4-(-m1-2.f*m2+6.f*m6-m7*omega*9.0f)*0.027777777f;
f5=f5-(2.f*m1+m2+3.f*m4+3.f*m6+m8*omega*9.0f)*0.027777777f;
f6=f6-(2.f*m1+m2-3.f*m4+3.f*m6-m8*omega*9.0f)*0.027777777f;
f7=f7-(2.f*m1+m2-3.f*m4-3.f*m6+m8*omega*9.0f)*0.027777777f;
f8=f8-(2.f*m1+m2+3.f*m4-3.f*m6-m8*omega*9.0f)*0.027777777f;
// f0=f0-(-m1+m2)/9.0f;//(-4.f*(m1)/36.0f+4.f *(m2)/36.0f);
// f1=f1-(-m1-2.0f*(m2+m4)+m7*omega*9.0f)/36.0f;
// f2=f2-(-m1-2.f*m2-6.f*m6-m7*omega*9.0f)/36.0f;
// f3=f3-(-m1-2.f*m2+6.f*m4+m7*omega*9.0f)/36.0f;
// f4=f4-(-m1-2.f*m2+6.f*m6-m7*omega*9.0f)/36.0f;
// f5=f5-(2.f*m1+m2+3.f*m4+3.f*m6+m8*omega*9.0f)/36.0f;
// f6=f6-(2.f*m1+m2-3.f*m4+3.f*m6-m8*omega*9.0f)/36.0f;
// f7=f7-(2.f*m1+m2-3.f*m4-3.f*m6+m8*omega*9.0f)/36.0f;
// f8=f8-(2.f*m1+m2+3.f*m4-3.f*m6-m8*omega*9.0f)/36.0f;
// float feq;
// float usqr = u*u+v*v;
// feq = 4.0f/9.0f*(rho-1.5f*usqr);
// f0 = f0-omega*(f0-feq);
// feq = 1.0f/9.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// f1 = f1-omega*(f1-feq);
// feq = 1.0f/9.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// f2 = f2-omega*(f2-feq);
// feq = 1.0f/9.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// f3 = f3-omega*(f3-feq);
// feq = 1.0f/9.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f4 = f4-omega*(f4-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// f5 = f5-omega*(f5-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// f6 = f6-omega*(f6-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f7 = f7-omega*(f7-feq);
// feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// f8 = f8-omega*(f8-feq);
}
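// Stream-and-collide step that reads the "A" distributions and writes the "B" distributions.
// Solid nodes (im == 1) pull the opposite populations from their neighbours, i.e. streaming
// combined with halfway bounce-back. Nodes with im == 2 or im == 3 ("north") stream and then
// appear to impose Zou/He-type velocity boundaries with speed uMax; all non-solid nodes finish
// with the MRT collide() above.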
__global__ void mrt_d_textAB(float *f0A, float *f1A, float *f2A,
float *f3A, float *f4A, float *f5A,
float *f6A, float *f7A, float *f8A,
float *f1B, float *f2B,
float *f3B, float *f4B, float *f5B,
float *f6B, float *f7B, float *f8B,
int n, int *image, float omega, float uMax, int pitch)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int i = x+y*blockDim.x*gridDim.x;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8;
//f0 = f0A[x+y*pitch];
// f2 = f4A[x +(y+1)*pitch];
// f4 = f2A[x +(y-1)*pitch];
// f1 = tex2D(texRef_f3A,x+1,y );
// f3 = tex2D(texRef_f1A,x-1,y );
// f5 = tex2D(texRef_f7A,x+1,y+1);
// f7 = tex2D(texRef_f5A,x-1,y-1);
// f6 = tex2D(texRef_f8A,x-1,y+1);
// f8 = tex2D(texRef_f6A,x+1,y-1);
// f1=tex2D(texRef_f3A,x+1,y );
// f3=tex2D(texRef_f1A,x-1,y );
// f2=tex2D(texRef_f4A,x ,y+1);
// f4=tex2D(texRef_f2A,x ,y-1);
// f5=tex2D(texRef_f7A,x+1,y+1);
// f7=tex2D(texRef_f5A,x-1,y-1);
// f6=tex2D(texRef_f8A,x-1,y+1);
// f8=tex2D(texRef_f6A,x+1,y-1);
//
f1 = f3A[x+1+(y )*pitch];
f3 = f1A[x-1+(y )*pitch];
f2 = f4A[x +(y+1)*pitch];
f4 = f2A[x +(y-1)*pitch];
f5 = f7A[x+1+(y+1)*pitch];
f7 = f5A[x-1+(y-1)*pitch];
f6 = f8A[x-1+(y+1)*pitch];
f8 = f6A[x+1+(y-1)*pitch];
// f0A[x+y*pitch] = f0A[x+y*pitch];
// f1B[x+y*pitch] = f1A[x+(y)*pitch];
// f3B[x+y*pitch] = f2A[x+(y)*pitch];
// f2B[x+y*pitch] = f3A[x+(y)*pitch];
// f4B[x+y*pitch] = f4A[x+(y)*pitch];
// f5B[x+y*pitch] = f5A[x+(y)*pitch];
// f7B[x+y*pitch] = f6A[x+(y)*pitch];
// f6B[x+y*pitch] = f7A[x+(y)*pitch];
// f8B[x+y*pitch] = f8A[x+(y)*pitch];
//f0A[x+y*pitch] = f0;
f2B[x+y*pitch] = f2;
f4B[x+y*pitch] = f4;
f1B[x+y*pitch] = f1;
f3B[x+y*pitch] = f3;
f5B[x+y*pitch] = f5;
f7B[x+y*pitch] = f7;
f6B[x+y*pitch] = f6;
f8B[x+y*pitch] = f8;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8;
float u,v,rho;//,feq,usqr;
// f0 = f0A[x +y*pitch];
// f2 = f2A[x +(y-1)*pitch];
// f4 = f4A[x +(y+1)*pitch];
// f1 = tex2D(texRef_f1A,x-1,y );
// f3 = tex2D(texRef_f3A,x+1,y );
// f5 = tex2D(texRef_f5A,x-1,y-1);
// f6 = tex2D(texRef_f6A,x+1,y-1);
// f7 = tex2D(texRef_f7A,x+1,y+1);
// f8 = tex2D(texRef_f8A,x-1,y+1);
// f1 = tex2D(texRef_f1A,x-1,y );
// f2 = tex2D(texRef_f2A,x ,y-1);
// f3 = tex2D(texRef_f3A,x+1,y );
// f4 = tex2D(texRef_f4A,x ,y+1);
// f5 = tex2D(texRef_f5A,x-1,y-1);
// f6 = tex2D(texRef_f6A,x+1,y-1);
// f7 = tex2D(texRef_f7A,x+1,y+1);
// f8 = tex2D(texRef_f8A,x-1,y+1);
// f0 = f0A[x +y*pitch];
f0 = f0A[x +y*pitch];
f1 = f1A[x-1+y*pitch];
f2 = f2A[x +(y-1)*pitch];
f3 = f3A[x+1+y*pitch];
f4 = f4A[x +(y+1)*pitch];
f5 = f5A[x-1+(y-1)*pitch];
f6 = f6A[x+1+(y-1)*pitch];
f7 = f7A[x+1+(y+1)*pitch];
f8 = f8A[x-1+(y+1)*pitch];
// f0 = f0A[x +y*pitch];
// f1 = f1A[x+y*pitch];
// f2 = f2A[x+y*pitch];
// f3 = f3A[x+y*pitch];
// f4 = f4A[x+y*pitch];
// f5 = f5A[x+y*pitch];
// f6 = f6A[x+y*pitch];
// f7 = f7A[x+y*pitch];
// f8 = f8A[x+y*pitch];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8;
u = f1-f3+f5-f6-f7+f8;
v = f2-f4+f5+f6-f7-f8;
if(im == 2){
u = 0.0f;
v = uMax;
rho = u+(f0+f2+f4+2.0f*f3+2.0f*f6+2.0f*f7);
//f1 = f3+4.0f*u/6.0f;
f1 = f3+u*0.66666667f;
f5 = f7-0.5f*(f2-f4)+v*0.5f+u*0.166666667f;
f8 = f6+0.5f*(f2-f4)-v*0.5f+u*0.166666667f;
}
else if(im == 3){//north
u = uMax;
v = 0.0f;
rho = -v+(f0+f1+f3+2.0f*f6+2.0f*f2+2.0f*f5);
//f4 = f2-4.0f*v/6.0f;
f4 = f2-v*0.66666667f;
f7 = f5+0.5f*(f1-f3)-u*0.5f+v*0.166666667f;
f8 = f6-0.5f*(f1-f3)+u*0.5f+v*0.166666667f;
}
collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,rho,u,v,omega);
f0A[y*pitch+x]=f0;
f1B[y*pitch+x]=f1;
f2B[y*pitch+x]=f2;
f3B[y*pitch+x]=f3;
f4B[y*pitch+x]=f4;
f5B[y*pitch+x]=f5;
f6B[y*pitch+x]=f6;
f7B[y*pitch+x]=f7;
f8B[y*pitch+x]=f8;
}
}
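// Second half of the ping-pong: structurally identical to mrt_d_textAB, but main() passes the
// B buffers as the read arguments and the A buffers as the write arguments, so each pair of
// launches advances the solution two time steps without an extra copy.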
__global__ void mrt_d_textBA(float *f0A, float *f1A, float *f2A,
float *f3A, float *f4A, float *f5A,
float *f6A, float *f7A, float *f8A,
float *f1B, float *f2B,
float *f3B, float *f4B, float *f5B,
float *f6B, float *f7B, float *f8B,
int n, int *image, float omega, float uMax, int pitch)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int i = x+y*blockDim.x*gridDim.x;
float u,v,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8;
//f0 = f0A[x+y*pitch];
// f2 = f4A[x +(y+1)*pitch];
// f4 = f2A[x +(y-1)*pitch];
// f1 = tex2D(texRef_f3B,x+1,y );
// f3 = tex2D(texRef_f1B,x-1,y );
// f5 = tex2D(texRef_f7B,x+1,y+1);
// f7 = tex2D(texRef_f5B,x-1,y-1);
// f6 = tex2D(texRef_f8B,x-1,y+1);
// f8 = tex2D(texRef_f6B,x+1,y-1);
//f0A[i] = f0A[i];
// f1=tex2D(texRef_f3B,x+1,y );
// f3=tex2D(texRef_f1B,x-1,y );
// f2=tex2D(texRef_f4B,x ,y+1);
// f4=tex2D(texRef_f2B,x ,y-1);
// f5=tex2D(texRef_f7B,x+1,y+1);
// f7=tex2D(texRef_f5B,x-1,y-1);
// f6=tex2D(texRef_f8B,x-1,y+1);
// f8=tex2D(texRef_f6B,x+1,y-1);
f1 = f3A[x+1+(y )*pitch];
f3 = f1A[x-1+(y )*pitch];
f2 = f4A[x +(y+1)*pitch];
f4 = f2A[x +(y-1)*pitch];
f5 = f7A[x+1+(y+1)*pitch];
f7 = f5A[x-1+(y-1)*pitch];
f6 = f8A[x-1+(y+1)*pitch];
f8 = f6A[x+1+(y-1)*pitch];
// f0A[x+y*pitch] = f0A[x+y*pitch];
// f1B[x+y*pitch] = f1A[x+(y)*pitch];
// f3B[x+y*pitch] = f2A[x+(y)*pitch];
// f2B[x+y*pitch] = f3A[x+(y)*pitch];
// f4B[x+y*pitch] = f4A[x+(y)*pitch];
// f5B[x+y*pitch] = f5A[x+(y)*pitch];
// f7B[x+y*pitch] = f6A[x+(y)*pitch];
// f6B[x+y*pitch] = f7A[x+(y)*pitch];
// f8B[x+y*pitch] = f8A[x+(y)*pitch];
//f0A[x+y*pitch] = f0;
f2B[x+y*pitch] = f2;
f4B[x+y*pitch] = f4;
f1B[x+y*pitch] = f1;
f3B[x+y*pitch] = f3;
f5B[x+y*pitch] = f5;
f7B[x+y*pitch] = f7;
f6B[x+y*pitch] = f6;
f8B[x+y*pitch] = f8;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8;
// f0 = f0A[x +y*pitch];
// f2 = f2B[x +(y-1)*pitch];
// f4 = f4B[x +(y+1)*pitch];
// f1 = tex2D(texRef_f1B,x-1,y );
// f3 = tex2D(texRef_f3B,x+1,y );
// f5 = tex2D(texRef_f5B,x-1,y-1);
// f6 = tex2D(texRef_f6B,x+1,y-1);
// f7 = tex2D(texRef_f7B,x+1,y+1);
// f8 = tex2D(texRef_f8B,x-1,y+1);
// f1 = tex2D(texRef_f1B,x-1,y );
// f2 = tex2D(texRef_f2B,x ,y-1);
// f3 = tex2D(texRef_f3B,x+1,y );
// f4 = tex2D(texRef_f4B,x ,y+1);
// f5 = tex2D(texRef_f5B,x-1,y-1);
// f6 = tex2D(texRef_f6B,x+1,y-1);
// f7 = tex2D(texRef_f7B,x+1,y+1);
// f8 = tex2D(texRef_f8B,x-1,y+1);
// f0 = f0A[x +y*pitch];
f0 = f0A[x +y*pitch];
f1 = f1A[x-1+y*pitch];
f2 = f2A[x +(y-1)*pitch];
f3 = f3A[x+1+y*pitch];
f4 = f4A[x +(y+1)*pitch];
f5 = f5A[x-1+(y-1)*pitch];
f6 = f6A[x+1+(y-1)*pitch];
f7 = f7A[x+1+(y+1)*pitch];
f8 = f8A[x-1+(y+1)*pitch];
// f0 = f0A[x +y*pitch];
// f1 = f1A[x+y*pitch];
// f2 = f2A[x+y*pitch];
// f3 = f3A[x+y*pitch];
// f4 = f4A[x+y*pitch];
// f5 = f5A[x+y*pitch];
// f6 = f6A[x+y*pitch];
// f7 = f7A[x+y*pitch];
// f8 = f8A[x+y*pitch];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8;
u = f1-f3+f5-f6-f7+f8;
v = f2-f4+f5+f6-f7-f8;
if(im == 2){
u = 0.0f;
v = uMax;
rho = u+(f0+f2+f4+2.0f*f3+2.0f*f6+2.0f*f7);
//f1 = f3+4.0f*u/6.0f;
f1 = f3+u*0.66666667f;
f5 = f7-0.5f*(f2-f4)+v*0.5f+u*0.166666667f;
f8 = f6+0.5f*(f2-f4)-v*0.5f+u*0.166666667f;
}
else if(im == 3){//north
u = uMax;
v = 0.0f;
rho = -v+(f0+f1+f3+2.0f*f6+2.0f*f2+2.0f*f5);
//f4 = f2-4.0f*v/6.0f;
f4 = f2-v*0.66666667f;
f7 = f5+0.5f*(f1-f3)-u*0.5f+v*0.166666667f;
f8 = f6-0.5f*(f1-f3)+u*0.5f+v*0.166666667f;
}
collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,rho,u,v,omega);
f0A[y*pitch+x]=f0;
f1B[y*pitch+x]=f1;
f2B[y*pitch+x]=f2;
f3B[y*pitch+x]=f3;
f4B[y*pitch+x]=f4;
f5B[y*pitch+x]=f5;
f6B[y*pitch+x]=f6;
f7B[y*pitch+x]=f7;
f8B[y*pitch+x]=f8;
}
}
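// Initialize every distribution to the D2Q9 equilibrium of a fluid at rest (rho = 1, u = v = 0).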
__global__ void initialize(float *f0, float *f1, float *f2,
float *f3, float *f4, float *f5,
float *f6, float *f7, float *f8,
int n, int pitch)
{
int i;
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
i = threadIdx.y*blockDim.x+threadIdx.x;
i += blockDim.x*blockDim.y*gridDim.x*blockIdx.y;
i += blockDim.x*blockDim.y*blockIdx.x;
//i = y*blockDim.x+x;
//f1[y*pitch+x] = tex2D(texRef_f1,x,y);
float u,v,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
usqr = u*u+v*v;
feq = 4.0f/9.0f*(rho-1.5f*usqr);
f0[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[y*pitch+x] = feq;
feq = 1.0f/9.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[y*pitch+x] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[y*pitch+x] = feq;
}
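// Host driver: allocates pitched device buffers for the nine distributions (f1..f8 are
// double-buffered), builds the boundary-type image, binds the 2D textures (the active kernels
// read pitched global memory directly, so the bindings are effectively unused), runs tMax time
// steps of the AB/BA ping-pong, and writes the macroscopic fields to a Tecplot file. The moving
// wall imposed on the top row (im == 3) suggests a lid-driven-cavity-style test case.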
int main(int argc, char *argv[])
{
float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h;
float *f0_d, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA;
float *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB;
int *image_d, *image_h;
ofstream output;
output.open ("LBM1_out.dat");
size_t memsize, memsize_int;
size_t pitch;
int i, tMax, n, nBlocks, xDim, yDim;
float Re, omega, uMax, CharLength;
int BLOCKSIZEx = 128;
int BLOCKSIZEy = 1;
xDim = 1024;//32;
yDim = 1024;//32;
tMax = 500;
Re = 500.f;//100.f;
uMax = 0.08f;
CharLength = xDim-2.f;
omega = 1.0f/(3.0f*(uMax*CharLength/Re)+0.5f);
cout<<"omega: "<<omega<<endl;
nBlocks = ((xDim+BLOCKSIZEx-1)/BLOCKSIZEx)*((yDim+BLOCKSIZEy-1)/BLOCKSIZEy);//ceiling division so partial blocks are counted once
int B = BLOCKSIZEx*BLOCKSIZEy;
n = nBlocks*B;//block*dimx*dimy
//CUT_DEVICE_INIT(argc,argv);
memsize = n*sizeof(float);
memsize_int = n*sizeof(int);
f0_h = (float *)malloc(memsize);
f1_h = (float *)malloc(memsize);
f2_h = (float *)malloc(memsize);
f3_h = (float *)malloc(memsize);
f4_h = (float *)malloc(memsize);
f5_h = (float *)malloc(memsize);
f6_h = (float *)malloc(memsize);
f7_h = (float *)malloc(memsize);
f8_h = (float *)malloc(memsize);
image_h = (int *)malloc(memsize_int);
//cudaMalloc((void **) &f0_d, memsize);
//CUDA_SAFE_CALL(cudaMalloc((void **) &f1_d, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f1_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f2_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f3_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f4_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f5_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f6_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f7_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f8_dA, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f1_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f2_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f3_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f4_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f5_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f6_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f7_dB, memsize));
// CUDA_SAFE_CALL(cudaMalloc((void **) &f8_dB, memsize));
// pitch = xDim*sizeof(float);
cudaMallocPitch((void **) &f0_d , &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f1_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f2_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f3_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f4_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f5_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f6_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f7_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f8_dA, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f1_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f2_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f3_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f4_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f5_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f6_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f7_dB, &pitch, xDim*sizeof(float), yDim);
cudaMallocPitch((void **) &f8_dB, &pitch, xDim*sizeof(float), yDim);
//CUDA_SAFE_CALL(cudaMalloc((void **) &f2_d, memsize));
//CUDA_SAFE_CALL(cudaMalloc((void **) &f3_d, memsize));
//CUDA_SAFE_CALL(cudaMalloc((void **) &f4_d, memsize));
//CUDA_SAFE_CALL(cudaMalloc((void **) &f5_d, memsize));
//CUDA_SAFE_CALL(cudaMalloc((void **) &f6_d, memsize));
//CUDA_SAFE_CALL(cudaMalloc((void **) &f7_d, memsize));
//CUDA_SAFE_CALL(cudaMalloc((void **) &f8_d, memsize));
cudaMalloc((void **) &image_d, memsize_int);
for (i = 0; i < n; i++)
{
int x = i%xDim;
int y = i/xDim;
f0_h[i] = i;
f1_h[i] = n-i;
f2_h[i] = i;
f3_h[i] = i;
f4_h[i] = i;
f5_h[i] = i;
f6_h[i] = i;
f7_h[i] = i;
f8_h[i] = i;
image_h[i] = 0;
if(x < 1) image_h[i] = 1;//BB (west wall)
if(x > xDim-2) image_h[i] = 1;//BB (east wall)
if(y < 1) image_h[i] = 1;//BB (south wall)
if(y > yDim-2) image_h[i] = 3;//moving wall ("north" velocity BC in the kernels)
}
//cudaMemcpy(f0_d, f0_h, memsize, cudaMemcpyHostToDevice);
//CUDA_SAFE_CALL(cudaMemcpy(f1_d, f1_h, memsize, cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f1_dA,f1_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f2_dA,f2_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f3_dA,f3_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f4_dA,f4_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f5_dA,f5_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f6_dA,f6_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f7_dA,f7_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f8_dA,f8_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f1_dB,f1_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f2_dB,f2_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f3_dB,f3_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f4_dB,f4_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f5_dB,f5_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f6_dB,f6_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f7_dB,f7_h,memsize,cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(f8_dB,f8_h,memsize,cudaMemcpyHostToDevice));
cudaMemcpy2D(f0_d ,pitch,f1_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f1_dA,pitch,f1_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f2_dA,pitch,f2_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f3_dA,pitch,f3_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f4_dA,pitch,f4_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f5_dA,pitch,f5_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f6_dA,pitch,f6_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f7_dA,pitch,f7_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f8_dA,pitch,f8_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f1_dB,pitch,f1_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f2_dB,pitch,f2_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f3_dB,pitch,f3_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f4_dB,pitch,f4_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f5_dB,pitch,f5_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f6_dB,pitch,f6_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f7_dB,pitch,f7_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
cudaMemcpy2D(f8_dB,pitch,f8_h,xDim*sizeof(float),xDim*sizeof(float),yDim,cudaMemcpyHostToDevice);
//CUDA_SAFE_CALL(cudaMemcpy(f2_d, f2_h, memsize, cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(f3_d, f3_h, memsize, cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(f4_d, f4_h, memsize, cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(f5_d, f5_h, memsize, cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(f6_d, f6_h, memsize, cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(f7_d, f7_h, memsize, cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(f8_d, f8_h, memsize, cudaMemcpyHostToDevice));
cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
//cout<<(int)(pitch/sizeof(float))<<endl;
dim3 threads(BLOCKSIZEx, BLOCKSIZEy);
dim3 grid(xDim/BLOCKSIZEx,yDim/BLOCKSIZEy);
cout<<"nBlocks:"<<nBlocks<<endl;
texRef_f1A.normalized = false;
texRef_f2A.normalized = false;
texRef_f3A.normalized = false;
texRef_f4A.normalized = false;
texRef_f5A.normalized = false;
texRef_f6A.normalized = false;
texRef_f7A.normalized = false;
texRef_f8A.normalized = false;
texRef_f1A.filterMode = cudaFilterModePoint;
texRef_f2A.filterMode = cudaFilterModePoint;
texRef_f3A.filterMode = cudaFilterModePoint;
texRef_f4A.filterMode = cudaFilterModePoint;
texRef_f5A.filterMode = cudaFilterModePoint;
texRef_f6A.filterMode = cudaFilterModePoint;
texRef_f7A.filterMode = cudaFilterModePoint;
texRef_f8A.filterMode = cudaFilterModePoint;
texRef_f1B.normalized = false;
texRef_f2B.normalized = false;
texRef_f3B.normalized = false;
texRef_f4B.normalized = false;
texRef_f5B.normalized = false;
texRef_f6B.normalized = false;
texRef_f7B.normalized = false;
texRef_f8B.normalized = false;
texRef_f1B.filterMode = cudaFilterModePoint;
texRef_f2B.filterMode = cudaFilterModePoint;
texRef_f3B.filterMode = cudaFilterModePoint;
texRef_f4B.filterMode = cudaFilterModePoint;
texRef_f5B.filterMode = cudaFilterModePoint;
texRef_f6B.filterMode = cudaFilterModePoint;
texRef_f7B.filterMode = cudaFilterModePoint;
texRef_f8B.filterMode = cudaFilterModePoint;
initialize<<<grid, threads>>>(f0_d, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA,
n,(int)(pitch/sizeof(float)));
cudaBindTexture2D(0,&texRef_f1A, f1_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f2A, f2_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f3A, f3_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f4A, f4_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f5A, f5_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f6A, f6_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f7A, f7_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f8A, f8_dA,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f1B, f1_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f2B, f2_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f3B, f3_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f4B, f4_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f5B, f5_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f6B, f6_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f7B, f7_dB,&desc,xDim,yDim,pitch);
cudaBindTexture2D(0,&texRef_f8B, f8_dB,&desc,xDim,yDim,pitch);
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
for(int t = 0; t<tMax; t=t+2){
//for(int t = 0; t<tMax; t=t+1){
//mrt_d<<<grid, threads>>>(f0_d,f1_d,f2_d,f3_d,f4_d,f5_d,f6_d,f7_d,f8_d,n,image_d,omega,uMax);
//test<<<grid, threads>>>(f0_d,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
mrt_d_textAB<<<grid, threads>>>(f0_d,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,
n,image_d,omega,uMax,(int)(pitch/sizeof(float)));
//test<<<grid, threads>>>(f0_d,f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,
mrt_d_textBA<<<grid, threads>>>(f0_d,f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,
f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
n,image_d,omega,uMax,(int)(pitch/sizeof(float)));
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
cout<<"Time taken for main kernel: "<<restime<<" ("<<double(xDim*yDim*double(tMax/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<xDim<<","<<yDim<<","<<tMax<<","<<restime<<endl;
cudaUnbindTexture(texRef_f1A);
cudaUnbindTexture(texRef_f2A);
cudaUnbindTexture(texRef_f3A);
cudaUnbindTexture(texRef_f4A);
cudaUnbindTexture(texRef_f5A);
cudaUnbindTexture(texRef_f6A);
cudaUnbindTexture(texRef_f7A);
cudaUnbindTexture(texRef_f8A);
cudaUnbindTexture(texRef_f1B);
cudaUnbindTexture(texRef_f2B);
cudaUnbindTexture(texRef_f3B);
cudaUnbindTexture(texRef_f4B);
cudaUnbindTexture(texRef_f5B);
cudaUnbindTexture(texRef_f6B);
cudaUnbindTexture(texRef_f7B);
cudaUnbindTexture(texRef_f8B);
//CUT_CHECK_ERROR("Kernel execution failed");
//cudaMemcpy(f0_h, f0_d, memsize, cudaMemcpyDeviceToHost);
//CUDA_SAFE_CALL(cudaMemcpy(f1_h, f1_d, memsize, cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f1_h,f1_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f2_h,f2_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f3_h,f3_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f4_h,f4_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f5_h,f5_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f6_h,f6_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f7_h,f7_dB,memsize,cudaMemcpyDeviceToHost));
// CUDA_SAFE_CALL(cudaMemcpy(f8_h,f8_dB,memsize,cudaMemcpyDeviceToHost));
cudaMemcpy2D(f0_h,xDim*sizeof(float),f0_d ,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f1_h,xDim*sizeof(float),f1_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f2_h,xDim*sizeof(float),f2_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f3_h,xDim*sizeof(float),f3_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f4_h,xDim*sizeof(float),f4_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f5_h,xDim*sizeof(float),f5_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f6_h,xDim*sizeof(float),f6_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f7_h,xDim*sizeof(float),f7_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(f8_h,xDim*sizeof(float),f8_dB,pitch,xDim*sizeof(float),yDim,cudaMemcpyDeviceToHost);
//CUDA_SAFE_CALL(cudaMemcpy(f2_h, f2_d, memsize, cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(f3_h, f3_d, memsize, cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(f4_h, f4_d, memsize, cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(f5_h, f5_d, memsize, cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(f6_h, f6_d, memsize, cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(f7_h, f7_d, memsize, cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(f8_h, f8_d, memsize, cudaMemcpyDeviceToHost));
output<<"VARIABLES = \"X\",\"Y\",\"u\",\"v\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<xDim<<", J="<<yDim<<"\n";
// for(i = 0; i<n; i++)
// {
int row = 0;
int col = 0;
i = 0;
//int rowB, colB;
//float xcoord, ycoord;
float rho, u, v;
rho = 0;
u = 0;
v = 0;
for(row = 0; row<yDim; row++){
for(col = 0; col<xDim; col++){
i = row*xDim+col;
rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i];
u = f1_h[i]-f3_h[i]+f5_h[i]-f6_h[i]-f7_h[i]+f8_h[i];
v = f2_h[i]-f4_h[i]+f5_h[i]+f6_h[i]-f7_h[i]-f8_h[i];
output<<col<<", "<<row<<", "<<u<<", "<<v<<", "<<rho<<endl;
}
}
free(f0_h);
free(f1_h);
free(f2_h);
free(f3_h);
free(f4_h);
free(f5_h);
free(f6_h);
free(f7_h);
free(f8_h);
output.close();
cudaFree(f0_d);
cudaFree(f1_dA);
cudaFree(f2_dA);
cudaFree(f3_dA);
cudaFree(f4_dA);
cudaFree(f5_dA);
cudaFree(f6_dA);
cudaFree(f7_dA);
cudaFree(f8_dA);
cudaFree(f1_dB);
cudaFree(f2_dB);
cudaFree(f3_dB);
cudaFree(f4_dB);
cudaFree(f5_dB);
cudaFree(f6_dB);
cudaFree(f7_dB);
cudaFree(f8_dB);
cudaFree(image_d);
return(0);
}
|
92a2af4d9cd2d17110cd4eef6733a66562a9afda.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Vector-matrix multiplication: Y = A * X.
* Host code.
* Author: Naga Kandasamy
* Date: 11/06/2014
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <vec_mat_mult_kernel.cu>
#define MIN_NUMBER 1
#define MAX_NUMBER 4
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix M);
Matrix allocate_matrix(int num_rows, int num_columns, int init);
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost);
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice);
void vec_mat_mult_on_device(const Matrix M, const Matrix N, Matrix P);
void print_matrix(const Matrix M);
float get_random_number(int, int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix A; // N x N matrix
Matrix X; // N x 1 vector
Matrix Y_cpu, Y_gpu; // N x 1 vector
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random 512 X 512 matrix
X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random 512 x 1 vector
Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
Y_gpu = allocate_matrix(MATRIX_SIZE, 1, 0);
// Perform the vector-matrix multiplication on the GPU using global memory
vec_mat_mult_on_device(A, X, Y_gpu);
// compute the vector-matrix multiplication on the CPU for comparison
unsigned int timer;
cutCreateTimer(&timer);
cutStartTimer(timer);
compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
cutStopTimer(timer);
printf("Execution time on the CPU: %f seconds. \n", (float)cutGetTimerValue(timer)/1000.0);
// check if the device result is equivalent to the expected solution
int size_elements = NUM_ROWS;
CUTBoolean res = cutComparefe(Y_cpu.elements, Y_gpu.elements, size_elements, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Free host matrices
free(A.elements); A.elements = NULL;
free(X.elements); X.elements = NULL;
free(Y_cpu.elements); Y_cpu.elements = NULL;
free(Y_gpu.elements); Y_gpu.elements = NULL;
return 0;
}
// Complete the functionality of vector-matrix multiplication using the GPU
void vec_mat_mult_on_device(const Matrix A, const Matrix X, Matrix Y){
}
// Allocate a device matrix of same size as M.
Matrix allocate_matrix_on_gpu(const Matrix M){
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix allocate_matrix(int num_rows, int num_columns, int init){
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void print_matrix(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%f ", M.elements[i*M.num_columns + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
|
92a2af4d9cd2d17110cd4eef6733a66562a9afda.cu
|
/* Vector-matrix multiplication: Y = A * X.
* Host code.
* Author: Naga Kandasamy
* Date: 11/06/2014
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <vec_mat_mult_kernel.cu>
#define MIN_NUMBER 1
#define MAX_NUMBER 4
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix M);
Matrix allocate_matrix(int num_rows, int num_columns, int init);
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost);
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice);
void vec_mat_mult_on_device(const Matrix M, const Matrix N, Matrix P);
void print_matrix(const Matrix M);
float get_random_number(int, int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix A; // N x N matrix
Matrix X; // N x 1 vector
Matrix Y_cpu, Y_gpu; // N x 1 vector
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random 512 X 512 matrix
X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random 512 x 1 vector
Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
Y_gpu = allocate_matrix(MATRIX_SIZE, 1, 0);
// Perform the vector-matrix multiplication on the GPU using global memory
vec_mat_mult_on_device(A, X, Y_gpu);
// compute the vector-matrix multiplication on the CPU for comparison
unsigned int timer;
cutCreateTimer(&timer);
cutStartTimer(timer);
compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
cutStopTimer(timer);
printf("Execution time on the CPU: %f seconds. \n", (float)cutGetTimerValue(timer)/1000.0);
// check if the device result is equivalent to the expected solution
int size_elements = NUM_ROWS;
CUTBoolean res = cutComparefe(Y_cpu.elements, Y_gpu.elements, size_elements, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Free host matrices
free(A.elements); A.elements = NULL;
free(X.elements); X.elements = NULL;
free(Y_cpu.elements); Y_cpu.elements = NULL;
free(Y_gpu.elements); Y_gpu.elements = NULL;
return 0;
}
// Complete the functionality of vector-matrix multiplication using the GPU
void vec_mat_mult_on_device(const Matrix A, const Matrix X, Matrix Y){
}
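// ---------------------------------------------------------------------------
// The stub above is intentionally left for completion. What follows is only a
// minimal sketch of one possible completion, written against the helpers that
// already exist in this file (allocate_matrix_on_gpu, copy_matrix_to_device,
// copy_matrix_from_device). The kernel below is an assumption -- the real
// kernel is expected to live in vec_mat_mult_kernel.cu and is not shown here --
// and the 256-thread block size is an arbitrary choice.
__global__ void vec_mat_mult_kernel_sketch(const float *A, const float *X, float *Y,
                                           int num_rows, int num_columns) {
	// One thread per output row: dot a row of A with the vector X.
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	if (row < num_rows) {
		float sum = 0.0f;
		for (int col = 0; col < num_columns; col++)
			sum += A[row * num_columns + col] * X[col];
		Y[row] = sum;
	}
}
void vec_mat_mult_on_device_sketch(const Matrix A, const Matrix X, Matrix Y) {
	// Allocate device copies and move the inputs over.
	Matrix A_d = allocate_matrix_on_gpu(A);
	Matrix X_d = allocate_matrix_on_gpu(X);
	Matrix Y_d = allocate_matrix_on_gpu(Y);
	copy_matrix_to_device(A_d, A);
	copy_matrix_to_device(X_d, X);
	// Launch with one thread per row of A.
	dim3 block(256);
	dim3 grid((A.num_rows + block.x - 1) / block.x);
	vec_mat_mult_kernel_sketch<<<grid, block>>>(A_d.elements, X_d.elements, Y_d.elements,
	                                            A.num_rows, A.num_columns);
	cudaDeviceSynchronize();
	// Copy the result back into the host matrix and release device memory.
	copy_matrix_from_device(Y, Y_d);
	cudaFree(A_d.elements);
	cudaFree(X_d.elements);
	cudaFree(Y_d.elements);
}
// ---------------------------------------------------------------------------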
// Allocate a device matrix of same size as M.
Matrix allocate_matrix_on_gpu(const Matrix M){
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix allocate_matrix(int num_rows, int num_columns, int init){
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void print_matrix(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%f ", M.elements[i*M.num_columns + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
|
11f2f70af7418173e3665751fda3d10395c3964c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_residue_eval;
int xdim0_residue_eval_h = -1;
int ydim0_residue_eval_h = -1;
__constant__ int xdim1_residue_eval;
int xdim1_residue_eval_h = -1;
int ydim1_residue_eval_h = -1;
__constant__ int xdim2_residue_eval;
int xdim2_residue_eval_h = -1;
int ydim2_residue_eval_h = -1;
__constant__ int xdim3_residue_eval;
int xdim3_residue_eval_h = -1;
int ydim3_residue_eval_h = -1;
__constant__ int xdim4_residue_eval;
int xdim4_residue_eval_h = -1;
int ydim4_residue_eval_h = -1;
__constant__ int xdim5_residue_eval;
int xdim5_residue_eval_h = -1;
int ydim5_residue_eval_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
#define OPS_ACC4(x) (x)
#define OPS_ACC5(x) (x)
// user function
__device__
void
residue_eval(const double *der1, const double *der2, const double *der3,
double *rho_res, double *rhou_res, double *rhoE_res) {
rho_res[OPS_ACC3(0)] = der1[OPS_ACC0(0)];
rhou_res[OPS_ACC4(0)] = der2[OPS_ACC1(0)];
rhoE_res[OPS_ACC5(0)] = der3[OPS_ACC2(0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void
ops_residue_eval(const double *__restrict arg0, const double *__restrict arg1,
const double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
arg4 += idx_x * 1 * 1;
arg5 += idx_x * 1 * 1;
if (idx_x < size0) {
residue_eval(arg0, arg1, arg2, arg3, arg4, arg5);
}
}
// host stub function
void ops_par_loop_residue_eval(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 6, range, 5))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5, "residue_eval");
OPS_kernels[5].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_residue_eval_h || xdim1 != xdim1_residue_eval_h ||
xdim2 != xdim2_residue_eval_h || xdim3 != xdim3_residue_eval_h ||
xdim4 != xdim4_residue_eval_h || xdim5 != xdim5_residue_eval_h) {
hipMemcpyToSymbol(xdim0_residue_eval, &xdim0, sizeof(int));
xdim0_residue_eval_h = xdim0;
hipMemcpyToSymbol(xdim1_residue_eval, &xdim1, sizeof(int));
xdim1_residue_eval_h = xdim1;
hipMemcpyToSymbol(xdim2_residue_eval, &xdim2, sizeof(int));
xdim2_residue_eval_h = xdim2;
hipMemcpyToSymbol(xdim3_residue_eval, &xdim3, sizeof(int));
xdim3_residue_eval_h = xdim3;
hipMemcpyToSymbol(xdim4_residue_eval, &xdim4, sizeof(int));
xdim4_residue_eval_h = xdim4;
hipMemcpyToSymbol(xdim5_residue_eval, &xdim5, sizeof(int));
xdim5_residue_eval_h = xdim5;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
char *p_a[6];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
p_a[5] = (char *)args[5].data_d + base5;
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_residue_eval), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], x_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[5].time += t1 - t2;
}
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
|
11f2f70af7418173e3665751fda3d10395c3964c.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_residue_eval;
int xdim0_residue_eval_h = -1;
int ydim0_residue_eval_h = -1;
__constant__ int xdim1_residue_eval;
int xdim1_residue_eval_h = -1;
int ydim1_residue_eval_h = -1;
__constant__ int xdim2_residue_eval;
int xdim2_residue_eval_h = -1;
int ydim2_residue_eval_h = -1;
__constant__ int xdim3_residue_eval;
int xdim3_residue_eval_h = -1;
int ydim3_residue_eval_h = -1;
__constant__ int xdim4_residue_eval;
int xdim4_residue_eval_h = -1;
int ydim4_residue_eval_h = -1;
__constant__ int xdim5_residue_eval;
int xdim5_residue_eval_h = -1;
int ydim5_residue_eval_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
#define OPS_ACC4(x) (x)
#define OPS_ACC5(x) (x)
// user function
__device__
void
residue_eval(const double *der1, const double *der2, const double *der3,
double *rho_res, double *rhou_res, double *rhoE_res) {
rho_res[OPS_ACC3(0)] = der1[OPS_ACC0(0)];
rhou_res[OPS_ACC4(0)] = der2[OPS_ACC1(0)];
rhoE_res[OPS_ACC5(0)] = der3[OPS_ACC2(0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void
ops_residue_eval(const double *__restrict arg0, const double *__restrict arg1,
const double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
arg4 += idx_x * 1 * 1;
arg5 += idx_x * 1 * 1;
if (idx_x < size0) {
residue_eval(arg0, arg1, arg2, arg3, arg4, arg5);
}
}
// host stub function
void ops_par_loop_residue_eval(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 6, range, 5))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5, "residue_eval");
OPS_kernels[5].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
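// The leading dimensions are cached in the *_h host copies below so the __constant__ symbols
// are only re-uploaded to the device when a dat's size has changed since the previous call.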
if (xdim0 != xdim0_residue_eval_h || xdim1 != xdim1_residue_eval_h ||
xdim2 != xdim2_residue_eval_h || xdim3 != xdim3_residue_eval_h ||
xdim4 != xdim4_residue_eval_h || xdim5 != xdim5_residue_eval_h) {
cudaMemcpyToSymbol(xdim0_residue_eval, &xdim0, sizeof(int));
xdim0_residue_eval_h = xdim0;
cudaMemcpyToSymbol(xdim1_residue_eval, &xdim1, sizeof(int));
xdim1_residue_eval_h = xdim1;
cudaMemcpyToSymbol(xdim2_residue_eval, &xdim2, sizeof(int));
xdim2_residue_eval_h = xdim2;
cudaMemcpyToSymbol(xdim3_residue_eval, &xdim3, sizeof(int));
xdim3_residue_eval_h = xdim3;
cudaMemcpyToSymbol(xdim4_residue_eval, &xdim4, sizeof(int));
xdim4_residue_eval_h = xdim4;
cudaMemcpyToSymbol(xdim5_residue_eval, &xdim5, sizeof(int));
xdim5_residue_eval_h = xdim5;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
char *p_a[6];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
p_a[5] = (char *)args[5].data_d + base5;
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_residue_eval<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], x_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[5].time += t1 - t2;
}
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
|
sparse_fill_empty_rows.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_fill_empty_rows.cuh"
#include "plugin/device/cpu/kernel/nnacl/op_base.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
struct BoolToSize {
typedef int index_type;
__device__ index_type operator()(bool x) const { return x ? 1 : 0; }
};
// Todo: Assign values to the output pointers according to sorted_order and calculate the ReverseIndexMap.
// Input: values_ptr, indice_ptr, sorted_order, dense_row, default_value, emptyrow_count,
// input_row_end.
// Output: output_values, output_indices, reverse_index_map, real_indice_num.
template <typename S>
__global__ void AssignValueKernel(Complex<S> *values_ptr, int64_t *indice_ptr, int64_t *sorted_order, size_t dense_row,
Complex<S> *default_value, int *emptyrow_count, int64_t *input_row_end,
Complex<S> *output_values, int64_t *output_indices, int indice_num,
size_t *real_indice_num, int64_t *reverse_index_map) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < dense_row; i += blockDim.x * gridDim.x) {
if (i > 0 && input_row_end[i] == input_row_end[i - 1]) {
      // Empty row (input_row_end[i] == input_row_end[i-1]): insert the default value and indices (i, 0).
int index = input_row_end[i] + emptyrow_count[i] - 1;
output_values[index] = *default_value;
output_indices[2 * index] = i;
output_indices[2 * index + 1] = 0;
} else if (i > 0 && input_row_end[i] > input_row_end[i - 1]) {
      // Non-empty row: gather its elements through sorted_order into output_values & output_indices.
for (int j = input_row_end[i - 1]; j < input_row_end[i]; j++) {
int index_out = j + emptyrow_count[i];
int index_in = sorted_order[j];
output_values[index_out] = values_ptr[index_in];
output_indices[2 * index_out] = indice_ptr[2 * index_in];
output_indices[2 * index_out + 1] = indice_ptr[2 * index_in + 1];
reverse_index_map[index_in] = index_out;
}
} else if (i == 0 && input_row_end[0] == 0) {
// If the first row has no element.
output_values[0] = *default_value;
output_indices[0] = 0;
output_indices[1] = 0;
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
} else if (i == 0 && input_row_end[0] > 0) {
      // The first row is not empty: copy its elements in sorted order.
for (int j = 0; j < input_row_end[i]; j++) {
int index_in = sorted_order[j];
output_values[j] = values_ptr[index_in];
output_indices[2 * j] = indice_ptr[2 * index_in];
output_indices[2 * j + 1] = indice_ptr[2 * index_in + 1];
reverse_index_map[index_in] = j;
}
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
}
}
return;
}
// Count the number of elements in each row.
// Input: dense_shape_ptr, indices_ptr (row indices), indice_num.
// Output: elements_per_row.
__global__ void CalElementPerRowsKernel(int64_t *dense_shape_ptr, int64_t *indices_ptr, int64_t *elements_per_row,
int indice_num) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indice_num; i += blockDim.x * gridDim.x) {
int row = indices_ptr[i];
MsAtomicAdd(&elements_per_row[row], static_cast<int64_t>(1));
}
}
// Mark which rows are empty.
// Input: elements_per_row, dense_row.
// Output: output_empty_row_indicator_ptr.
__global__ void CalEmptyRowIndicatorKernel(int64_t *elements_per_row, size_t dense_row,
bool *output_empty_row_indicator_ptr) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < dense_row; i += blockDim.x * gridDim.x) {
if (elements_per_row[i] == 0) {
output_empty_row_indicator_ptr[i] = 1;
} else {
output_empty_row_indicator_ptr[i] = 0;
}
}
}
// Extract the row index from indices_ptr and generate an ascending position index.
// Input: indices_ptr, indice_num.
// Output: row_indices, origin_index.
__global__ void CopyRowIndiceKernel(int64_t *indices_ptr, int64_t *row_indices, int64_t *origin_index, int indice_num) {
int rank = 2;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < indice_num; i += blockDim.x * gridDim.x) {
row_indices[i] = indices_ptr[i * rank];
origin_index[i] = i;
}
}
// Calculate the inclusive prefix sum of the empty-row indicator.
// Input: dense_row, output_empty_row_indicator_ptr, cuda_stream.
// Output: empty_row_count_sum.
void InclusiveBoolPrefixSum(size_t dense_row, bool *output_empty_row_indicator_ptr, int *empty_row_count_sum,
hipStream_t cuda_stream) {
BoolToSize op;
hipcub::TransformInputIterator<int, BoolToSize, const bool *> iter(output_empty_row_indicator_ptr, op);
size_t temp_storage_bytes = 0;
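  // The first InclusiveSum call is the standard CUB size query: with a null temporary-storage pointer it
  // only writes the required byte count into temp_storage_bytes; the scan itself runs in the second call
  // once d_temp_storage has been allocated.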
(void)hipcub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, iter, empty_row_count_sum,
static_cast<int>(dense_row), cuda_stream);
void *d_temp_storage = nullptr;
hipStreamSynchronize(cuda_stream);
(void)hipMalloc(&d_temp_storage, temp_storage_bytes);
(void)hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, iter, empty_row_count_sum,
static_cast<int>(dense_row), cuda_stream);
hipStreamSynchronize(cuda_stream);
(void)hipFree(d_temp_storage);
}
// Calculate the inclusive prefix sum of elements_per_row.
// Input: dense_row, elements_per_row, cuda_stream.
// Output: input_row_ends.
void InclusivePrefixSum(size_t dense_row, int64_t *elements_per_row, int64_t *input_row_ends,
hipStream_t cuda_stream) {
if (dense_row == 0) {
return;
}
size_t temp_storage_bytes = 0;
(void)hipcub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, elements_per_row, input_row_ends,
static_cast<int>(dense_row), cuda_stream);
void *d_temp_storage = nullptr;
(void)hipMalloc(&d_temp_storage, temp_storage_bytes);
(void)hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, elements_per_row, input_row_ends,
static_cast<int>(dense_row), cuda_stream);
hipStreamSynchronize(cuda_stream);
(void)hipFree(d_temp_storage);
}
// Sort row_indices (keys) into ascending order together with origin_index (values), producing the
// key-value pairs (sorted_key, sorted_order), where sorted_order holds each entry's original position.
// Input: indice_size, cuda_stream, dense_shape_ptr, row_indices, origin_index, device_id.
// Output: sorted_key, sorted_order.
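// Illustrative example: for row_indices = {2, 0, 1} and origin_index = {0, 1, 2}, a stable SortPairs
// yields sorted_key = {0, 1, 2} and sorted_order = {1, 2, 0}, i.e. the position each sorted entry
// occupied in the unsorted input; AssignValueKernel uses sorted_order as a gather map.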
__host__ void RowsSort(int64_t indice_size, hipStream_t cuda_stream, int64_t *dense_shape_ptr, int64_t *row_indices,
int64_t *origin_index, int64_t *sorted_key, int64_t *sorted_order, int device_id) {
size_t temp_storage_ = 0;
(void)hipcub::DeviceRadixSort::SortPairs(nullptr, temp_storage_, row_indices, static_cast<int64_t *>(nullptr),
origin_index, sorted_order, indice_size, 0, sizeof(int64_t) * 8, cuda_stream);
void *d_temp_ = nullptr;
(void)hipMalloc(&d_temp_, temp_storage_);
(void)hipcub::DeviceRadixSort::SortPairs(d_temp_, temp_storage_, row_indices, static_cast<int64_t *>(sorted_key),
origin_index, sorted_order, indice_size, 0, sizeof(int64_t) * 8, cuda_stream);
(void)hipFree(d_temp_);
}
template <typename S>
__global__ void AssignValueKernel(S *values_ptr, int64_t *indice_ptr, int64_t *sorted_indices, size_t dense_row,
S *default_value, int *emptyrow_count, int64_t *input_row_end, S *output_values,
int64_t *output_indices, int indice_num, size_t *real_indice_num,
int64_t *reverse_index_map) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < dense_row; i += blockDim.x * gridDim.x) {
if (i > 0 && input_row_end[i] == input_row_end[i - 1]) {
      // Empty row (input_row_end[i] == input_row_end[i-1]): insert the default value and indices (i, 0).
int index = input_row_end[i] + emptyrow_count[i] - 1;
output_values[index] = *default_value;
output_indices[2 * index] = i;
output_indices[2 * index + 1] = 0;
} else if (i > 0 && input_row_end[i] > input_row_end[i - 1]) {
      // Non-empty row: gather its elements through sorted_indices into output_values & output_indices.
for (int j = input_row_end[i - 1]; j < input_row_end[i]; j++) {
int index_out = j + emptyrow_count[i];
int index_in = sorted_indices[j];
output_values[index_out] = values_ptr[index_in];
output_indices[2 * index_out] = indice_ptr[2 * index_in];
output_indices[2 * index_out + 1] = indice_ptr[2 * index_in + 1];
reverse_index_map[index_in] = index_out;
}
} else if (i == 0 && input_row_end[0] == 0) {
// If the first row has no element.
output_values[0] = *default_value;
output_indices[0] = 0;
output_indices[1] = 0;
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
} else if (i == 0 && input_row_end[0] > 0) {
      // The first row is not empty: copy its elements in sorted order.
for (int j = 0; j < input_row_end[i]; j++) {
output_values[j] = values_ptr[sorted_indices[j]];
output_indices[2 * j] = indice_ptr[2 * sorted_indices[j]];
output_indices[2 * j + 1] = indice_ptr[2 * sorted_indices[j] + 1];
reverse_index_map[sorted_indices[j]] = j;
}
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
}
}
return;
}
template <typename S>
CUDA_LIB_EXPORT void SparseFillEmptyRows(int64_t *indices_ptr, Complex<S> *values_ptr, Complex<S> *default_value,
int64_t *dense_shape_ptr, int device_id, int indice_num, size_t dense_row,
int64_t *elements_per_rows, int *empty_row_count_sum, int64_t *row_indices,
int64_t *input_row_ends, int64_t *sorted_indices, size_t *final_shape,
int64_t *origin_index, int64_t *sorted_key, hipStream_t cuda_stream,
int64_t *output_indices_ptr, Complex<S> *output_values_ptr,
bool *output_empty_row_indicator_ptr, int64_t *output_reverse_index_map_ptr) {
int thread_num_dense_row = 256 < dense_row ? 256 : dense_row;
hipDeviceProp_t prop;
(void)hipGetDeviceProperties(&prop, device_id);
int max_blocks = prop.multiProcessorCount;
int block_num = ::min(static_cast<int>(((dense_row - 1) / thread_num_dense_row) + 1), max_blocks);
hipLaunchKernelGGL(( CopyRowIndiceKernel), dim3(block_num), dim3(thread_num_dense_row), 0, cuda_stream, indices_ptr, row_indices, origin_index,
indice_num);
hipMemset(elements_per_rows, 0, dense_row * sizeof(int64_t));
int thread_num_indice_num = 256 < indice_num ? 256 : indice_num;
block_num = ::min(static_cast<int>(((indice_num - 1) / thread_num_indice_num) + 1), max_blocks);
hipLaunchKernelGGL(( CalElementPerRowsKernel), dim3(block_num), dim3(thread_num_indice_num), 0, cuda_stream, dense_shape_ptr, row_indices,
elements_per_rows, indice_num);
hipLaunchKernelGGL(( CalEmptyRowIndicatorKernel), dim3(block_num), dim3(thread_num_dense_row), 0, cuda_stream, elements_per_rows, dense_row,
output_empty_row_indicator_ptr);
InclusivePrefixSum(dense_row, elements_per_rows, input_row_ends, cuda_stream);
InclusiveBoolPrefixSum(dense_row, output_empty_row_indicator_ptr, empty_row_count_sum, cuda_stream);
RowsSort(indice_num, cuda_stream, dense_shape_ptr, row_indices, origin_index, sorted_key, sorted_indices, device_id);
hipLaunchKernelGGL(( AssignValueKernel), dim3(block_num), dim3(thread_num_dense_row), 0, cuda_stream,
values_ptr, indices_ptr, sorted_indices, dense_row, default_value, empty_row_count_sum, input_row_ends,
output_values_ptr, output_indices_ptr, indice_num, final_shape, output_reverse_index_map_ptr);
return;
}
template <typename S>
CUDA_LIB_EXPORT void SparseFillEmptyRows(int64_t *indices_ptr, S *values_ptr, S *default_value,
int64_t *dense_shape_ptr, int device_id, int indice_num, size_t dense_row,
int64_t *elements_per_rows, int *empty_row_count_sum, int64_t *row_indices,
int64_t *input_row_ends, int64_t *sorted_indices, size_t *final_shape,
int64_t *origin_index, int64_t *sorted_key, hipStream_t cuda_stream,
int64_t *output_indices_ptr, S *output_values_ptr,
bool *output_empty_row_indicator_ptr, int64_t *output_reverse_index_map_ptr) {
int thread_num_dense_row = 256 < dense_row ? 256 : dense_row;
hipDeviceProp_t prop;
(void)hipGetDeviceProperties(&prop, device_id);
int max_blocks = prop.multiProcessorCount;
int block_num = ::min(static_cast<int>(((dense_row - 1) / thread_num_dense_row) + 1), max_blocks);
hipLaunchKernelGGL(( CopyRowIndiceKernel), dim3(block_num), dim3(thread_num_dense_row), 0, cuda_stream, indices_ptr, row_indices, origin_index,
indice_num);
hipMemset(elements_per_rows, 0, dense_row * sizeof(int64_t));
int thread_num_indice_num = 256 < indice_num ? 256 : indice_num;
block_num = ::min(static_cast<int>(((indice_num - 1) / thread_num_indice_num) + 1), max_blocks);
hipLaunchKernelGGL(( CalElementPerRowsKernel), dim3(block_num), dim3(thread_num_indice_num), 0, cuda_stream, dense_shape_ptr, row_indices,
elements_per_rows, indice_num);
hipLaunchKernelGGL(( CalEmptyRowIndicatorKernel), dim3(block_num), dim3(thread_num_dense_row), 0, cuda_stream, elements_per_rows, dense_row,
output_empty_row_indicator_ptr);
InclusivePrefixSum(dense_row, elements_per_rows, input_row_ends, cuda_stream);
InclusiveBoolPrefixSum(dense_row, output_empty_row_indicator_ptr, empty_row_count_sum, cuda_stream);
RowsSort(indice_num, cuda_stream, dense_shape_ptr, row_indices, origin_index, sorted_key, sorted_indices, device_id);
hipLaunchKernelGGL(( AssignValueKernel), dim3(block_num), dim3(thread_num_dense_row), 0, cuda_stream,
values_ptr, indices_ptr, sorted_indices, dense_row, default_value, empty_row_count_sum, input_row_ends,
output_values_ptr, output_indices_ptr, indice_num, final_shape, output_reverse_index_map_ptr);
return;
}
#define TEMPLATE_INSTANCE(DTYPE) \
template CUDA_LIB_EXPORT void SparseFillEmptyRows<DTYPE>( \
int64_t * indices_ptr, DTYPE * values_ptr, DTYPE * default_value, int64_t * dense_shape_ptr, int device_id, \
int indice_num, size_t dense_row, int64_t *elements_per_rows, int *rows_are_not_ordered, int64_t *row_indices, \
int64_t *input_row_ends, int64_t *sorted_indices, size_t *final_shape, int64_t *origin_index, int64_t *sorted_key, \
hipStream_t cuda_stream, int64_t *output_indices_ptr, DTYPE *output_values_ptr, \
bool *output_empty_row_indicator_ptr, int64_t *output_reverse_index_map_ptr);
TEMPLATE_INSTANCE(float)
TEMPLATE_INSTANCE(half)
TEMPLATE_INSTANCE(double)
TEMPLATE_INSTANCE(int)
TEMPLATE_INSTANCE(int64_t)
TEMPLATE_INSTANCE(uint32_t)
TEMPLATE_INSTANCE(uint64_t)
TEMPLATE_INSTANCE(uint16_t)
TEMPLATE_INSTANCE(uint8_t)
TEMPLATE_INSTANCE(int8_t)
TEMPLATE_INSTANCE(int16_t)
TEMPLATE_INSTANCE(bool)
TEMPLATE_INSTANCE(Complex<float>)
TEMPLATE_INSTANCE(Complex<double>)
|
sparse_fill_empty_rows.cu
|
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_fill_empty_rows.cuh"
#include "plugin/device/cpu/kernel/nnacl/op_base.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
struct BoolToSize {
typedef int index_type;
__device__ index_type operator()(bool x) const { return x ? 1 : 0; }
};
// According to sorted_order, assign new values to the output pointers and calculate the reverse index map.
// Input: values_ptr, indice_ptr, sorted_order, dense_row, default_value, emptyrow_count,
// input_row_end, indice_num.
// Output: output_values, output_indices, real_indice_num, reverse_index_map.
template <typename S>
__global__ void AssignValueKernel(Complex<S> *values_ptr, int64_t *indice_ptr, int64_t *sorted_order, size_t dense_row,
Complex<S> *default_value, int *emptyrow_count, int64_t *input_row_end,
Complex<S> *output_values, int64_t *output_indices, int indice_num,
size_t *real_indice_num, int64_t *reverse_index_map) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < dense_row; i += blockDim.x * gridDim.x) {
if (i > 0 && input_row_end[i] == input_row_end[i - 1]) {
      // Empty row (input_row_end[i] == input_row_end[i-1]): insert the default value and indices (i, 0).
int index = input_row_end[i] + emptyrow_count[i] - 1;
output_values[index] = *default_value;
output_indices[2 * index] = i;
output_indices[2 * index + 1] = 0;
} else if (i > 0 && input_row_end[i] > input_row_end[i - 1]) {
      // Non-empty row: gather its elements through sorted_order into output_values & output_indices.
for (int j = input_row_end[i - 1]; j < input_row_end[i]; j++) {
int index_out = j + emptyrow_count[i];
int index_in = sorted_order[j];
output_values[index_out] = values_ptr[index_in];
output_indices[2 * index_out] = indice_ptr[2 * index_in];
output_indices[2 * index_out + 1] = indice_ptr[2 * index_in + 1];
reverse_index_map[index_in] = index_out;
}
} else if (i == 0 && input_row_end[0] == 0) {
// If the first row has no element.
output_values[0] = *default_value;
output_indices[0] = 0;
output_indices[1] = 0;
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
} else if (i == 0 && input_row_end[0] > 0) {
      // The first row is not empty: copy its elements in sorted order.
for (int j = 0; j < input_row_end[i]; j++) {
int index_in = sorted_order[j];
output_values[j] = values_ptr[index_in];
output_indices[2 * j] = indice_ptr[2 * index_in];
output_indices[2 * j + 1] = indice_ptr[2 * index_in + 1];
reverse_index_map[index_in] = j;
}
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
}
}
return;
}
// Count the number of elements in each row.
// Input: dense_shape_ptr, indices_ptr (row indices), indice_num.
// Output: elements_per_row.
__global__ void CalElementPerRowsKernel(int64_t *dense_shape_ptr, int64_t *indices_ptr, int64_t *elements_per_row,
int indice_num) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indice_num; i += blockDim.x * gridDim.x) {
int row = indices_ptr[i];
MsAtomicAdd(&elements_per_row[row], static_cast<int64_t>(1));
}
}
// Mark which rows are empty.
// Input: elements_per_row, dense_row.
// Output: output_empty_row_indicator_ptr.
__global__ void CalEmptyRowIndicatorKernel(int64_t *elements_per_row, size_t dense_row,
bool *output_empty_row_indicator_ptr) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < dense_row; i += blockDim.x * gridDim.x) {
if (elements_per_row[i] == 0) {
output_empty_row_indicator_ptr[i] = 1;
} else {
output_empty_row_indicator_ptr[i] = 0;
}
}
}
// Extract the row index from indices_ptr and generate an ascending position index.
// Input: indices_ptr, indice_num.
// Output: row_indices, origin_index.
__global__ void CopyRowIndiceKernel(int64_t *indices_ptr, int64_t *row_indices, int64_t *origin_index, int indice_num) {
int rank = 2;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < indice_num; i += blockDim.x * gridDim.x) {
row_indices[i] = indices_ptr[i * rank];
origin_index[i] = i;
}
}
// Calculate the inclusive prefix sum of the empty-row indicator.
// Input: dense_row, output_empty_row_indicator_ptr, cuda_stream.
// Output: empty_row_count_sum.
void InclusiveBoolPrefixSum(size_t dense_row, bool *output_empty_row_indicator_ptr, int *empty_row_count_sum,
cudaStream_t cuda_stream) {
BoolToSize op;
cub::TransformInputIterator<int, BoolToSize, const bool *> iter(output_empty_row_indicator_ptr, op);
size_t temp_storage_bytes = 0;
(void)cub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, iter, empty_row_count_sum,
static_cast<int>(dense_row), cuda_stream);
void *d_temp_storage = nullptr;
cudaStreamSynchronize(cuda_stream);
(void)cudaMalloc(&d_temp_storage, temp_storage_bytes);
(void)cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, iter, empty_row_count_sum,
static_cast<int>(dense_row), cuda_stream);
cudaStreamSynchronize(cuda_stream);
(void)cudaFree(d_temp_storage);
}
// Calculate the inclusive prefix sum of elements_per_row.
// Input: dense_row, elements_per_row, cuda_stream.
// Output: input_row_ends.
void InclusivePrefixSum(size_t dense_row, int64_t *elements_per_row, int64_t *input_row_ends,
cudaStream_t cuda_stream) {
if (dense_row == 0) {
return;
}
size_t temp_storage_bytes = 0;
(void)cub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, elements_per_row, input_row_ends,
static_cast<int>(dense_row), cuda_stream);
void *d_temp_storage = nullptr;
(void)cudaMalloc(&d_temp_storage, temp_storage_bytes);
(void)cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, elements_per_row, input_row_ends,
static_cast<int>(dense_row), cuda_stream);
cudaStreamSynchronize(cuda_stream);
(void)cudaFree(d_temp_storage);
}
// Sort row_indices (keys) into ascending order together with origin_index (values), producing the
// key-value pairs (sorted_key, sorted_order), where sorted_order holds each entry's original position.
// Input: indice_size, cuda_stream, dense_shape_ptr, row_indices, origin_index, device_id.
// Output: sorted_key, sorted_order.
__host__ void RowsSort(int64_t indice_size, cudaStream_t cuda_stream, int64_t *dense_shape_ptr, int64_t *row_indices,
int64_t *origin_index, int64_t *sorted_key, int64_t *sorted_order, int device_id) {
size_t temp_storage_ = 0;
(void)cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_, row_indices, static_cast<int64_t *>(nullptr),
origin_index, sorted_order, indice_size, 0, sizeof(int64_t) * 8, cuda_stream);
void *d_temp_ = nullptr;
(void)cudaMalloc(&d_temp_, temp_storage_);
(void)cub::DeviceRadixSort::SortPairs(d_temp_, temp_storage_, row_indices, static_cast<int64_t *>(sorted_key),
origin_index, sorted_order, indice_size, 0, sizeof(int64_t) * 8, cuda_stream);
(void)cudaFree(d_temp_);
}
template <typename S>
__global__ void AssignValueKernel(S *values_ptr, int64_t *indice_ptr, int64_t *sorted_indices, size_t dense_row,
S *default_value, int *emptyrow_count, int64_t *input_row_end, S *output_values,
int64_t *output_indices, int indice_num, size_t *real_indice_num,
int64_t *reverse_index_map) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < dense_row; i += blockDim.x * gridDim.x) {
if (i > 0 && input_row_end[i] == input_row_end[i - 1]) {
      // Empty row (input_row_end[i] == input_row_end[i-1]): insert the default value and indices (i, 0).
int index = input_row_end[i] + emptyrow_count[i] - 1;
output_values[index] = *default_value;
output_indices[2 * index] = i;
output_indices[2 * index + 1] = 0;
} else if (i > 0 && input_row_end[i] > input_row_end[i - 1]) {
      // Non-empty row: gather its elements through sorted_indices into output_values & output_indices.
for (int j = input_row_end[i - 1]; j < input_row_end[i]; j++) {
int index_out = j + emptyrow_count[i];
int index_in = sorted_indices[j];
output_values[index_out] = values_ptr[index_in];
output_indices[2 * index_out] = indice_ptr[2 * index_in];
output_indices[2 * index_out + 1] = indice_ptr[2 * index_in + 1];
reverse_index_map[index_in] = index_out;
}
} else if (i == 0 && input_row_end[0] == 0) {
// If the first row has no element.
output_values[0] = *default_value;
output_indices[0] = 0;
output_indices[1] = 0;
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
} else if (i == 0 && input_row_end[0] > 0) {
      // The first row is not empty: copy its elements in sorted order.
for (int j = 0; j < input_row_end[i]; j++) {
output_values[j] = values_ptr[sorted_indices[j]];
output_indices[2 * j] = indice_ptr[2 * sorted_indices[j]];
output_indices[2 * j + 1] = indice_ptr[2 * sorted_indices[j] + 1];
reverse_index_map[sorted_indices[j]] = j;
}
*real_indice_num = indice_num + emptyrow_count[dense_row - 1];
}
}
return;
}
template <typename S>
CUDA_LIB_EXPORT void SparseFillEmptyRows(int64_t *indices_ptr, Complex<S> *values_ptr, Complex<S> *default_value,
int64_t *dense_shape_ptr, int device_id, int indice_num, size_t dense_row,
int64_t *elements_per_rows, int *empty_row_count_sum, int64_t *row_indices,
int64_t *input_row_ends, int64_t *sorted_indices, size_t *final_shape,
int64_t *origin_index, int64_t *sorted_key, cudaStream_t cuda_stream,
int64_t *output_indices_ptr, Complex<S> *output_values_ptr,
bool *output_empty_row_indicator_ptr, int64_t *output_reverse_index_map_ptr) {
int thread_num_dense_row = 256 < dense_row ? 256 : dense_row;
cudaDeviceProp prop;
(void)cudaGetDeviceProperties(&prop, device_id);
int max_blocks = prop.multiProcessorCount;
int block_num = std::min(static_cast<int>(((dense_row - 1) / thread_num_dense_row) + 1), max_blocks);
CopyRowIndiceKernel<<<block_num, thread_num_dense_row, 0, cuda_stream>>>(indices_ptr, row_indices, origin_index,
indice_num);
cudaMemset(elements_per_rows, 0, dense_row * sizeof(int64_t));
int thread_num_indice_num = 256 < indice_num ? 256 : indice_num;
block_num = std::min(static_cast<int>(((indice_num - 1) / thread_num_indice_num) + 1), max_blocks);
CalElementPerRowsKernel<<<block_num, thread_num_indice_num, 0, cuda_stream>>>(dense_shape_ptr, row_indices,
elements_per_rows, indice_num);
CalEmptyRowIndicatorKernel<<<block_num, thread_num_dense_row, 0, cuda_stream>>>(elements_per_rows, dense_row,
output_empty_row_indicator_ptr);
InclusivePrefixSum(dense_row, elements_per_rows, input_row_ends, cuda_stream);
InclusiveBoolPrefixSum(dense_row, output_empty_row_indicator_ptr, empty_row_count_sum, cuda_stream);
RowsSort(indice_num, cuda_stream, dense_shape_ptr, row_indices, origin_index, sorted_key, sorted_indices, device_id);
AssignValueKernel<<<block_num, thread_num_dense_row, 0, cuda_stream>>>(
values_ptr, indices_ptr, sorted_indices, dense_row, default_value, empty_row_count_sum, input_row_ends,
output_values_ptr, output_indices_ptr, indice_num, final_shape, output_reverse_index_map_ptr);
return;
}
template <typename S>
CUDA_LIB_EXPORT void SparseFillEmptyRows(int64_t *indices_ptr, S *values_ptr, S *default_value,
int64_t *dense_shape_ptr, int device_id, int indice_num, size_t dense_row,
int64_t *elements_per_rows, int *empty_row_count_sum, int64_t *row_indices,
int64_t *input_row_ends, int64_t *sorted_indices, size_t *final_shape,
int64_t *origin_index, int64_t *sorted_key, cudaStream_t cuda_stream,
int64_t *output_indices_ptr, S *output_values_ptr,
bool *output_empty_row_indicator_ptr, int64_t *output_reverse_index_map_ptr) {
int thread_num_dense_row = 256 < dense_row ? 256 : dense_row;
cudaDeviceProp prop;
(void)cudaGetDeviceProperties(&prop, device_id);
int max_blocks = prop.multiProcessorCount;
int block_num = std::min(static_cast<int>(((dense_row - 1) / thread_num_dense_row) + 1), max_blocks);
CopyRowIndiceKernel<<<block_num, thread_num_dense_row, 0, cuda_stream>>>(indices_ptr, row_indices, origin_index,
indice_num);
cudaMemset(elements_per_rows, 0, dense_row * sizeof(int64_t));
int thread_num_indice_num = 256 < indice_num ? 256 : indice_num;
block_num = std::min(static_cast<int>(((indice_num - 1) / thread_num_indice_num) + 1), max_blocks);
CalElementPerRowsKernel<<<block_num, thread_num_indice_num, 0, cuda_stream>>>(dense_shape_ptr, row_indices,
elements_per_rows, indice_num);
CalEmptyRowIndicatorKernel<<<block_num, thread_num_dense_row, 0, cuda_stream>>>(elements_per_rows, dense_row,
output_empty_row_indicator_ptr);
InclusivePrefixSum(dense_row, elements_per_rows, input_row_ends, cuda_stream);
InclusiveBoolPrefixSum(dense_row, output_empty_row_indicator_ptr, empty_row_count_sum, cuda_stream);
RowsSort(indice_num, cuda_stream, dense_shape_ptr, row_indices, origin_index, sorted_key, sorted_indices, device_id);
AssignValueKernel<<<block_num, thread_num_dense_row, 0, cuda_stream>>>(
values_ptr, indices_ptr, sorted_indices, dense_row, default_value, empty_row_count_sum, input_row_ends,
output_values_ptr, output_indices_ptr, indice_num, final_shape, output_reverse_index_map_ptr);
return;
}
#define TEMPLATE_INSTANCE(DTYPE) \
template CUDA_LIB_EXPORT void SparseFillEmptyRows<DTYPE>( \
int64_t * indices_ptr, DTYPE * values_ptr, DTYPE * default_value, int64_t * dense_shape_ptr, int device_id, \
int indice_num, size_t dense_row, int64_t *elements_per_rows, int *rows_are_not_ordered, int64_t *row_indices, \
int64_t *input_row_ends, int64_t *sorted_indices, size_t *final_shape, int64_t *origin_index, int64_t *sorted_key, \
cudaStream_t cuda_stream, int64_t *output_indices_ptr, DTYPE *output_values_ptr, \
bool *output_empty_row_indicator_ptr, int64_t *output_reverse_index_map_ptr);
TEMPLATE_INSTANCE(float)
TEMPLATE_INSTANCE(half)
TEMPLATE_INSTANCE(double)
TEMPLATE_INSTANCE(int)
TEMPLATE_INSTANCE(int64_t)
TEMPLATE_INSTANCE(uint32_t)
TEMPLATE_INSTANCE(uint64_t)
TEMPLATE_INSTANCE(uint16_t)
TEMPLATE_INSTANCE(uint8_t)
TEMPLATE_INSTANCE(int8_t)
TEMPLATE_INSTANCE(int16_t)
TEMPLATE_INSTANCE(bool)
TEMPLATE_INSTANCE(Complex<float>)
TEMPLATE_INSTANCE(Complex<double>)
|
b6b1d19d970ac8973dea588ae76034bea96bd2e8.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* SOFA, Simulation Open-Framework Architecture, version 1.0 beta 4 *
* (c) 2006-2009 MGH, INRIA, USTL, UJF, CNRS *
* *
* This library is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This library is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this library; if not, write to the Free Software Foundation, *
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
*******************************************************************************
* SOFA :: Modules *
* *
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: [email protected] *
******************************************************************************/
#include <cuda/CudaCommon.h>
#include <cuda/CudaMath.h>
#include "hip/hip_runtime.h"
extern "C"
{
void CudaUniformMass3f_addMDx(unsigned int size, float mass, void* res, const void* dx);
void CudaUniformMass3f_accFromF(unsigned int size, float mass, void* a, const void* f);
void CudaUniformMass3f_addForce(unsigned int size, const float *mg, void* f);
#ifdef SOFA_GPU_CUDA_DOUBLE
void CudaUniformMass3d_addMDx(unsigned int size, double mass, void* res, const void* dx);
void CudaUniformMass3d_accFromF(unsigned int size, double mass, void* a, const void* f);
void CudaUniformMass3d_addForce(unsigned int size, const double *mg, void* f);
#endif // SOFA_GPU_CUDA_DOUBLE
}
//////////////////////
// GPU-side methods //
//////////////////////
template<class real>
__global__ void CudaUniformMass1t_addMDx_kernel(int size, const real mass, real* res, const real* dx)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
res[index] += dx[index] * mass;
}
}
template<class real>
__global__ void CudaUniformMass3t_addMDx_kernel(int size, const real mass, CudaVec3<real>* res, const CudaVec3<real>* dx)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
//res[index] += dx[index] * mass;
CudaVec3<real> dxi = dx[index];
CudaVec3<real> ri = res[index];
ri += dxi * mass;
res[index] = ri;
}
}
template<class real>
__global__ void CudaUniformMass1t_accFromF_kernel(int size, const real inv_mass, real* a, const real* f)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
a[index] = f[index] * inv_mass;
}
}
template<class real>
__global__ void CudaUniformMass3t_accFromF_kernel(int size, const real inv_mass, CudaVec3<real>* a, const CudaVec3<real>* f)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
//a[index] = f[index] * inv_mass;
CudaVec3<real> fi = f[index];
fi *= inv_mass;
a[index] = fi;
}
}
template<class real>
__global__ void CudaUniformMass1t_addForce_kernel(int size, const real mg, real* f)
{
    int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x; // include the thread offset, matching the other per-element kernels
if (index < size)
{
f[index] += mg;
}
}
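// Note on the kernel below: each block handles BSIZE vec3 values stored as 3*BSIZE consecutive reals.
// The three strided loads into shared memory are coalesced, each thread then adds (mg_x, mg_y, mg_z)
// to its own xyz triple in shared memory, and the tile is written back with the same coalesced pattern.
// Only the add is guarded by the size check, so the loads/stores assume f provides a full
// 3*BSIZE-real tile for the last block as well.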
template<class real>
//__global__ void CudaUniformMass3t_addForce_kernel(int size, const CudaVec3<real> mg, real* f)
__global__ void CudaUniformMass3t_addForce_kernel(int size, real mg_x, real mg_y, real mg_z, real* f)
{
//int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
//f[index] += mg;
f += fastmul(blockIdx.x,BSIZE*3); //blockIdx.x*BSIZE*3;
int index = threadIdx.x;
__shared__ real temp[BSIZE*3];
temp[index] = f[index];
temp[index+BSIZE] = f[index+BSIZE];
temp[index+2*BSIZE] = f[index+2*BSIZE];
__syncthreads();
if (fastmul(blockIdx.x,BSIZE)+threadIdx.x < size)
{
int index3 = fastmul(index,3); //3*index;
temp[index3+0] += mg_x;
temp[index3+1] += mg_y;
temp[index3+2] += mg_z;
}
__syncthreads();
f[index] = temp[index];
f[index+BSIZE] = temp[index+BSIZE];
f[index+2*BSIZE] = temp[index+2*BSIZE];
}
//////////////////////
// CPU-side methods //
//////////////////////
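// Note: addMDx and accFromF reuse the scalar (1t) kernels over 3*size reals, treating each CudaVec3
// as three consecutive components (the vec3 launches are kept above, commented out). addForce keeps
// the 3t kernel because a different constant is added to each component.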
void CudaUniformMass3f_addMDx(unsigned int size, float mass, void* res, const void* dx)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_addMDx_kernel<float><<< grid, threads >>>(size, mass, (CudaVec3<float>*)res, (const CudaVec3<float>*)dx);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
hipLaunchKernelGGL(( CudaUniformMass1t_addMDx_kernel<float>), dim3(grid), dim3(threads) , 0, 0, 3*size, mass, (float*)res, (const float*)dx);
}
void CudaUniformMass3f_accFromF(unsigned int size, float mass, void* a, const void* f)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_accFromF_kernel<float><<< grid, threads >>>(size, 1.0f/mass, (CudaVec3<float>*)a, (const CudaVec3<float>*)f);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
hipLaunchKernelGGL(( CudaUniformMass1t_accFromF_kernel<float>), dim3(grid), dim3(threads) , 0, 0, 3*size, 1.0f/mass, (float*)a, (const float*)f);
}
void CudaUniformMass3f_addForce(unsigned int size, const float *mg, void* f)
{
dim3 threads(BSIZE,1);
dim3 grid((size+BSIZE-1)/BSIZE,1);
hipLaunchKernelGGL(( CudaUniformMass3t_addForce_kernel<float>), dim3(grid), dim3(threads) , 0, 0, size, mg[0], mg[1], mg[2], (float*)f);
}
#ifdef SOFA_GPU_CUDA_DOUBLE
void CudaUniformMass3d_addMDx(unsigned int size, double mass, void* res, const void* dx)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_addMDx_kernel<double><<< grid, threads >>>(size, mass, (CudaVec3<double>*)res, (const CudaVec3<double>*)dx);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
hipLaunchKernelGGL(( CudaUniformMass1t_addMDx_kernel<double>), dim3(grid), dim3(threads) , 0, 0, 3*size, mass, (double*)res, (const double*)dx);
}
void CudaUniformMass3d_accFromF(unsigned int size, double mass, void* a, const void* f)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_accFromF_kernel<double><<< grid, threads >>>(size, 1.0f/mass, (CudaVec3<double>*)a, (const CudaVec3<double>*)f);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
hipLaunchKernelGGL(( CudaUniformMass1t_accFromF_kernel<double>), dim3(grid), dim3(threads) , 0, 0, 3*size, 1.0f/mass, (double*)a, (const double*)f);
}
void CudaUniformMass3d_addForce(unsigned int size, const double *mg, void* f)
{
dim3 threads(BSIZE,1);
dim3 grid((size+BSIZE-1)/BSIZE,1);
hipLaunchKernelGGL(( CudaUniformMass3t_addForce_kernel<double>), dim3(grid), dim3(threads) , 0, 0, size, mg[0], mg[1], mg[2], (double*)f);
}
#endif // SOFA_GPU_CUDA_DOUBLE
|
b6b1d19d970ac8973dea588ae76034bea96bd2e8.cu
|
/******************************************************************************
* SOFA, Simulation Open-Framework Architecture, version 1.0 beta 4 *
* (c) 2006-2009 MGH, INRIA, USTL, UJF, CNRS *
* *
* This library is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This library is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this library; if not, write to the Free Software Foundation, *
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
*******************************************************************************
* SOFA :: Modules *
* *
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: [email protected] *
******************************************************************************/
#include <cuda/CudaCommon.h>
#include <cuda/CudaMath.h>
#include "cuda.h"
extern "C"
{
void CudaUniformMass3f_addMDx(unsigned int size, float mass, void* res, const void* dx);
void CudaUniformMass3f_accFromF(unsigned int size, float mass, void* a, const void* f);
void CudaUniformMass3f_addForce(unsigned int size, const float *mg, void* f);
#ifdef SOFA_GPU_CUDA_DOUBLE
void CudaUniformMass3d_addMDx(unsigned int size, double mass, void* res, const void* dx);
void CudaUniformMass3d_accFromF(unsigned int size, double mass, void* a, const void* f);
void CudaUniformMass3d_addForce(unsigned int size, const double *mg, void* f);
#endif // SOFA_GPU_CUDA_DOUBLE
}
//////////////////////
// GPU-side methods //
//////////////////////
template<class real>
__global__ void CudaUniformMass1t_addMDx_kernel(int size, const real mass, real* res, const real* dx)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
res[index] += dx[index] * mass;
}
}
template<class real>
__global__ void CudaUniformMass3t_addMDx_kernel(int size, const real mass, CudaVec3<real>* res, const CudaVec3<real>* dx)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
//res[index] += dx[index] * mass;
CudaVec3<real> dxi = dx[index];
CudaVec3<real> ri = res[index];
ri += dxi * mass;
res[index] = ri;
}
}
template<class real>
__global__ void CudaUniformMass1t_accFromF_kernel(int size, const real inv_mass, real* a, const real* f)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
a[index] = f[index] * inv_mass;
}
}
template<class real>
__global__ void CudaUniformMass3t_accFromF_kernel(int size, const real inv_mass, CudaVec3<real>* a, const CudaVec3<real>* f)
{
int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
if (index < size)
{
//a[index] = f[index] * inv_mass;
CudaVec3<real> fi = f[index];
fi *= inv_mass;
a[index] = fi;
}
}
template<class real>
__global__ void CudaUniformMass1t_addForce_kernel(int size, const real mg, real* f)
{
    int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x; // include the thread offset, matching the other per-element kernels
if (index < size)
{
f[index] += mg;
}
}
template<class real>
//__global__ void CudaUniformMass3t_addForce_kernel(int size, const CudaVec3<real> mg, real* f)
__global__ void CudaUniformMass3t_addForce_kernel(int size, real mg_x, real mg_y, real mg_z, real* f)
{
//int index = fastmul(blockIdx.x,BSIZE)+threadIdx.x;
//f[index] += mg;
f += fastmul(blockIdx.x,BSIZE*3); //blockIdx.x*BSIZE*3;
int index = threadIdx.x;
__shared__ real temp[BSIZE*3];
temp[index] = f[index];
temp[index+BSIZE] = f[index+BSIZE];
temp[index+2*BSIZE] = f[index+2*BSIZE];
__syncthreads();
if (fastmul(blockIdx.x,BSIZE)+threadIdx.x < size)
{
int index3 = fastmul(index,3); //3*index;
temp[index3+0] += mg_x;
temp[index3+1] += mg_y;
temp[index3+2] += mg_z;
}
__syncthreads();
f[index] = temp[index];
f[index+BSIZE] = temp[index+BSIZE];
f[index+2*BSIZE] = temp[index+2*BSIZE];
}
//////////////////////
// CPU-side methods //
//////////////////////
void CudaUniformMass3f_addMDx(unsigned int size, float mass, void* res, const void* dx)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_addMDx_kernel<float><<< grid, threads >>>(size, mass, (CudaVec3<float>*)res, (const CudaVec3<float>*)dx);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
CudaUniformMass1t_addMDx_kernel<float><<< grid, threads >>>(3*size, mass, (float*)res, (const float*)dx);
}
void CudaUniformMass3f_accFromF(unsigned int size, float mass, void* a, const void* f)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_accFromF_kernel<float><<< grid, threads >>>(size, 1.0f/mass, (CudaVec3<float>*)a, (const CudaVec3<float>*)f);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
CudaUniformMass1t_accFromF_kernel<float><<< grid, threads >>>(3*size, 1.0f/mass, (float*)a, (const float*)f);
}
void CudaUniformMass3f_addForce(unsigned int size, const float *mg, void* f)
{
dim3 threads(BSIZE,1);
dim3 grid((size+BSIZE-1)/BSIZE,1);
CudaUniformMass3t_addForce_kernel<float><<< grid, threads >>>(size, mg[0], mg[1], mg[2], (float*)f);
}
#ifdef SOFA_GPU_CUDA_DOUBLE
void CudaUniformMass3d_addMDx(unsigned int size, double mass, void* res, const void* dx)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_addMDx_kernel<double><<< grid, threads >>>(size, mass, (CudaVec3<double>*)res, (const CudaVec3<double>*)dx);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
CudaUniformMass1t_addMDx_kernel<double><<< grid, threads >>>(3*size, mass, (double*)res, (const double*)dx);
}
void CudaUniformMass3d_accFromF(unsigned int size, double mass, void* a, const void* f)
{
dim3 threads(BSIZE,1);
//dim3 grid((size+BSIZE-1)/BSIZE,1);
//CudaUniformMass3t_accFromF_kernel<double><<< grid, threads >>>(size, 1.0f/mass, (CudaVec3<double>*)a, (const CudaVec3<double>*)f);
dim3 grid((3*size+BSIZE-1)/BSIZE,1);
CudaUniformMass1t_accFromF_kernel<double><<< grid, threads >>>(3*size, 1.0f/mass, (double*)a, (const double*)f);
}
void CudaUniformMass3d_addForce(unsigned int size, const double *mg, void* f)
{
dim3 threads(BSIZE,1);
dim3 grid((size+BSIZE-1)/BSIZE,1);
CudaUniformMass3t_addForce_kernel<double><<< grid, threads >>>(size, mg[0], mg[1], mg[2], (double*)f);
}
#endif // SOFA_GPU_CUDA_DOUBLE
|
40698075f6b997964b7dc45bbfd7234fdeac1cb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#define N 4096
float *x, *y;
float *dev_x;
float *dev_y;
#define myabs(a) (((a) > 0) ? (a):(-(a)))
__global__ void matrixadd(float* x, float* y, int NN, float a1, float a2, float a3, float a4, float *diff)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int index = (NN+2)*(i+1);
diff[i] = -1.0;
for(int k=1; k <= NN; k++){
x[index + k] = a2 * y[index + k - 1] + a4 * y[index + k + 1] + a1 * y[index + k - NN - 2] + a3 * y[index + k + NN + 2];
if(myabs(x[index + k] - y[index + k]) > diff[i])
diff[i] = myabs(x[index+k] - y[index+k]);
}
//__syncthreads();
}
int main(void)
{
int NN;
float a1, a2, a3, a4, a5, a6;
float MAXDIFF;
int i, j;
int t, t1, t2;
float maxdiff1;
float *mydiff, *dev_mydiff;
int iteration;
FILE *fp;
if((fp = fopen("input.jacobi","r+")) == NULL){
printf("File not found.\n");
exit(1);
}
fscanf(fp,"%d %f %f %f %f %f %f %f", &NN, &a1, &a2, &a3, &a4, &a5, &a6, &MAXDIFF);
printf("%d %f %f %f %f %f %f %f\n", NN, a1, a2, a3, a4, a5, a6, MAXDIFF);
/* a1 = a2 = a3 = a4 = 0.25; a6 = 0; a5 = 0.1;
MAXDIFF = 0.0001;
*/
/* 1. allocate host memory */
x = (float*)malloc( (NN+2)*(NN+2)*sizeof(float) );
y = (float*)malloc( (NN+2)*(NN+2)*sizeof(float) );
printf("maxdiff = %13.12f\n", MAXDIFF);
for (i=1; i<=NN+1; i++) {
x[i] = a5*i;
y[i] = a5*i;
x[i*(NN+2)] = 0.0;
y[i*(NN+2)] = 0.0;
x[i+(NN+1)*(NN+2)] = a6*i;
y[i+(NN+1)*(NN+2)] = a6*i;
x[NN+1+i*(NN+2)] = 0.0;
y[NN+1+i*(NN+2)] = 0.0;
}
for (i=1; i<=NN; i++)
for (j=1; j<=NN; j++) {
x[i+j*(NN+2)] = 0.0; /* interior points use the same (NN+2) row stride as the boundary init above */
y[i+j*(NN+2)] = 0.0;
}
printf("maxdiff=%13.12f\n", MAXDIFF);
t = 0; t1 = 1;
maxdiff1 = 100000.0;
iteration = 0;
mydiff = (float*) malloc( NN*sizeof(float) );
int blockSize = 16;
hipMalloc( &dev_x, (NN+2)*(NN+2)*sizeof(float) );
hipMalloc( &dev_y, (NN+2)*(NN+2)*sizeof(float) );
hipMalloc( &dev_mydiff, NN*sizeof(float) );
hipMemcpy( dev_x, x, (NN+2)*(NN+2)*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( dev_y, y, (NN+2)*(NN+2)*sizeof(float), hipMemcpyHostToDevice );
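// Note: the loop below ping-pongs between dev_x and dev_y via the t/t1 flags (Jacobi double
// buffering). Each kernel thread updates one interior row and writes that row's maximum absolute
// change into dev_mydiff; the host copies the NN per-row maxima back and reduces them to maxdiff1
// to test convergence against MAXDIFF.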
while (maxdiff1 > MAXDIFF) {
maxdiff1 = -1.0;
hipMemcpy( dev_mydiff, mydiff, NN*sizeof(float), hipMemcpyHostToDevice );
if (t == 0)hipLaunchKernelGGL(( matrixadd), dim3((NN/blockSize)), dim3(blockSize), 0, 0, dev_x, dev_y, NN, a1, a2, a3, a4, dev_mydiff);
else if(t == 1)hipLaunchKernelGGL(( matrixadd), dim3((NN/blockSize)), dim3(blockSize), 0, 0, dev_y, dev_x, NN, a1, a2, a3, a4, dev_mydiff);
hipMemcpy( mydiff, dev_mydiff, NN*sizeof(float), hipMemcpyDeviceToHost );
for(i = 0; i < NN; i++){
//for(j = 0; j < NN; j++){
if(maxdiff1 < mydiff[i])
maxdiff1 = mydiff[i];
mydiff[i] = -1;
//}
}
t2 = t; t = t1; t1 = t2;
printf("iteration = %d, maxdiff1 = %f, MAXDIFF = %f\n",
iteration++, maxdiff1, MAXDIFF);
}
printf("MAXDIFF = %f, maxdiff = %f\n", MAXDIFF, maxdiff1);
hipMemcpy( x, dev_x, (NN+2)*(NN+2)*sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( y, dev_y, (NN+2)*(NN+2)*sizeof(float), hipMemcpyDeviceToHost );
if ((fp = fopen("cuda.output", "w+")) == NULL) {
fprintf(stderr, "Cannot open file cuda.output.\n");
exit(0);
}
if(t == 1){
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", x[NN/2+j*(NN+2)])) < 0) {
fprintf(stderr, "write error %d %d.\n", j, t);
exit(0);
}
}
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", x[j+(NN/2)*(NN+2)])) < 0) {
fprintf(stderr, "write error. %d %d\n", j, t);
exit(0);
}
}
}
else{
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", y[NN/2+j*(NN+2)])) < 0) {
fprintf(stderr, "write error %d %d.\n", j, t);
exit(0);
}
}
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", y[j+(NN/2)*(NN+2)])) < 0) {
fprintf(stderr, "write error. %d %d\n", j, t);
exit(0);
}
}
}
fclose(fp);
free(mydiff);
free(x);
free(y);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_mydiff);
return 0;
}
|
40698075f6b997964b7dc45bbfd7234fdeac1cb1.cu
|
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#define N 4096
float *x, *y;
float *dev_x;
float *dev_y;
#define myabs(a) (((a) > 0) ? (a):(-(a)))
__global__ void matrixadd(float* x, float* y, int NN, float a1, float a2, float a3, float a4, float *diff)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int index = (NN+2)*(i+1);
diff[i] = -1.0;
for(int k=1; k <= NN; k++){
x[index + k] = a2 * y[index + k - 1] + a4 * y[index + k + 1] + a1 * y[index + k - NN - 2] + a3 * y[index + k + NN + 2];
if(myabs(x[index + k] - y[index + k]) > diff[i])
diff[i] = myabs(x[index+k] - y[index+k]);
}
//__syncthreads();
}
int main(void)
{
int NN;
float a1, a2, a3, a4, a5, a6;
float MAXDIFF;
int i, j;
int t, t1, t2;
float maxdiff1;
float *mydiff, *dev_mydiff;
int iteration;
FILE *fp;
if((fp = fopen("input.jacobi","r+")) == NULL){
printf("File not found.\n");
exit(1);
}
fscanf(fp,"%d %f %f %f %f %f %f %f", &NN, &a1, &a2, &a3, &a4, &a5, &a6, &MAXDIFF);
printf("%d %f %f %f %f %f %f %f\n", NN, a1, a2, a3, a4, a5, a6, MAXDIFF);
/* a1 = a2 = a3 = a4 = 0.25; a6 = 0; a5 = 0.1;
MAXDIFF = 0.0001;
*/
/* 1. allocate host memory */
x = (float*)malloc( (NN+2)*(NN+2)*sizeof(float) );
y = (float*)malloc( (NN+2)*(NN+2)*sizeof(float) );
printf("maxdiff = %13.12f\n", MAXDIFF);
for (i=1; i<=NN+1; i++) {
x[i] = a5*i;
y[i] = a5*i;
x[i*(NN+2)] = 0.0;
y[i*(NN+2)] = 0.0;
x[i+(NN+1)*(NN+2)] = a6*i;
y[i+(NN+1)*(NN+2)] = a6*i;
x[NN+1+i*(NN+2)] = 0.0;
y[NN+1+i*(NN+2)] = 0.0;
}
for (i=1; i<=NN; i++)
for (j=1; j<=NN; j++) {
x[i+j*(NN+2)] = 0.0; /* interior points use the same (NN+2) row stride as the boundary init above */
y[i+j*(NN+2)] = 0.0;
}
printf("maxdiff=%13.12f\n", MAXDIFF);
t = 0; t1 = 1;
maxdiff1 = 100000.0;
iteration = 0;
mydiff = (float*) malloc( NN*sizeof(float) );
int blockSize = 16;
cudaMalloc( &dev_x, (NN+2)*(NN+2)*sizeof(float) );
cudaMalloc( &dev_y, (NN+2)*(NN+2)*sizeof(float) );
cudaMalloc( &dev_mydiff, NN*sizeof(float) );
cudaMemcpy( dev_x, x, (NN+2)*(NN+2)*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( dev_y, y, (NN+2)*(NN+2)*sizeof(float), cudaMemcpyHostToDevice );
while (maxdiff1 > MAXDIFF) {
maxdiff1 = -1.0;
cudaMemcpy( dev_mydiff, mydiff, NN*sizeof(float), cudaMemcpyHostToDevice );
if (t == 0) matrixadd<<<(NN/blockSize), blockSize>>>(dev_x, dev_y, NN, a1, a2, a3, a4, dev_mydiff);
else if(t == 1) matrixadd<<<(NN/blockSize), blockSize>>>(dev_y, dev_x, NN, a1, a2, a3, a4, dev_mydiff);
cudaMemcpy( mydiff, dev_mydiff, NN*sizeof(float), cudaMemcpyDeviceToHost );
for(i = 0; i < NN; i++){
//for(j = 0; j < NN; j++){
if(maxdiff1 < mydiff[i])
maxdiff1 = mydiff[i];
mydiff[i] = -1;
//}
}
t2 = t; t = t1; t1 = t2;
printf("iteration = %d, maxdiff1 = %f, MAXDIFF = %f\n",
iteration++, maxdiff1, MAXDIFF);
}
printf("MAXDIFF = %f, maxdiff = %f\n", MAXDIFF, maxdiff1);
cudaMemcpy( x, dev_x, (NN+2)*(NN+2)*sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( y, dev_y, (NN+2)*(NN+2)*sizeof(float), cudaMemcpyDeviceToHost );
if ((fp = fopen("cuda.output", "w+")) == NULL) {
fprintf(stderr, "Cannot open file cuda.output.\n");
exit(0);
}
if(t == 1){
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", x[NN/2+j*(NN+2)])) < 0) {
fprintf(stderr, "write error %d %d.\n", j, t);
exit(0);
}
}
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", x[j+(NN/2)*(NN+2)])) < 0) {
fprintf(stderr, "write error. %d %d\n", j, t);
exit(0);
}
}
}
else{
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", y[NN/2+j*(NN+2)])) < 0) {
fprintf(stderr, "write error %d %d.\n", j, t);
exit(0);
}
}
for (j = 0; j <= NN + 1; j++) {
if ((t = fprintf(fp, "%.10f\t", y[j+(NN/2)*(NN+2)])) < 0) {
fprintf(stderr, "write error. %d %d\n", j, t);
exit(0);
}
}
}
fclose(fp);
free(mydiff);
free(x);
free(y);
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_mydiff);
return 0;
}
|
d04e4323b50934eb2289c7d5d273112831d8ef85.hip
|
// !!! This is a file automatically generated by hipify!!!
/*//------------------------------------------------------------------------------------------------------------
| TEST of the timings of the swap method needed for pivoting
|
*///------------------------------------------------------------------------------------------------------------
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#include "Src/Ausiliary/CudaCrono.cuh"
#include "Src/Cuda_FloatMatrixClass.cuh"
int main(void){
int device_id =0;
hipSetDevice(device_id);
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, device_id);
printf("Scheda Utilizzata: %s \n\n", prop.name);
int n1=10010;
int n2=10000;
//cout<<" #righe = ";cin>>n1;cout<<endl;
//cout<<" #colonne = ";cin>>n2;cout<<endl;
//Events for the timing statistics
hipEvent_t T1, T2, T3, T4;
hipEventCreate(&T1);
hipEventCreate(&T2);
hipEventCreate(&T3);
hipEventCreate(&T4);
float diff_time;
matrice matA (n1,n2);
matA.Init_Rand(-5.0,5.0);
//matA.print();
cout<<"\n SWAP RIGA 0 1, secondo la cpu"<<endl;
hipEventRecord(T3,0);
matA.Cpu_Swap_Row(0,1);
hipEventRecord(T4,0);
hipEventSynchronize(T4);
hipEventElapsedTime(&diff_time,T3,T4);
//matA.Cpu_print();
cout << "tempo=" << diff_time<<"\n";
cout<<"\n SWAP Col 0 1, secondo la cpu"<<endl;
hipEventRecord(T3,0);
matA.Cpu_Swap_Col(0,1);
hipEventRecord(T4,0);
hipEventSynchronize(T4);
hipEventElapsedTime(&diff_time,T3,T4);
//matA.Cpu_print();
cout << "tempo=" << diff_time<<"\n";
cout<<"\n SWAP RIGA 0 1, secondo la gpu"<<endl;
hipEventRecord(T3,0);
matA.Gpu_Swap_Row(0,1);
hipEventRecord(T4,0);
hipEventSynchronize(T4);
hipEventElapsedTime(&diff_time,T3,T4);
//matA.Gpu_print();
cout << "tempo=" << diff_time<<"\n";
cout<<"\n SWAP Col 0 1, secondo la gpu"<<endl;
hipEventRecord(T3,0);
matA.Gpu_Swap_Col(0,1);
hipEventRecord(T4,0);
hipEventSynchronize(T4);
hipEventElapsedTime(&diff_time,T3,T4);
//matA.Gpu_print();
cout << "tempo=" << diff_time<<"\n";
return 0;
}
|
d04e4323b50934eb2289c7d5d273112831d8ef85.cu
|
/*//------------------------------------------------------------------------------------------------------------
| TEST of the timings of the swap method needed for pivoting
|
*///------------------------------------------------------------------------------------------------------------
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#include "Src/Ausiliary/CudaCrono.cuh"
#include "Src/Cuda_FloatMatrixClass.cuh"
int main(void){
int device_id =0;
cudaSetDevice(device_id);
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, device_id);
printf("Scheda Utilizzata: %s \n\n", prop.name);
int n1=10010;
int n2=10000;
//cout<<" #righe = ";cin>>n1;cout<<endl;
//cout<<" #colonne = ";cin>>n2;cout<<endl;
//Events for the timing statistics
cudaEvent_t T1, T2, T3, T4;
cudaEventCreate(&T1);
cudaEventCreate(&T2);
cudaEventCreate(&T3);
cudaEventCreate(&T4);
float diff_time;
matrice matA (n1,n2);
matA.Init_Rand(-5.0,5.0);
//matA.print();
cout<<"\n SWAP RIGA 0 1, secondo la cpu"<<endl;
cudaEventRecord(T3,0);
matA.Cpu_Swap_Row(0,1);
cudaEventRecord(T4,0);
cudaEventSynchronize(T4);
cudaEventElapsedTime(&diff_time,T3,T4);
//matA.Cpu_print();
cout << "tempo=" << diff_time<<"\n";
cout<<"\n SWAP Col 0 1, secondo la cpu"<<endl;
cudaEventRecord(T3,0);
matA.Cpu_Swap_Col(0,1);
cudaEventRecord(T4,0);
cudaEventSynchronize(T4);
cudaEventElapsedTime(&diff_time,T3,T4);
//matA.Cpu_print();
cout << "tempo=" << diff_time<<"\n";
cout<<"\n SWAP RIGA 0 1, secondo la gpu"<<endl;
cudaEventRecord(T3,0);
matA.Gpu_Swap_Row(0,1);
cudaEventRecord(T4,0);
cudaEventSynchronize(T4);
cudaEventElapsedTime(&diff_time,T3,T4);
//matA.Gpu_print();
cout << "tempo=" << diff_time<<"\n";
cout<<"\n SWAP Col 0 1, secondo la gpu"<<endl;
cudaEventRecord(T3,0);
matA.Gpu_Swap_Col(0,1);
cudaEventRecord(T4,0);
cudaEventSynchronize(T4);
cudaEventElapsedTime(&diff_time,T3,T4);
//matA.Gpu_print();
cout << "tempo=" << diff_time<<"\n";
return 0;
}
|
0dc52494e2eed4b5c782893ebe558087b62cfd3d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "plugin.h"
#include "kernel.h"
template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void gatherTopDetections_kernel(
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const int* indices,
const T_SCORE* scores,
const T_BBOX* bboxData,
int* keepCount,
T_BBOX* topDetections)
{
if (keepTopK > topK)
return;
for (int i = blockIdx.x * nthds_per_cta + threadIdx.x;
i < numImages * keepTopK;
i += gridDim.x * nthds_per_cta)
{
const int imgId = i / keepTopK;
const int detId = i % keepTopK;
const int offset = imgId * numClasses * topK;
const int index = indices[offset + detId];
const T_SCORE score = scores[offset + detId];
        /*
         * It is also likely that there are "bad" bounding boxes among the keepTopK bounding boxes.
         * We set their parameters to the placeholder values shown below.
         * These entries only show up at the end of the keepTopK bounding boxes, since the boxes were
         * sorted previously, and they do not affect the count of valid bounding boxes (keepCount).
         * They will probably never be read, because keepCount is available.
         */
if (index == -1)
{
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = -1; // label
topDetections[i * 7 + 2] = 0; // confidence score
// score==0 will not pass the VisualizeBBox check
topDetections[i * 7 + 3] = 0; // bbox xmin
topDetections[i * 7 + 4] = 0; // bbox ymin
topDetections[i * 7 + 5] = 0; // bbox xmax
topDetections[i * 7 + 6] = 0; // bbox ymax
}
else
{
const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass));
const int bboxId = ((shareLocation ? (index % numPredsPerClass)
: index % (numClasses * numPredsPerClass)) + bboxOffset) * 4;
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label
topDetections[i * 7 + 2] = score; // confidence score
// clipped bbox xmin
topDetections[i * 7 + 3] = max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.));
// clipped bbox ymin
topDetections[i * 7 + 4] = max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.));
// clipped bbox xmax
topDetections[i * 7 + 5] = max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.));
// clipped bbox ymax
topDetections[i * 7 + 6] = max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.));
// Atomic add to increase the count of valid keepTopK bounding boxes
// Without having to do manual sync.
atomicAdd(&keepCount[i / keepTopK], 1);
}
}
}
template <typename T_BBOX, typename T_SCORE>
pluginStatus_t gatherTopDetections_gpu(
hipStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections)
{
hipMemsetAsync(keepCount, 0, numImages * sizeof(int), stream);
const int BS = 32;
const int GS = 32;
hipLaunchKernelGGL(( gatherTopDetections_kernel<T_BBOX, T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass,
numClasses, topK, keepTopK,
(int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData,
(int*) keepCount, (T_BBOX*) topDetections);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// gatherTopDetections LAUNCH CONFIG
typedef pluginStatus_t (*gtdFunc)(hipStream_t,
const bool,
const int,
const int,
const int,
const int,
const int,
const void*,
const void*,
const void*,
void*,
void*);
struct gtdLaunchConfig
{
DataType t_bbox;
DataType t_score;
gtdFunc function;
gtdLaunchConfig(DataType t_bbox, DataType t_score)
: t_bbox(t_bbox)
, t_score(t_score)
{
}
gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function)
: t_bbox(t_bbox)
, t_score(t_score)
, function(function)
{
}
bool operator==(const gtdLaunchConfig& other)
{
return t_bbox == other.t_bbox && t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::vector<gtdLaunchConfig> gtdFuncVec;
bool gtdInit()
{
gtdFuncVec.push_back(gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT,
gatherTopDetections_gpu<float, float>));
return true;
}
static bool initialized = gtdInit();
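/*
* gtdFuncVec is a small type-dispatch table: gtdInit() registers one entry per supported
* (bbox type, score type) pair at static-initialization time (currently only float/float).
* gatherTopDetections below does a linear lookup and returns STATUS_BAD_PARAM when the
* requested combination has no registered kernel.
*/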
pluginStatus_t gatherTopDetections(
hipStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const DataType DT_BBOX,
const DataType DT_SCORE,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections)
{
gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE);
for (unsigned i = 0; i < gtdFuncVec.size(); ++i)
{
if (lc == gtdFuncVec[i])
{
DEBUG_PRINTF("gatherTopDetections kernel %d\n", i);
return gtdFuncVec[i].function(stream,
shareLocation,
numImages,
numPredsPerClass,
numClasses,
topK,
keepTopK,
indices,
scores,
bboxData,
keepCount,
topDetections);
}
}
return STATUS_BAD_PARAM;
}
|
0dc52494e2eed4b5c782893ebe558087b62cfd3d.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "plugin.h"
#include "kernel.h"
template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void gatherTopDetections_kernel(
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const int* indices,
const T_SCORE* scores,
const T_BBOX* bboxData,
int* keepCount,
T_BBOX* topDetections)
{
if (keepTopK > topK)
return;
for (int i = blockIdx.x * nthds_per_cta + threadIdx.x;
i < numImages * keepTopK;
i += gridDim.x * nthds_per_cta)
{
const int imgId = i / keepTopK;
const int detId = i % keepTopK;
const int offset = imgId * numClasses * topK;
const int index = indices[offset + detId];
const T_SCORE score = scores[offset + detId];
/*
* The keepTopK selection may still contain "bad" (invalid) bounding boxes.
* For those entries we write the placeholder values shown below.
* They only appear at the end of the keepTopK range, since the boxes were sorted previously,
* they do not affect the count of valid bounding boxes (keepCount),
* and they will probably never be read, because consumers rely on keepCount.
*/
if (index == -1)
{
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = -1; // label
topDetections[i * 7 + 2] = 0; // confidence score
// score==0 will not pass the VisualizeBBox check
topDetections[i * 7 + 3] = 0; // bbox xmin
topDetections[i * 7 + 4] = 0; // bbox ymin
topDetections[i * 7 + 5] = 0; // bbox xmax
topDetections[i * 7 + 6] = 0; // bbox ymax
}
else
{
const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass));
const int bboxId = ((shareLocation ? (index % numPredsPerClass)
: index % (numClasses * numPredsPerClass)) + bboxOffset) * 4;
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label
topDetections[i * 7 + 2] = score; // confidence score
// clipped bbox xmin
topDetections[i * 7 + 3] = max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.));
// clipped bbox ymin
topDetections[i * 7 + 4] = max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.));
// clipped bbox xmax
topDetections[i * 7 + 5] = max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.));
// clipped bbox ymax
topDetections[i * 7 + 6] = max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.));
// Atomic add to increase the count of valid keepTopK bounding boxes
// Without having to do manual sync.
atomicAdd(&keepCount[i / keepTopK], 1);
}
}
}
template <typename T_BBOX, typename T_SCORE>
pluginStatus_t gatherTopDetections_gpu(
cudaStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections)
{
cudaMemsetAsync(keepCount, 0, numImages * sizeof(int), stream);
const int BS = 32;
const int GS = 32;
gatherTopDetections_kernel<T_BBOX, T_SCORE, BS><<<GS, BS, 0, stream>>>(shareLocation, numImages, numPredsPerClass,
numClasses, topK, keepTopK,
(int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData,
(int*) keepCount, (T_BBOX*) topDetections);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// gatherTopDetections LAUNCH CONFIG
typedef pluginStatus_t (*gtdFunc)(cudaStream_t,
const bool,
const int,
const int,
const int,
const int,
const int,
const void*,
const void*,
const void*,
void*,
void*);
struct gtdLaunchConfig
{
DataType t_bbox;
DataType t_score;
gtdFunc function;
gtdLaunchConfig(DataType t_bbox, DataType t_score)
: t_bbox(t_bbox)
, t_score(t_score)
{
}
gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function)
: t_bbox(t_bbox)
, t_score(t_score)
, function(function)
{
}
bool operator==(const gtdLaunchConfig& other)
{
return t_bbox == other.t_bbox && t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::vector<gtdLaunchConfig> gtdFuncVec;
bool gtdInit()
{
gtdFuncVec.push_back(gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT,
gatherTopDetections_gpu<float, float>));
return true;
}
static bool initialized = gtdInit();
pluginStatus_t gatherTopDetections(
cudaStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const DataType DT_BBOX,
const DataType DT_SCORE,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections)
{
gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE);
for (unsigned i = 0; i < gtdFuncVec.size(); ++i)
{
if (lc == gtdFuncVec[i])
{
DEBUG_PRINTF("gatherTopDetections kernel %d\n", i);
return gtdFuncVec[i].function(stream,
shareLocation,
numImages,
numPredsPerClass,
numClasses,
topK,
keepTopK,
indices,
scores,
bboxData,
keepCount,
topDetections);
}
}
return STATUS_BAD_PARAM;
}
|
6a454731eb4c17fc7a9446362ee2cff254b90be5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
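// merge_by_key_kernel runs thrust::merge_by_key with the sequential execution policy from inside
// a single device thread and stores the returned (keys_end, values_end) iterator pair through
// `result`, so the host-side test can verify where the merged ranges end.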
template<typename Iterator1,
typename Iterator2,
typename Iterator3,
typename Iterator4,
typename Iterator5,
typename Iterator6,
typename Iterator7>
__global__
void merge_by_key_kernel(Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 values_first2,
Iterator5 keys_result,
Iterator6 values_result,
Iterator7 result)
{
*result = thrust::merge_by_key(thrust::seq, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, values_first2, keys_result, values_result);
}
void TestMergeByKeyDeviceSeq()
{
thrust::device_vector<int> a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
thrust::device_vector<int> ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
thrust::device_vector<int> result_key(7), result_val(7);
typedef typename thrust::device_vector<int>::iterator Iterator;
thrust::device_vector<thrust::pair<Iterator,Iterator> > result_ends(1);
hipLaunchKernelGGL(( merge_by_key_kernel), dim3(1),dim3(1), 0, 0, a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin(),
result_ends.begin());
thrust::pair<Iterator,Iterator> ends = result_ends[0];
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
DECLARE_UNITTEST(TestMergeByKeyDeviceSeq);
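// TestMergeByKeyCudaStreams performs the same merge from the host, but on an explicitly created
// stream via thrust::hip::par(s), synchronizing the stream before the results are validated.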
void TestMergeByKeyCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
Vector ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
Vector result_key(7), result_val(7);
hipStream_t s;
hipStreamCreate(&s);
thrust::pair<Iterator,Iterator> ends =
thrust::merge_by_key(thrust::hip::par(s),
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestMergeByKeyCudaStreams);
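// Illustrative sketch (not part of the original test file): the same data merged with the
// keys-only thrust::merge algorithm, to contrast with merge_by_key above. The function name is
// hypothetical and it is intentionally not registered with DECLARE_UNITTEST.
void TestMergeKeysOnlySketch()
{
thrust::device_vector<int> a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
thrust::device_vector<int> ref(7);
ref[0] = 0; ref[1] = 0; ref[2] = 2; ref[3] = 3; ref[4] = 3; ref[5] = 4; ref[6] = 4;
thrust::device_vector<int> result(7);
// Host-side dispatch to the device backend; only keys are merged, no values are carried along.
thrust::merge(a.begin(), a.end(), b.begin(), b.end(), result.begin());
ASSERT_EQUAL(ref, result);
}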
|
6a454731eb4c17fc7a9446362ee2cff254b90be5.cu
|
#include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
template<typename Iterator1,
typename Iterator2,
typename Iterator3,
typename Iterator4,
typename Iterator5,
typename Iterator6,
typename Iterator7>
__global__
void merge_by_key_kernel(Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 values_first2,
Iterator5 keys_result,
Iterator6 values_result,
Iterator7 result)
{
*result = thrust::merge_by_key(thrust::seq, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, values_first2, keys_result, values_result);
}
void TestMergeByKeyDeviceSeq()
{
thrust::device_vector<int> a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
thrust::device_vector<int> ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
thrust::device_vector<int> result_key(7), result_val(7);
typedef typename thrust::device_vector<int>::iterator Iterator;
thrust::device_vector<thrust::pair<Iterator,Iterator> > result_ends(1);
merge_by_key_kernel<<<1,1>>>(a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin(),
result_ends.begin());
thrust::pair<Iterator,Iterator> ends = result_ends[0];
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
DECLARE_UNITTEST(TestMergeByKeyDeviceSeq);
void TestMergeByKeyCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
Vector ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
Vector result_key(7), result_val(7);
cudaStream_t s;
cudaStreamCreate(&s);
thrust::pair<Iterator,Iterator> ends =
thrust::merge_by_key(thrust::cuda::par(s),
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestMergeByKeyCudaStreams);
|
ddecdf2149f131acc7e5bdc5cc0963b2dba7d2b6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
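// Leaky ReLU on the GPU. Forward: y = x for x > 0, y = negative_slope * x otherwise
// (negative_slope == 0 reduces to the standard ReLU). Backward: dx = dy for x > 0 and
// dx = negative_slope * dy otherwise, which is exactly what ReLUBackward computes below.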
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void pBlob_ReLU(Dtype* gpuPtr,int numValues){
Dtype* cpuPtr = new Dtype[numValues];
hipMemcpy(cpuPtr,gpuPtr,numValues*sizeof(Dtype),hipMemcpyDeviceToHost);
for (int i=0;i<numValues;++i){
std::cout<< cpuPtr[i] <<",";
}
std::cout<<std::endl;
delete[] cpuPtr; // free the temporary host copy so repeated debug dumps do not leak
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (use_log_){
std::cout<< "ReLU Bwd top diff"<<std::endl;
pBlob_ReLU(top[0]->mutable_gpu_diff(),20 * top[0]->shape(1) * top[0]->shape(2) * top[0]->shape(3));
std::cout<< std::endl;
}
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
ddecdf2149f131acc7e5bdc5cc0963b2dba7d2b6.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void pBlob_ReLU(Dtype* gpuPtr,int numValues){
Dtype* cpuPtr = new Dtype[numValues];
cudaMemcpy(cpuPtr,gpuPtr,numValues*sizeof(Dtype),cudaMemcpyDeviceToHost);
for (int i=0;i<numValues;++i){
std::cout<< cpuPtr[i] <<",";
}
std::cout<<std::endl;
delete[] cpuPtr; // free the temporary host copy so repeated debug dumps do not leak
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (use_log_){
std::cout<< "ReLU Bwd top diff"<<std::endl;
pBlob_ReLU(top[0]->mutable_gpu_diff(),20 * top[0]->shape(1) * top[0]->shape(2) * top[0]->shape(3));
std::cout<< std::endl;
}
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
86380e1fc19a6431e1db77b6809775b90e56f59a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <iostream>
#include <fstream>
#include <cstring>
#include <random>
#define ARRAYSIZE (100000)
#define NUM_THREADS (1024) // Always 1 more thread if arraysize is odd
#define BLOCK_SIZE 128
using namespace std;
void PrintText(int out[ARRAYSIZE], int out2[ARRAYSIZE])
{
for (int i = 0; i < ARRAYSIZE; i++)
{
printf("[%d]: %d \"%d\" \n", i, out2[i], out2[i] - out[i]);
}
}
int * OddEvenSortSingleThread(int in[])
{
int *out = new int[ARRAYSIZE];
for (int i = 0; i < ARRAYSIZE; i++)
{
out[i] = in[i];
}
// Run the sorting ARRAYSIZE times to make sure that everything is sorted
// NOTE: We have two steps at once
// However, if the ARRAYSIZE is an odd number, we need to make sure that the loop runs enough times
for (int i = 0; i < (ARRAYSIZE + 1) / 2; i++)
{
// Even
for (int j = 0; j < ARRAYSIZE - 1; j += 2)
{
if (out[j] > out[j + 1])
{
swap(out[j], out[j + 1]);
}
}
// Odd
for (int j = 1; j < ARRAYSIZE - 1; j += 2)
{
if (out[j] > out[j + 1])
{
swap(out[j], out[j + 1]);
}
}
}
return out;
}
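// Parallel odd-even transposition: in the even phase each thread compares the pair starting at
// element 2*id, in the odd phase the pair starting at 2*id + 1, and swap_d then walks the array
// in strides of threadStep (twice the total thread count) so the launched threads together cover
// every pair of the ARRAYSIZE elements in each phase.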
__device__ void swap_d(int randomNum_d[], int id, int threadStep)
{
int temp;
int thisID = id;
int nextID = id + 1;
int step = 0;
while (nextID + step < ARRAYSIZE)
{
if (randomNum_d[thisID + step] > randomNum_d[nextID + step])
{
temp = randomNum_d[thisID + step];
randomNum_d[thisID + step] = randomNum_d[nextID + step];
randomNum_d[nextID + step] = temp;
}
step += threadStep;
}
}
__global__ void EvenSortParallel(int randomNum_d[], int threadStep)
{
int id = threadIdx.x + blockDim.x * blockIdx.x;
// EVEN
// Swap values the thread is assigned with
swap_d(randomNum_d, id * 2, threadStep);
__syncthreads();
}
__global__ void OddSortParallel(int randomNum_d[], int threadStep)
{
int id = threadIdx.x + blockDim.x * blockIdx.x;
// ODD
// Swap values the thread is assigned with
swap_d(randomNum_d, id * 2 + 1, threadStep);
__syncthreads();
}
int main(int argc, char* argv[])
{
bool saveToFile = true;
// Loop through args and check for the -noFile flag
for (int i = 0; i < argc; ++i)
{
if (std::string(argv[i]) == "-noFile") saveToFile = false;
}
/* initialize random seed: */
srand(time(NULL));
std::random_device rd;
std::uniform_int_distribution<int> dist;
int* randomNum = new int[ARRAYSIZE];
for (int i = 0; i < ARRAYSIZE; i++)
{
randomNum[i] = dist(rd) % ARRAYSIZE;
}
// Parallel --------------------------------------------
int size = ARRAYSIZE * sizeof(int);
int *out2 = new int[ARRAYSIZE];
int threadStep = ceil((float) NUM_THREADS / (float) BLOCK_SIZE) * BLOCK_SIZE * 2;
int *randomNum_d;
// Transfer array to device memory
hipMalloc((void**)&randomNum_d, size);
hipMemcpy(randomNum_d, randomNum, size, hipMemcpyHostToDevice);
for (int i = 0; i < (ARRAYSIZE + 1) / 2; i++)
{
// Launch ceil(NUM_THREADS/BLOCK_SIZE) blocks of BLOCK_SIZE threads each (NUM_THREADS threads total) --- function<<<grid, threads>>>();
hipLaunchKernelGGL(( EvenSortParallel) , dim3(ceil((float)NUM_THREADS / (float)BLOCK_SIZE)), dim3(BLOCK_SIZE) , 0, 0, randomNum_d, threadStep);
hipLaunchKernelGGL(( OddSortParallel) , dim3(ceil((float)NUM_THREADS / (float)BLOCK_SIZE)), dim3(BLOCK_SIZE) , 0, 0, randomNum_d, threadStep);
}
// Transfer array from device to host
hipMemcpy(out2, randomNum_d, size, hipMemcpyDeviceToHost);
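// Optional host-side sanity check (not part of the original program): confirm that the array
// copied back from the GPU is non-decreasing before it is optionally written to disk.
bool sortedOK = true;
for (int i = 1; i < ARRAYSIZE; i++)
{
if (out2[i - 1] > out2[i]) { sortedOK = false; break; }
}
if (!sortedOK) std::cout << "Warning: GPU output is not fully sorted" << std::endl;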
// Single Thread --------------------------------------
/*
int * out = new int[ARRAYSIZE];
out = OddEvenSortSingleThread(randomNum);
*/
// Debug the output
//PrintText(out, out2);
// Save result to file
if (saveToFile)
{
std::string fileName = "gpuSorted.txt";
ofstream outputFile(fileName);
if (outputFile.is_open())
{
for (int i = 0; i < ARRAYSIZE; i++)
{
outputFile << out2[i] << std::endl;
}
outputFile.close();
}
else
{
std::cout << fileName.c_str() << " could not be opened" << std::endl;
}
}
// Free memory
hipFree(randomNum_d);
return 0;
}
|
86380e1fc19a6431e1db77b6809775b90e56f59a.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <iostream>
#include <fstream>
#include <cstring>
#include <random>
#define ARRAYSIZE (100000)
#define NUM_THREADS (1024) // Always 1 more thread if arraysize is odd
#define BLOCK_SIZE 128
using namespace std;
void PrintText(int out[ARRAYSIZE], int out2[ARRAYSIZE])
{
for (int i = 0; i < ARRAYSIZE; i++)
{
printf("[%d]: %d \"%d\" \n", i, out2[i], out2[i] - out[i]);
}
}
int * OddEvenSortSingleThread(int in[])
{
int *out = new int[ARRAYSIZE];
for (int i = 0; i < ARRAYSIZE; i++)
{
out[i] = in[i];
}
// Run the sorting ARRAYSIZE times to make sure that everything is sorted
// NOTE: We have two steps at once
// However, if the ARRAYSIZE is an odd number, we need to make sure that the loop runs enough times
for (int i = 0; i < (ARRAYSIZE + 1) / 2; i++)
{
// Even
for (int j = 0; j < ARRAYSIZE - 1; j += 2)
{
if (out[j] > out[j + 1])
{
swap(out[j], out[j + 1]);
}
}
// Odd
for (int j = 1; j < ARRAYSIZE - 1; j += 2)
{
if (out[j] > out[j + 1])
{
swap(out[j], out[j + 1]);
}
}
}
return out;
}
__device__ void swap_d(int randomNum_d[], int id, int threadStep)
{
int temp;
int thisID = id;
int nextID = id + 1;
int step = 0;
while (nextID + step < ARRAYSIZE)
{
if (randomNum_d[thisID + step] > randomNum_d[nextID + step])
{
temp = randomNum_d[thisID + step];
randomNum_d[thisID + step] = randomNum_d[nextID + step];
randomNum_d[nextID + step] = temp;
}
step += threadStep;
}
}
__global__ void EvenSortParallel(int randomNum_d[], int threadStep)
{
int id = threadIdx.x + blockDim.x * blockIdx.x;
// EVEN
// Swap values the thread is assigned with
swap_d(randomNum_d, id * 2, threadStep);
__syncthreads();
}
__global__ void OddSortParallel(int randomNum_d[], int threadStep)
{
int id = threadIdx.x + blockDim.x * blockIdx.x;
// ODD
// Swap values the thread is assigned with
swap_d(randomNum_d, id * 2 + 1, threadStep);
__syncthreads();
}
int main(int argc, char* argv[])
{
bool saveToFile = true;
// Loop through args and check for the -noFile flag
for (int i = 0; i < argc; ++i)
{
if (std::string(argv[i]) == "-noFile") saveToFile = false;
}
/* initialize random seed: */
srand(time(NULL));
std::random_device rd;
std::uniform_int_distribution<int> dist;
int* randomNum = new int[ARRAYSIZE];
for (int i = 0; i < ARRAYSIZE; i++)
{
randomNum[i] = dist(rd) % ARRAYSIZE;
}
// Parallel --------------------------------------------
int size = ARRAYSIZE * sizeof(int);
int *out2 = new int[ARRAYSIZE];
int threadStep = ceil((float) NUM_THREADS / (float) BLOCK_SIZE) * BLOCK_SIZE * 2;
int *randomNum_d;
// Transfer array to device memory
cudaMalloc((void**)&randomNum_d, size);
cudaMemcpy(randomNum_d, randomNum, size, cudaMemcpyHostToDevice);
for (int i = 0; i < (ARRAYSIZE + 1) / 2; i++)
{
// Launch ceil(NUM_THREADS/BLOCK_SIZE) blocks of BLOCK_SIZE threads each (NUM_THREADS threads total) --- function<<<grid, threads>>>();
EvenSortParallel <<< ceil((float)NUM_THREADS / (float)BLOCK_SIZE), BLOCK_SIZE >>> (randomNum_d, threadStep);
OddSortParallel <<< ceil((float)NUM_THREADS / (float)BLOCK_SIZE), BLOCK_SIZE >>> (randomNum_d, threadStep);
}
// Transfer array from device to host
cudaMemcpy(out2, randomNum_d, size, cudaMemcpyDeviceToHost);
// Single Thread --------------------------------------
/*
int * out = new int[ARRAYSIZE];
out = OddEvenSortSingleThread(randomNum);
*/
// Debug the output
//PrintText(out, out2);
// Save result to file
if (saveToFile)
{
std::string fileName = "gpuSorted.txt";
ofstream outputFile(fileName);
if (outputFile.is_open())
{
for (int i = 0; i < ARRAYSIZE; i++)
{
outputFile << out2[i] << std::endl;
}
outputFile.close();
}
else
{
std::cout << fileName.c_str() << " could not be opened" << std::endl;
}
}
// Free memory
cudaFree(randomNum_d);
return 0;
}
|
646146c86de119425ad7b63195ed6b8455826bae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstddef>
#include <iostream>
#include <algorithm>
#include "caffe/3rdparty/ctc.h"
#include "caffe/3rdparty/detail/cpu_ctc.cuh"
#ifdef __HIPCC__
#include "caffe/3rdparty/detail/gpu_ctc.cuh"
#endif
extern "C" {
int get_warpctc_version() {
return 2;
}
const char* ctcGetStatusString(ctcStatus_t status) {
switch (status) {
case CTC_STATUS_SUCCESS:
return "no error";
case CTC_STATUS_MEMOPS_FAILED:
return "cuda memcpy or memset failed";
case CTC_STATUS_INVALID_VALUE:
return "invalid value";
case CTC_STATUS_EXECUTION_FAILED:
return "execution failed";
case CTC_STATUS_UNKNOWN_ERROR:
default:
return "unknown error";
}
}
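/*
* compute_ctc_loss validates its inputs and then dispatches on options.loc: CTC_CPU builds a
* CpuCTC instance, CTC_GPU builds a GpuCTC instance (when compiled with GPU support). Passing
* gradients == NULL selects the score-only forward pass instead of cost_and_grad.
*/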
ctcStatus_t compute_ctc_loss(const float* const activations,
float* gradients,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths,
int alphabet_size,
int minibatch,
float *costs,
void *workspace,
ctcOptions options) {
if (activations == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr ||
costs == nullptr ||
workspace == nullptr ||
alphabet_size <= 0 ||
minibatch <= 0)
return CTC_STATUS_INVALID_VALUE;
if (options.loc == CTC_CPU) {
CpuCTC<float> ctc(alphabet_size, minibatch, workspace, options.num_threads,
options.blank_label);
if (gradients != NULL)
return ctc.cost_and_grad(activations, gradients,
costs,
flat_labels, label_lengths,
input_lengths);
else
return ctc.score_forward(activations, costs, flat_labels,
label_lengths, input_lengths);
} else if (options.loc == CTC_GPU) {
#ifdef __HIPCC__
GpuCTC<float> ctc(alphabet_size, minibatch, workspace, options.stream,
options.blank_label);
if (gradients != NULL)
return ctc.cost_and_grad(activations, gradients, costs,
flat_labels, label_lengths,
input_lengths);
else
return ctc.score_forward(activations, costs, flat_labels,
label_lengths, input_lengths);
#else
std::cerr << "GPU execution requested, but not compiled with GPU support" << std::endl;
return CTC_STATUS_EXECUTION_FAILED;
#endif
} else {
return CTC_STATUS_INVALID_VALUE;
}
}
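/*
* get_workspace_size mirrors the buffer layout used by the two implementations. S = 2 * maxL + 1
* is the length of the label sequence once blanks are interleaved between and around the symbols;
* the GPU branch sizes per-utterance alphas, denominators and probabilities, while the CPU branch
* only needs a smaller per-minibatch scratch area.
*/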
ctcStatus_t get_workspace_size(const int* const label_lengths,
const int* const input_lengths,
int alphabet_size, int minibatch,
ctcOptions options,
size_t* size_bytes)
{
if (label_lengths == nullptr ||
input_lengths == nullptr ||
size_bytes == nullptr ||
alphabet_size <= 0 ||
minibatch <= 0)
return CTC_STATUS_INVALID_VALUE;
// This is the max of all S and T for all examples in the minibatch.
int maxL = *std::max_element(label_lengths, label_lengths + minibatch);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch);
const int S = 2 * maxL + 1;
*size_bytes = 0;
if (options.loc == CTC_GPU) {
// GPU storage
//nll_forward, nll_backward
*size_bytes += 2 * sizeof(float) * minibatch;
//repeats
*size_bytes += sizeof(int) * minibatch;
//label offsets
*size_bytes += sizeof(int) * minibatch;
//utt_length
*size_bytes += sizeof(int) * minibatch;
//label lengths
*size_bytes += sizeof(int) * minibatch;
//labels without blanks - overallocate for now
*size_bytes += sizeof(int) * maxL * minibatch;
//labels with blanks
*size_bytes += sizeof(int) * S * minibatch;
//alphas
*size_bytes += sizeof(float) * S * maxT * minibatch;
//denoms
*size_bytes += sizeof(float) * maxT * minibatch;
//probs (since we will pass in activations)
*size_bytes += sizeof(float) * alphabet_size * maxT * minibatch;
} else {
//cpu can eventually replace all minibatch with
//max number of concurrent threads if memory is
//really tight
//per minibatch memory
size_t per_minibatch_bytes = 0;
//output
per_minibatch_bytes += sizeof(float) * alphabet_size ;
//alphas
per_minibatch_bytes += sizeof(float) * S * maxT;
//betas
per_minibatch_bytes += sizeof(float) * S;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * S;
*size_bytes = per_minibatch_bytes * minibatch;
//probs
*size_bytes += sizeof(float) * alphabet_size * maxT * minibatch;
}
return CTC_STATUS_SUCCESS;
}
}
|
646146c86de119425ad7b63195ed6b8455826bae.cu
|
#include <cstddef>
#include <iostream>
#include <algorithm>
#include "caffe/3rdparty/ctc.h"
#include "caffe/3rdparty/detail/cpu_ctc.cuh"
#ifdef __CUDACC__
#include "caffe/3rdparty/detail/gpu_ctc.cuh"
#endif
extern "C" {
int get_warpctc_version() {
return 2;
}
const char* ctcGetStatusString(ctcStatus_t status) {
switch (status) {
case CTC_STATUS_SUCCESS:
return "no error";
case CTC_STATUS_MEMOPS_FAILED:
return "cuda memcpy or memset failed";
case CTC_STATUS_INVALID_VALUE:
return "invalid value";
case CTC_STATUS_EXECUTION_FAILED:
return "execution failed";
case CTC_STATUS_UNKNOWN_ERROR:
default:
return "unknown error";
}
}
ctcStatus_t compute_ctc_loss(const float* const activations,
float* gradients,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths,
int alphabet_size,
int minibatch,
float *costs,
void *workspace,
ctcOptions options) {
if (activations == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr ||
costs == nullptr ||
workspace == nullptr ||
alphabet_size <= 0 ||
minibatch <= 0)
return CTC_STATUS_INVALID_VALUE;
if (options.loc == CTC_CPU) {
CpuCTC<float> ctc(alphabet_size, minibatch, workspace, options.num_threads,
options.blank_label);
if (gradients != NULL)
return ctc.cost_and_grad(activations, gradients,
costs,
flat_labels, label_lengths,
input_lengths);
else
return ctc.score_forward(activations, costs, flat_labels,
label_lengths, input_lengths);
} else if (options.loc == CTC_GPU) {
#ifdef __CUDACC__
GpuCTC<float> ctc(alphabet_size, minibatch, workspace, options.stream,
options.blank_label);
if (gradients != NULL)
return ctc.cost_and_grad(activations, gradients, costs,
flat_labels, label_lengths,
input_lengths);
else
return ctc.score_forward(activations, costs, flat_labels,
label_lengths, input_lengths);
#else
std::cerr << "GPU execution requested, but not compiled with GPU support" << std::endl;
return CTC_STATUS_EXECUTION_FAILED;
#endif
} else {
return CTC_STATUS_INVALID_VALUE;
}
}
ctcStatus_t get_workspace_size(const int* const label_lengths,
const int* const input_lengths,
int alphabet_size, int minibatch,
ctcOptions options,
size_t* size_bytes)
{
if (label_lengths == nullptr ||
input_lengths == nullptr ||
size_bytes == nullptr ||
alphabet_size <= 0 ||
minibatch <= 0)
return CTC_STATUS_INVALID_VALUE;
// This is the max of all S and T for all examples in the minibatch.
int maxL = *std::max_element(label_lengths, label_lengths + minibatch);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch);
const int S = 2 * maxL + 1;
*size_bytes = 0;
if (options.loc == CTC_GPU) {
// GPU storage
//nll_forward, nll_backward
*size_bytes += 2 * sizeof(float) * minibatch;
//repeats
*size_bytes += sizeof(int) * minibatch;
//label offsets
*size_bytes += sizeof(int) * minibatch;
//utt_length
*size_bytes += sizeof(int) * minibatch;
//label lengths
*size_bytes += sizeof(int) * minibatch;
//labels without blanks - overallocate for now
*size_bytes += sizeof(int) * maxL * minibatch;
//labels with blanks
*size_bytes += sizeof(int) * S * minibatch;
//alphas
*size_bytes += sizeof(float) * S * maxT * minibatch;
//denoms
*size_bytes += sizeof(float) * maxT * minibatch;
//probs (since we will pass in activations)
*size_bytes += sizeof(float) * alphabet_size * maxT * minibatch;
} else {
//cpu can eventually replace all minibatch with
//max number of concurrent threads if memory is
//really tight
//per minibatch memory
size_t per_minibatch_bytes = 0;
//output
per_minibatch_bytes += sizeof(float) * alphabet_size ;
//alphas
per_minibatch_bytes += sizeof(float) * S * maxT;
//betas
per_minibatch_bytes += sizeof(float) * S;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * S;
*size_bytes = per_minibatch_bytes * minibatch;
//probs
*size_bytes += sizeof(float) * alphabet_size * maxT * minibatch;
}
return CTC_STATUS_SUCCESS;
}
}
|
dc4958db0a75086e049ae64c9180011cfdaae8a3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaStepper.cuh"
#include <stdio.h>
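/*
* stepper: one block per neuron (blockIdx.x selects the neuron). Each call performs a single
* explicit-Euler step of the firing-rate dynamics
* r_i <- r_i + stepSize * ( sigmoid( sum_j W_ij * r_j + b_i ) - r_i )
* and, for neurons flagged in d_sampleNeuronIndexes, copies the updated rate into d_samples.
*/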
__global__ void stepper(
float* d_firingRate,
float* d_newFiringRate,
float* d_connMatrix,
int* d_sampleNeuronIndexes,
float* d_biasVec,
float* d_samples,
float* stepSize,
int* numNeurons) {
int neurNum = blockIdx.x;
float fireSum = 0;
int index;
for (int i = 0; i < (*numNeurons); i++) {
index = neurNum * (*numNeurons) + i;
fireSum += d_firingRate[i] * d_connMatrix[index];
}
fireSum += d_biasVec[neurNum];
float fVal = 1 / (1 + exp(-fireSum));
fVal -= d_firingRate[neurNum];
d_newFiringRate[neurNum] = d_firingRate[neurNum] + (fVal * (*stepSize));
index = d_sampleNeuronIndexes[neurNum];
if (index > -1) {
//printf("firingRate sample: %f\n", d_newFiringRate[neurNum]);
d_samples[index] = d_newFiringRate[neurNum];
}
}
namespace NNet {
float** stepSys(
int numSteps,
int numNeurons,
float** connMatrix,
float* biasVec,
float* startRate,
std::vector<int> sampleNeurons,
float stepSize
) {
//Set up flag array for sampled neurons
int* d_sampleNeuronIndexes;
int* sampleNeuronFlags = new int[numNeurons];
for (int i = 0; i < numNeurons; i++) {
sampleNeuronFlags[i] = -1;
}
float* sampleLayer = new float[sampleNeurons.size()];
float** sampleRates = new float* [numSteps + 1];
int acc = 0;
for (int i = 0; i < sampleNeurons.size(); i++) {
sampleNeuronFlags[sampleNeurons[i]] = acc;
acc++;
sampleLayer[i] = startRate[sampleNeurons[i]];
}
sampleRates[0] = sampleLayer;
int size = sizeof(int) * numNeurons;
hipMalloc((void**)&d_sampleNeuronIndexes, size);
hipMemcpy(d_sampleNeuronIndexes, sampleNeuronFlags, size, hipMemcpyHostToDevice);
float* d_samples;
size = sampleNeurons.size() * sizeof(float);
hipMalloc((void**)&d_samples, size);
//Copying to GPU system properties
float* d_biasVec; float* d_firingRate;
float* connMatrix1D; float* d_connMatrix;
float* d_newFiringRate;
int index;
connMatrix1D = new float[numNeurons * numNeurons];
for (int i = 0; i < numNeurons; i++) {
for (int j = 0; j < numNeurons; j++) {
index = i * numNeurons + j;
connMatrix1D[index] = connMatrix[i][j];
}
}
size = sizeof(float) * numNeurons * numNeurons;
hipMalloc((void**)&d_connMatrix, size);
hipMemcpy(d_connMatrix, connMatrix1D, size, hipMemcpyHostToDevice);
size = sizeof(float) * numNeurons;
hipMalloc((void**)&d_biasVec, size); hipMalloc((void**)&d_firingRate, size);
hipMemcpy(d_biasVec, biasVec, size, hipMemcpyHostToDevice);
hipMemcpy(d_firingRate, startRate, size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_newFiringRate, size);
size = sizeof(int);
int* d_numNeur; hipMalloc((void**)&d_numNeur, size);
hipMemcpy(d_numNeur, &numNeurons, size, hipMemcpyHostToDevice);
size = sizeof(float);
float* d_stepSize; hipMalloc((void**)&d_stepSize, size);
hipMemcpy(d_stepSize, &stepSize, size, hipMemcpyHostToDevice);
int sizeSample = sizeof(float) * sampleNeurons.size();
int sizeUpdate = sizeof(float) * numNeurons;
//Stepping system over input number of steps using stepper cuda kernel.
/*for (int i = 0; i < numSteps; i++) {
stepper<<<numNeurons, 1>>>(d_firingRate, d_newFiringRate, d_connMatrix, d_sampleNeuronIndexes,
d_biasVec, d_samples, d_stepSize, d_numNeur);
float* sampleLayer = new float[sampleNeurons.size()];
hipMemcpy(sampleLayer, d_samples, sizeSample, hipMemcpyDeviceToHost);
sampleRates[i+1] = sampleLayer;
hipMemcpy(d_firingRate, d_newFiringRate, sizeUpdate, hipMemcpyDeviceToDevice);
}*/
//stepping system over input number of steps... Sin(t) bias'
float time = 0.0;
int sizeBias = sizeof(float) * numNeurons;
for (int i = 0; i < numSteps; i++) {
float sint = sin(time);
for (int i = 0; i < numNeurons; i++) {
biasVec[i] = abs(sint);
}
time += stepSize;
hipMemcpy(d_biasVec, biasVec, sizeBias, hipMemcpyHostToDevice);
stepper << <numNeurons, 1 >> > (d_firingRate, d_newFiringRate, d_connMatrix, d_sampleNeuronIndexes,
d_biasVec, d_samples, d_stepSize, d_numNeur);
float* sampleLayer = new float[sampleNeurons.size()];
hipMemcpy(sampleLayer, d_samples, sizeSample, hipMemcpyDeviceToHost);
sampleRates[i+1] = sampleLayer;
hipMemcpy(d_firingRate, d_newFiringRate, sizeUpdate, hipMemcpyDeviceToDevice);
}
// Release device buffers and host scratch before handing the sampled rates back to the caller.
hipFree(d_sampleNeuronIndexes); hipFree(d_samples); hipFree(d_connMatrix);
hipFree(d_biasVec); hipFree(d_firingRate); hipFree(d_newFiringRate);
hipFree(d_numNeur); hipFree(d_stepSize);
delete[] sampleNeuronFlags; delete[] connMatrix1D;
return sampleRates;
}
}
|
dc4958db0a75086e049ae64c9180011cfdaae8a3.cu
|
#include "cudaStepper.cuh"
#include <stdio.h>
__global__ void stepper(
float* d_firingRate,
float* d_newFiringRate,
float* d_connMatrix,
int* d_sampleNeuronIndexes,
float* d_biasVec,
float* d_samples,
float* stepSize,
int* numNeurons) {
int neurNum = blockIdx.x;
float fireSum = 0;
int index;
for (int i = 0; i < (*numNeurons); i++) {
index = neurNum * (*numNeurons) + i;
fireSum += d_firingRate[i] * d_connMatrix[index];
}
fireSum += d_biasVec[neurNum];
float fVal = 1 / (1 + exp(-fireSum));
fVal -= d_firingRate[neurNum];
d_newFiringRate[neurNum] = d_firingRate[neurNum] + (fVal * (*stepSize));
index = d_sampleNeuronIndexes[neurNum];
if (index > -1) {
//printf("firingRate sample: %f\n", d_newFiringRate[neurNum]);
d_samples[index] = d_newFiringRate[neurNum];
}
}
namespace NNet {
float** stepSys(
int numSteps,
int numNeurons,
float** connMatrix,
float* biasVec,
float* startRate,
std::vector<int> sampleNeurons,
float stepSize
) {
//Set up flag array for sampled neurons
int* d_sampleNeuronIndexes;
int* sampleNeuronFlags = new int[numNeurons];
for (int i = 0; i < numNeurons; i++) {
sampleNeuronFlags[i] = -1;
}
float* sampleLayer = new float[sampleNeurons.size()];
float** sampleRates = new float* [numSteps + 1];
int acc = 0;
for (int i = 0; i < sampleNeurons.size(); i++) {
sampleNeuronFlags[sampleNeurons[i]] = acc;
acc++;
sampleLayer[i] = startRate[sampleNeurons[i]];
}
sampleRates[0] = sampleLayer;
int size = sizeof(int) * numNeurons;
cudaMalloc((void**)&d_sampleNeuronIndexes, size);
cudaMemcpy(d_sampleNeuronIndexes, sampleNeuronFlags, size, cudaMemcpyHostToDevice);
float* d_samples;
size = sampleNeurons.size() * sizeof(float);
cudaMalloc((void**)&d_samples, size);
//Copying to GPU system properties
float* d_biasVec; float* d_firingRate;
float* connMatrix1D; float* d_connMatrix;
float* d_newFiringRate;
int index;
connMatrix1D = new float[numNeurons * numNeurons];
for (int i = 0; i < numNeurons; i++) {
for (int j = 0; j < numNeurons; j++) {
index = i * numNeurons + j;
connMatrix1D[index] = connMatrix[i][j];
}
}
size = sizeof(float) * numNeurons * numNeurons;
cudaMalloc((void**)&d_connMatrix, size);
cudaMemcpy(d_connMatrix, connMatrix1D, size, cudaMemcpyHostToDevice);
size = sizeof(float) * numNeurons;
cudaMalloc((void**)&d_biasVec, size); cudaMalloc((void**)&d_firingRate, size);
cudaMemcpy(d_biasVec, biasVec, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_firingRate, startRate, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_newFiringRate, size);
size = sizeof(int);
int* d_numNeur; cudaMalloc((void**)&d_numNeur, size);
cudaMemcpy(d_numNeur, &numNeurons, size, cudaMemcpyHostToDevice);
size = sizeof(float);
float* d_stepSize; cudaMalloc((void**)&d_stepSize, size);
cudaMemcpy(d_stepSize, &stepSize, size, cudaMemcpyHostToDevice);
int sizeSample = sizeof(float) * sampleNeurons.size();
int sizeUpdate = sizeof(float) * numNeurons;
//Stepping system over input number of steps using stepper cuda kernel.
/*for (int i = 0; i < numSteps; i++) {
stepper<<<numNeurons, 1>>>(d_firingRate, d_newFiringRate, d_connMatrix, d_sampleNeuronIndexes,
d_biasVec, d_samples, d_stepSize, d_numNeur);
float* sampleLayer = new float[sampleNeurons.size()];
cudaMemcpy(sampleLayer, d_samples, sizeSample, cudaMemcpyDeviceToHost);
sampleRates[i+1] = sampleLayer;
cudaMemcpy(d_firingRate, d_newFiringRate, sizeUpdate, cudaMemcpyDeviceToDevice);
}*/
//stepping system over input number of steps... Sin(t) bias'
float time = 0.0;
int sizeBias = sizeof(float) * numNeurons;
for (int i = 0; i < numSteps; i++) {
float sint = sin(time);
for (int i = 0; i < numNeurons; i++) {
biasVec[i] = abs(sint);
}
time += stepSize;
cudaMemcpy(d_biasVec, biasVec, sizeBias, cudaMemcpyHostToDevice);
stepper << <numNeurons, 1 >> > (d_firingRate, d_newFiringRate, d_connMatrix, d_sampleNeuronIndexes,
d_biasVec, d_samples, d_stepSize, d_numNeur);
float* sampleLayer = new float[sampleNeurons.size()];
cudaMemcpy(sampleLayer, d_samples, sizeSample, cudaMemcpyDeviceToHost);
sampleRates[i+1] = sampleLayer;
cudaMemcpy(d_firingRate, d_newFiringRate, sizeUpdate, cudaMemcpyDeviceToDevice);
}
// Release device buffers and host scratch before handing the sampled rates back to the caller.
cudaFree(d_sampleNeuronIndexes); cudaFree(d_samples); cudaFree(d_connMatrix);
cudaFree(d_biasVec); cudaFree(d_firingRate); cudaFree(d_newFiringRate);
cudaFree(d_numNeur); cudaFree(d_stepSize);
delete[] sampleNeuronFlags; delete[] connMatrix1D;
return sampleRates;
}
}
|
d516d66f591e6a78a8a4f587a37fd2d35113a3c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star2d2r-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
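/*
* AN5D-generated time-tiling driver: the main loop below advances the stencil __side0Len = 9
* timesteps per kernel launch, and the if/else chain that follows falls back to smaller temporal
* tiles (kernel0_1 ... kernel0_5) so the leftover timestep % 9 steps are executed while keeping
* the parity expected by the double-buffered dev_A array.
*/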
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
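// Non-SCOP path: OpenMP-parallel host reference implementation of the radius-2 star-shaped 9-point stencil.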
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-2][j] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i][j-2] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i][j+2] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+2][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
d516d66f591e6a78a8a4f587a37fd2d35113a3c1.cu
|
#include <assert.h>
#include <stdio.h>
#include "star2d2r-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
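// Temporal tiling (AN5D-generated): kernel0_N appears to advance N time steps per launch; full 9-step tiles are issued in the loop below, and leftover steps are handled by the smaller-tile remainder cases that follow.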
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
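// Non-SCOP path: OpenMP-parallel host reference implementation of the radius-2 star-shaped 9-point stencil.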
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-2][j] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i][j-2] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i][j+2] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+2][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
a6432615b9e9c2bcdacb76c1a32a1b3a2d3364b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <rocblas.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <stdio.h>
// C-style indexing
int ci(int row, int column, int nColumns)
{
return row * nColumns + column;
}
int rowA = 1000;
int colA = 3000;
int rowB = colA;
int colB = 2000;
int rowC = rowA;
int colC = colB;
hipblasHandle_t handle;
hipblasStatus_t status;
void device_info();
void gpu_random_init(float* A, int rows, int cols);
void gpu_matrix_product(const float* A, const float* B, float* C);
void cpu_matrix_product(const float* A, const float* B, float* C);
void matrix_print(float* A, int rows, int cols);
void check_results(float* A, float* B);
int main()
{
device_info();
// allocate three device_vectors with row*col elements
thrust::device_vector<float> d_A(rowA * colA);
thrust::device_vector<float> d_B(rowB * colB);
thrust::device_vector<float> d_C(rowC * colC);
// initialize matrices A and B with cuRAND on the device
gpu_random_init(raw_pointer_cast(&d_A[0]), rowA, colA);
gpu_random_init(raw_pointer_cast(&d_B[0]), rowB, colB);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
gpu_matrix_product(raw_pointer_cast(&d_A[0]), raw_pointer_cast(&d_B[0]), raw_pointer_cast(&d_C[0]));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("CUBLAS time : %f ms\n", milliseconds);
thrust::host_vector<float> h_A(rowA * colA);
thrust::host_vector<float> h_B(rowB * colB);
thrust::host_vector<float> h_C_cublas(rowC * colC);
thrust::host_vector<float> h_C_cpu(rowC * colC);
// Copy matrices A, B, and C from device to host
copy(d_A.begin(), d_A.end(), h_A.begin());
copy(d_B.begin(), d_B.end(), h_B.begin());
copy(d_C.begin(), d_C.end(), h_C_cublas.begin());
//Evaluate matrix product on cpu
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
cpu_matrix_product(&h_A[0], &h_B[0], &h_C_cpu[0]);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("CPU's time : %f ms\n", milliseconds);
// Compare the matrices element by element
check_results(&h_C_cublas[0], &h_C_cpu[0]);
return 0;
}
void device_info()
{
int kb = 1024;
int mb = kb * kb;
int GPU_N;
hipGetDeviceCount(&GPU_N);
printf("Device count: %d\n", GPU_N);
for (int i = 0; i < GPU_N; i++)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
printf("PCI Bus id: %d\n", props.pciBusID);
hipGetDeviceProperties(&props, i);
printf("Device %i: %s: %i.%i\n", i, props.name, props.major, props.minor);
printf("Global memory: %i mb\n", props.totalGlobalMem / mb);
printf("Shared memory: %i kb\n", props.sharedMemPerBlock / kb);
printf("Constant memory: %i kb\n", props.totalConstMem / kb);
printf("Block registers: %i\n", props.regsPerBlock);
printf("Warp size: %i\n", props.warpSize);
printf("Threads per block: %i\n", props.maxThreadsPerBlock);
printf("Max block dimensions: [ %i, %i, %i]\n", props.maxThreadsDim[0], props.maxThreadsDim[1],
props.maxThreadsDim[2]);
printf("Max grid dimensions: [ %i, %i, %i]\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
}
}
void gpu_matrix_product(const float* A, const float* B, float* C)
{
// Initialize CUBLAS
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS)
printf("CUBLAS initialization error with message %s\n", status);
float alpha = 1.0f;
float beta = 0.0f;
//C = alpha*A*B + beta * C
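// cuBLAS/hipBLAS assume column-major storage, so B and A are passed in swapped order with their row-major leading dimensions;
// the call then computes (A*B)^T in column-major form, which is byte-for-byte the row-major layout of A*B in C.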
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, colB, rowA, colA, &alpha, B, colB, A, colA, &beta, C, colC);
if (status != HIPBLAS_STATUS_SUCCESS)
printf("Kernel execution error with message %s\n", status);
}
void cpu_matrix_product(const float* A, const float* B, float* C)
{
for (int i = 0; i < rowC; i++)
{
for (int j = 0; j < colC; j++)
{
C[ci(i, j, colC)] = 0;
}
}
for (int i = 0; i < rowA; i++)
{
for (int j = 0; j < colB; j++)
{
for (int k = 0; k < colA; k++)
{
C[ci(i, j, colC)] += A[ci(i, k, colA)] * B[ci(k, j, colB)];
}
}
}
}
void gpu_random_init(float* A, int rows, int cols)
{
// Create a pseudo-random number generator
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
hiprandSetPseudoRandomGeneratorSeed(gen, (unsigned long long)clock());
// Fill the array with random numbers on the device
size_t n = rows * cols;
hiprandGenerateUniform(gen, A, n);
hiprandDestroyGenerator(gen); /* Cleanup */
}
void matrix_print(float* A, int rows, int cols)
{
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
std::cout << A[ci(i, j, cols)] << " ";
}
printf("\n");
}
}
void check_results(float* A, float* B)
{
float eps = 0.001;
for (int i = 0; i < rowC; i++)
{
for (int j = 0; j < colC; j++)
{
if (fabs(A[ci(i, j, colC)] - B[ci(i, j, colC)]) > eps)
{
printf("The element %f is not equal $f \n", A[ci(i, j, colC)], B[ci(i, j, colC)]);
return;
}
}
}
}
|
a6432615b9e9c2bcdacb76c1a32a1b3a2d3364b5.cu
|
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <cublas_v2.h>
#include <iostream>
#include <cuda.h>
#include <curand.h>
#include <stdio.h>
// C-style indexing
int ci(int row, int column, int nColumns)
{
return row * nColumns + column;
}
int rowA = 1000;
int colA = 3000;
int rowB = colA;
int colB = 2000;
int rowC = rowA;
int colC = colB;
cublasHandle_t handle;
cublasStatus_t status;
void device_info();
void gpu_random_init(float* A, int rows, int cols);
void gpu_matrix_product(const float* A, const float* B, float* C);
void cpu_matrix_product(const float* A, const float* B, float* C);
void matrix_print(float* A, int rows, int cols);
void check_results(float* A, float* B);
int main()
{
device_info();
// allocate three device_vectors with row*col elements
thrust::device_vector<float> d_A(rowA * colA);
thrust::device_vector<float> d_B(rowB * colB);
thrust::device_vector<float> d_C(rowC * colC);
// initialize matrices A and B with cuRAND on the device
gpu_random_init(raw_pointer_cast(&d_A[0]), rowA, colA);
gpu_random_init(raw_pointer_cast(&d_B[0]), rowB, colB);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
gpu_matrix_product(raw_pointer_cast(&d_A[0]), raw_pointer_cast(&d_B[0]), raw_pointer_cast(&d_C[0]));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("CUBLAS time : %f ms\n", milliseconds);
thrust::host_vector<float> h_A(rowA * colA);
thrust::host_vector<float> h_B(rowB * colB);
thrust::host_vector<float> h_C_cublas(rowC * colC);
thrust::host_vector<float> h_C_cpu(rowC * colC);
// Copy matrices A, B, and C from device to host
copy(d_A.begin(), d_A.end(), h_A.begin());
copy(d_B.begin(), d_B.end(), h_B.begin());
copy(d_C.begin(), d_C.end(), h_C_cublas.begin());
//Evaluate matrix product on cpu
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cpu_matrix_product(&h_A[0], &h_B[0], &h_C_cpu[0]);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("CPU's time : %f ms\n", milliseconds);
// Compare the matrices element by element
check_results(&h_C_cublas[0], &h_C_cpu[0]);
return 0;
}
void device_info()
{
int kb = 1024;
int mb = kb * kb;
int GPU_N;
cudaGetDeviceCount(&GPU_N);
printf("Device count: %d\n", GPU_N);
for (int i = 0; i < GPU_N; i++)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
printf("PCI Bus id: %d\n", props.pciBusID);
cudaGetDeviceProperties(&props, i);
printf("Device %i: %s: %i.%i\n", i, props.name, props.major, props.minor);
printf("Global memory: %i mb\n", props.totalGlobalMem / mb);
printf("Shared memory: %i kb\n", props.sharedMemPerBlock / kb);
printf("Constant memory: %i kb\n", props.totalConstMem / kb);
printf("Block registers: %i\n", props.regsPerBlock);
printf("Warp size: %i\n", props.warpSize);
printf("Threads per block: %i\n", props.maxThreadsPerBlock);
printf("Max block dimensions: [ %i, %i, %i]\n", props.maxThreadsDim[0], props.maxThreadsDim[1],
props.maxThreadsDim[2]);
printf("Max grid dimensions: [ %i, %i, %i]\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
}
}
void gpu_matrix_product(const float* A, const float* B, float* C)
{
// Initialize CUBLAS
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS)
printf("CUBLAS initialization error with message %s\n", status);
float alpha = 1.0f;
float beta = 0.0f;
//C = alpha*A*B + beta * C
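// cuBLAS assumes column-major storage, so B and A are passed in swapped order with their row-major leading dimensions;
// the call then computes (A*B)^T in column-major form, which is byte-for-byte the row-major layout of A*B in C.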
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, colB, rowA, colA, &alpha, B, colB, A, colA, &beta, C, colC);
if (status != CUBLAS_STATUS_SUCCESS)
printf("Kernel execution error with message %s\n", status);
}
void cpu_matrix_product(const float* A, const float* B, float* C)
{
for (int i = 0; i < rowC; i++)
{
for (int j = 0; j < colC; j++)
{
C[ci(i, j, colC)] = 0;
}
}
for (int i = 0; i < rowA; i++)
{
for (int j = 0; j < colB; j++)
{
for (int k = 0; k < colA; k++)
{
C[ci(i, j, colC)] += A[ci(i, k, colA)] * B[ci(k, j, colB)];
}
}
}
}
void gpu_random_init(float* A, int rows, int cols)
{
// Create a pseudo-random number generator
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(gen, (unsigned long long)clock());
// Fill the array with random numbers on the device
size_t n = rows * cols;
curandGenerateUniform(gen, A, n);
curandDestroyGenerator(gen); /* Cleanup */
}
void matrix_print(float* A, int rows, int cols)
{
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
std::cout << A[ci(i, j, cols)] << " ";
}
printf("\n");
}
}
void check_results(float* A, float* B)
{
float eps = 0.001;
for (int i = 0; i < rowC; i++)
{
for (int j = 0; j < colC; j++)
{
if (fabs(A[ci(i, j, colC)] - B[ci(i, j, colC)]) > eps)
{
printf("The element %f is not equal $f \n", A[ci(i, j, colC)], B[ci(i, j, colC)]);
return;
}
}
}
}
|
7f0f4bb19318b025499e503ed27385f9fe45105d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
batch version of ball query, modified from the original implementation of official PointNet++ codes.
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int cnt = 0;
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d2 < radius2){
if (cnt == 0){
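// pre-fill every output slot with the first in-radius point, so slots left unfilled (fewer than nsample neighbors) still hold a valid index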
for (int l = 0; l < nsample; ++l) {
idx[l] = k;
}
}
idx[cnt] = k;
++cnt;
if (cnt >= nsample) break;
}
}
}
void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \
const float *new_xyz, const float *xyz, int *idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
hipError_t err;
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( ball_query_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, n, m, radius, nsample, new_xyz, xyz, idx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
|
7f0f4bb19318b025499e503ed27385f9fe45105d.cu
|
/*
batch version of ball query, modified from the original implementation of official PointNet++ codes.
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int cnt = 0;
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d2 < radius2){
if (cnt == 0){
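// pre-fill every output slot with the first in-radius point, so slots left unfilled (fewer than nsample neighbors) still hold a valid index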
for (int l = 0; l < nsample; ++l) {
idx[l] = k;
}
}
idx[cnt] = k;
++cnt;
if (cnt >= nsample) break;
}
}
}
void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \
const float *new_xyz, const float *xyz, int *idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
cudaError_t err;
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
ball_query_kernel_fast<<<blocks, threads>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
07f1f1b7b150774211532715d49bcf13b8c9ad72.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 08.11.2018
// @author [email protected]
//
#include "../scalar_bool.h"
#include <op_boilerplate.h>
#include <types/types.h>
#include "../legacy_ops.h"
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
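// Thin __global__ wrappers: each simply forwards its arguments to the corresponding device-side ScalarBoolTransform::transformCuda specialization.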
template <typename X, typename Z, typename OpType>
__global__ void scalarAlongDimension(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarSimpleShaped(void* x, void *y, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationBuffer) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer);
}
// *********************************************************************//
// *********************************************************************//
namespace functions {
namespace scalar {
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void* vscalar,
void *vy, Nd4jLong *yShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationBuffer) {
auto scalar = reinterpret_cast<X*>(vscalar)[0];
auto y = reinterpret_cast<X*>(vy);
auto params = reinterpret_cast<X*>(vparams);
auto z = reinterpret_cast<Z*>(vz);
auto yRank = shape::rank(yShapeInfo);
auto yEWS = shape::elementWiseStride(yShapeInfo);
auto yShape = shape::shapeOf(yShapeInfo);
auto yStride = shape::stride(yShapeInfo);
auto zRank = shape::rank(zShapeInfo);
auto zEWS = shape::elementWiseStride(zShapeInfo);
auto zShape = shape::shapeOf(zShapeInfo);
auto zStride = shape::stride(zShapeInfo);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int len;
if(threadIdx.x == 0)
len = shape::length(yShapeInfo);
__syncthreads();
if(yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) {
transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer);
}
else {
for (Nd4jLong i = tid; i < len; i+= totalThreads)
z[shape::getIndexOffset(i, zShapeInfo)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo)], scalar, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(Nd4jLong len,
void* vx,
void *vy, Nd4jLong yEWS,
void *vparams,
void *vz, Nd4jLong zEWS,
int *allocationBuffer) {
auto x = reinterpret_cast<X*>(vx)[0];
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<X*>(vparams);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
Nd4jLong i = tid;
if(yEWS == 1 && zEWS == 1) {
for (; i < len; i += totalThreads)
z[i] = OpType::op(y[i], x, params);
}
else {
for (; i < len; i += totalThreads)
z[i * zEWS] = OpType::op(y[i * yEWS], x, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
void *vscalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
auto x = reinterpret_cast<X*>(vx);
auto scalars = reinterpret_cast<X*>(vscalars);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
if (tadShapeInfoZ == nullptr) {
tadShapeInfoZ = tadShapeInfo;
tadOffsetsZ = tadOffsets;
}
// tad preparation
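// each block processes one TAD (tensor along a dimension) at a time; threads within the block stride over that TAD's elements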
auto tadEws = shape::elementWiseStride(tadShapeInfo);
auto zEws = shape::elementWiseStride(tadShapeInfoZ);
auto tadLength = shape::length(tadShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
auto numTads =shape::length(xShapeInfo) / tadLength;
if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams);
}
} else {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[shape::getIndexOffset(f, tadShapeInfoZ)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo)], s, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void ScalarBoolTransform<X, Z>::intermediateAlongDimension(dim3& launchDims, hipStream_t *stream,
void *x, Nd4jLong *xShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
void *extraParams,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
hipLaunchKernelGGL(( scalarAlongDimension<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
nd4j::DebugHelper::checkErrorCode(stream, "scalarAlongDim(...) failed");
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
void _CUDA_H ScalarBoolTransform<X,Z>::intermediateShaped(dim3& launchDims, hipStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams, int *allocPointer){
hipLaunchKernelGGL(( scalarSimpleShaped<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
nd4j::DebugHelper::checkErrorCode(stream, "scalarSimpleShaped(...) failed");
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaShaped(dim3& launchDims, hipStream_t *stream,
int opNum,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H14 opNum:[%i]\n", opNum);
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_BOOL_OPS);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaAlongDimension(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vscalars, void *vextraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_BOOL_OPS);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ScalarBoolTransform, , LIBND4J_TYPES, BOOL_TYPES);
template<typename X, typename Y>
template <typename OpType>
void ScalarBoolTransform<X,Y>::transform(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::transform(int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::transform(const int opNum, void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::transform(const int opNum, void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}
template<typename X, typename Y>
template<typename OpType>
void ScalarBoolTransform<X,Y>::transform(void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}
template<typename X, typename Y>
template<typename OpType>
void ScalarBoolTransform<X,Y>::transform(void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}
}
}
|
07f1f1b7b150774211532715d49bcf13b8c9ad72.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 08.11.2018
// @author [email protected]
//
#include "../scalar_bool.h"
#include <op_boilerplate.h>
#include <types/types.h>
#include "../legacy_ops.h"
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarAlongDimension(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarSimpleShaped(void* x, void *y, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationBuffer) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer);
}
// *********************************************************************//
// *********************************************************************//
namespace functions {
namespace scalar {
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void* vscalar,
void *vy, Nd4jLong *yShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationBuffer) {
auto scalar = reinterpret_cast<X*>(vscalar)[0];
auto y = reinterpret_cast<X*>(vy);
auto params = reinterpret_cast<X*>(vparams);
auto z = reinterpret_cast<Z*>(vz);
auto yRank = shape::rank(yShapeInfo);
auto yEWS = shape::elementWiseStride(yShapeInfo);
auto yShape = shape::shapeOf(yShapeInfo);
auto yStride = shape::stride(yShapeInfo);
auto zRank = shape::rank(zShapeInfo);
auto zEWS = shape::elementWiseStride(zShapeInfo);
auto zShape = shape::shapeOf(zShapeInfo);
auto zStride = shape::stride(zShapeInfo);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int len;
if(threadIdx.x == 0)
len = shape::length(yShapeInfo);
__syncthreads();
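// Fast path: when both y and z expose a positive element-wise stride and share the same
// ordering, defer to the flat-stride overload below; otherwise fall back to per-element
// shape-based indexing via getIndexOffset.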
if(yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) {
transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer);
}
else {
for (Nd4jLong i = tid; i < len; i+= totalThreads)
z[shape::getIndexOffset(i, zShapeInfo)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo)], scalar, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(Nd4jLong len,
void* vx,
void *vy, Nd4jLong yEWS,
void *vparams,
void *vz, Nd4jLong zEWS,
int *allocationBuffer) {
auto x = reinterpret_cast<X*>(vx)[0];
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<X*>(vparams);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
Nd4jLong i = tid;
if(yEWS == 1 && zEWS == 1) {
for (; i < len; i += totalThreads)
z[i] = OpType::op(y[i], x, params);
}
else {
for (; i < len; i += totalThreads)
z[i * zEWS] = OpType::op(y[i * yEWS], x, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
void *vscalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
auto x = reinterpret_cast<X*>(vx);
auto scalars = reinterpret_cast<X*>(vscalars);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
if (tadShapeInfoZ == nullptr) {
tadShapeInfoZ = tadShapeInfo;
tadOffsetsZ = tadOffsets;
}
// tad preparation
auto tadEws = shape::elementWiseStride(tadShapeInfo);
auto zEws = shape::elementWiseStride(tadShapeInfoZ);
auto tadLength = shape::length(tadShapeInfo); // shape::tadLength(xShapeInfo, dimension, dimensionLength);
auto numTads = shape::length(xShapeInfo) / tadLength;
if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams);
}
} else {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[shape::getIndexOffset(f, tadShapeInfoZ)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo)], s, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void ScalarBoolTransform<X, Z>::intermediateAlongDimension(dim3& launchDims, cudaStream_t *stream,
void *x, Nd4jLong *xShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
void *extraParams,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
scalarAlongDimension<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
nd4j::DebugHelper::checkErrorCode(stream, "scalarAlongDim(...) failed");
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
void _CUDA_H ScalarBoolTransform<X,Z>::intermediateShaped(dim3& launchDims, cudaStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams, int *allocPointer){
scalarSimpleShaped<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
nd4j::DebugHelper::checkErrorCode(stream, "scalarSimpleShaped(...) failed");
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaShaped(dim3& launchDims, cudaStream_t *stream,
int opNum,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H14 opNum:[%i]\n", opNum);
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_BOOL_OPS);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaAlongDimension(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vscalars, void *vextraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_BOOL_OPS);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ScalarBoolTransform, , LIBND4J_TYPES, BOOL_TYPES);
template<typename X, typename Y>
template <typename OpType>
void ScalarBoolTransform<X,Y>::transform(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::transform(int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::transform(const int opNum, void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::transform(const int opNum, void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}
template<typename X, typename Y>
template<typename OpType>
void ScalarBoolTransform<X,Y>::transform(void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}
template<typename X, typename Y>
template<typename OpType>
void ScalarBoolTransform<X,Y>::transform(void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}
}
}
|
fcfe9c067280499a76bc8e42e7e36913a0456bf6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlaswp_batched.cu, normal z -> s, Mon Jun 25 18:24:15 2018
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define BLK_SIZE 256
#define SLASWP_COL_NTH 32
// SWP_WIDTH is the number of threads in a block
// 64 and 256 are better on Kepler;
extern __shared__ float shared_data[];
/******************************************************************************/
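// Row-parallel swap (summary of the device function below): thread tid copies row
// (pivinfo[tid] - 1) of the current SWP_WIDTH-wide column block of dA into row tid of dout
// via shared memory, then overwrites that source row of dA with row
// (pivinfo[pivinfo[tid] - 1] - 1); pivinfo is read as a 1-based (Fortran-style) row mapping.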
static __device__
void slaswp_rowparallel_devfunc(
int n, int width, int height,
float *dA, int lda,
float *dout, int ldo,
magma_int_t* pivinfo)
{
//int height = k2- k1;
//int height = blockDim.x;
unsigned int tid = threadIdx.x;
dA += SWP_WIDTH * blockIdx.x * lda;
dout += SWP_WIDTH * blockIdx.x * ldo;
float *sdata = shared_data;
if (blockIdx.x == gridDim.x -1)
{
width = n - blockIdx.x * SWP_WIDTH;
}
if (tid < height)
{
int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C
int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C
//printf("%d: mynewroworig = %d, itsreplacement = %d\n", tid, mynewroworig, itsreplacement);
#pragma unroll
for (int i=0; i < width; i++)
{
sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ];
dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ];
}
}
__syncthreads();
if (tid < height)
{
// copy back the upper swapped portion of A to dout
#pragma unroll
for (int i=0; i < width; i++)
{
dout[tid + i * ldo] = sdata[tid + i * height];
}
}
}
/******************************************************************************/
// parallel swap: the swapped dA(1:nb,i:n) is stored in dout
__global__
void slaswp_rowparallel_kernel(
int n, int width, int height,
float *dinput, int ldi,
float *doutput, int ldo,
magma_int_t* pivinfo)
{
slaswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo);
}
/******************************************************************************/
__global__
void slaswp_rowparallel_kernel_batched(
int n, int width, int height,
float **input_array, int input_i, int input_j, int ldi,
float **output_array, int output_i, int output_j, int ldo,
magma_int_t** pivinfo_array)
{
int batchid = blockIdx.z;
slaswp_rowparallel_devfunc( n, width, height,
input_array[batchid] + input_j * ldi + input_i, ldi,
output_array[batchid] + output_j * ldo + output_i, ldo,
pivinfo_array[batchid]);
}
/******************************************************************************/
extern "C" void
magma_slaswp_rowparallel_batched( magma_int_t n,
float** input_array, magma_int_t input_i, magma_int_t input_j, magma_int_t ldi,
float** output_array, magma_int_t output_i, magma_int_t output_j, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t **pivinfo_array,
magma_int_t batchCount, magma_queue_t queue)
{
#define input_array(i,j) input_array, i, j
#define output_array(i,j) output_array, i, j
if (n == 0 ) return;
int height = k2-k1;
if ( height > 1024)
{
fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, batchCount);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(float) * height * n;
hipLaunchKernelGGL(( slaswp_rowparallel_kernel_batched)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, n, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
else
{
size_t shmem = sizeof(float) * height * SWP_WIDTH;
hipLaunchKernelGGL(( slaswp_rowparallel_kernel_batched)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, SWP_WIDTH, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
#undef input_array
#undef output_array
}
/******************************************************************************/
extern "C" void
magma_slaswp_rowparallel_native(
magma_int_t n,
float* input, magma_int_t ldi,
float* output, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t *pivinfo,
magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > MAX_NTHREADS)
{
fprintf( stderr, "%s: height=%lld > %lld, magma_slaswp_rowparallel_q not supported\n",
__func__, (long long) height, (long long) MAX_NTHREADS );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, 1);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(float) * height * n;
hipLaunchKernelGGL(( slaswp_rowparallel_kernel)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, n, height, input, ldi, output, ldo, pivinfo );
}
else
{
size_t shmem = sizeof(float) * height * SWP_WIDTH;
hipLaunchKernelGGL(( slaswp_rowparallel_kernel)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo );
}
}
/******************************************************************************/
// serial swap that swaps one row at a time
__global__ void slaswp_rowserial_kernel_batched( int n, float **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
float* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if (tid < n) {
float A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap that swaps one row at a time
__global__ void slaswp_rowserial_kernel_native( int n, magmaFloat_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
//k1--;
//k2--;
if (tid < n) {
float A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap that swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_slaswp_rowserial_batched(magma_int_t n, float** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
hipLaunchKernelGGL(( slaswp_rowserial_kernel_batched)
, dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() ,
n, dA_array, lda, k1, k2, ipiv_array);
}
/******************************************************************************/
// serial swap that swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_slaswp_rowserial_native(magma_int_t n, magmaFloat_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t* dipiv, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, 1);
hipLaunchKernelGGL(( slaswp_rowserial_kernel_native)
, dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() ,
n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap that swaps one column at a time
__device__ void slaswp_columnserial_devfunc(int n, magmaFloat_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if ( k1 < 0 || k2 < 0 ) return;
if ( tid < n) {
float A1;
if (k1 <= k2)
{
for (int i1 = k1; i1 <= k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
} else
{
for (int i1 = k1; i1 >= k2; i1--)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
}
}
}
__global__ void slaswp_columnserial_kernel_batched( int n, float **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
float* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
slaswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
__global__ void slaswp_columnserial_kernel( int n, magmaFloat_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
slaswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap that swaps one column at a time
// K1, K2 are in Fortran indexing
extern "C" void
magma_slaswp_columnserial(
magma_int_t n, magmaFloat_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t *dipiv, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, SLASWP_COL_NTH );
dim3 grid(blocks, 1, 1);
hipLaunchKernelGGL(( slaswp_columnserial_kernel), dim3(grid), dim3(SLASWP_COL_NTH), 0, queue->cuda_stream() ,
n, dA, lda, k1, k2, dipiv);
}
extern "C" void
magma_slaswp_columnserial_batched(magma_int_t n, float** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, SLASWP_COL_NTH );
dim3 grid(blocks, 1, batchCount);
hipLaunchKernelGGL(( slaswp_columnserial_kernel_batched)
, dim3(grid), dim3(min(SLASWP_COL_NTH,n)), 0, queue->cuda_stream() ,
n, dA_array, lda, k1, k2, ipiv_array);
}
|
fcfe9c067280499a76bc8e42e7e36913a0456bf6.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlaswp_batched.cu, normal z -> s, Mon Jun 25 18:24:15 2018
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define BLK_SIZE 256
#define SLASWP_COL_NTH 32
// SWP_WIDTH is the number of threads in a block
// 64 and 256 are better on Kepler;
extern __shared__ float shared_data[];
/******************************************************************************/
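// Row-parallel swap (summary of the device function below): thread tid copies row
// (pivinfo[tid] - 1) of the current SWP_WIDTH-wide column block of dA into row tid of dout
// via shared memory, then overwrites that source row of dA with row
// (pivinfo[pivinfo[tid] - 1] - 1); pivinfo is read as a 1-based (Fortran-style) row mapping.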
static __device__
void slaswp_rowparallel_devfunc(
int n, int width, int height,
float *dA, int lda,
float *dout, int ldo,
magma_int_t* pivinfo)
{
//int height = k2- k1;
//int height = blockDim.x;
unsigned int tid = threadIdx.x;
dA += SWP_WIDTH * blockIdx.x * lda;
dout += SWP_WIDTH * blockIdx.x * ldo;
float *sdata = shared_data;
if (blockIdx.x == gridDim.x -1)
{
width = n - blockIdx.x * SWP_WIDTH;
}
if (tid < height)
{
int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C
int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C
//printf("%d: mynewroworig = %d, itsreplacement = %d\n", tid, mynewroworig, itsreplacement);
#pragma unroll
for (int i=0; i < width; i++)
{
sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ];
dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ];
}
}
__syncthreads();
if (tid < height)
{
// copy back the upper swapped portion of A to dout
#pragma unroll
for (int i=0; i < width; i++)
{
dout[tid + i * ldo] = sdata[tid + i * height];
}
}
}
/******************************************************************************/
// parallel swap: the swapped dA(1:nb,i:n) is stored in dout
__global__
void slaswp_rowparallel_kernel(
int n, int width, int height,
float *dinput, int ldi,
float *doutput, int ldo,
magma_int_t* pivinfo)
{
slaswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo);
}
/******************************************************************************/
__global__
void slaswp_rowparallel_kernel_batched(
int n, int width, int height,
float **input_array, int input_i, int input_j, int ldi,
float **output_array, int output_i, int output_j, int ldo,
magma_int_t** pivinfo_array)
{
int batchid = blockIdx.z;
slaswp_rowparallel_devfunc( n, width, height,
input_array[batchid] + input_j * ldi + input_i, ldi,
output_array[batchid] + output_j * ldo + output_i, ldo,
pivinfo_array[batchid]);
}
/******************************************************************************/
extern "C" void
magma_slaswp_rowparallel_batched( magma_int_t n,
float** input_array, magma_int_t input_i, magma_int_t input_j, magma_int_t ldi,
float** output_array, magma_int_t output_i, magma_int_t output_j, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t **pivinfo_array,
magma_int_t batchCount, magma_queue_t queue)
{
#define input_array(i,j) input_array, i, j
#define output_array(i,j) output_array, i, j
if (n == 0 ) return;
int height = k2-k1;
if ( height > 1024)
{
fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, batchCount);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(float) * height * n;
slaswp_rowparallel_kernel_batched
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, n, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
else
{
size_t shmem = sizeof(float) * height * SWP_WIDTH;
slaswp_rowparallel_kernel_batched
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, SWP_WIDTH, height, input_array, input_i, input_j, ldi, output_array, output_i, output_j, ldo, pivinfo_array );
}
#undef input_array
#undef output_array
}
/******************************************************************************/
extern "C" void
magma_slaswp_rowparallel_native(
magma_int_t n,
float* input, magma_int_t ldi,
float* output, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t *pivinfo,
magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > MAX_NTHREADS)
{
fprintf( stderr, "%s: height=%lld > %lld, magma_slaswp_rowparallel_q not supported\n",
__func__, (long long) height, (long long) MAX_NTHREADS );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, 1);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(float) * height * n;
slaswp_rowparallel_kernel
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, n, height, input, ldi, output, ldo, pivinfo );
}
else
{
size_t shmem = sizeof(float) * height * SWP_WIDTH;
slaswp_rowparallel_kernel
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo );
}
}
/******************************************************************************/
// serial swap that swaps one row at a time
__global__ void slaswp_rowserial_kernel_batched( int n, float **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
float* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if (tid < n) {
float A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap that swaps one row at a time
__global__ void slaswp_rowserial_kernel_native( int n, magmaFloat_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
//k1--;
//k2--;
if (tid < n) {
float A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap that swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_slaswp_rowserial_batched(magma_int_t n, float** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
slaswp_rowserial_kernel_batched
<<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>>
(n, dA_array, lda, k1, k2, ipiv_array);
}
/******************************************************************************/
// serial swap that swaps one row at a time, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_slaswp_rowserial_native(magma_int_t n, magmaFloat_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t* dipiv, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, 1);
slaswp_rowserial_kernel_native
<<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>>
(n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap that swaps one column at a time
__device__ void slaswp_columnserial_devfunc(int n, magmaFloat_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if ( k1 < 0 || k2 < 0 ) return;
if ( tid < n) {
float A1;
if (k1 <= k2)
{
for (int i1 = k1; i1 <= k2; i1++)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
} else
{
for (int i1 = k1; i1 >= k2; i1--)
{
int i2 = dipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
}
}
}
__global__ void slaswp_columnserial_kernel_batched( int n, float **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
float* dA = dA_array[blockIdx.z];
magma_int_t *dipiv = ipiv_array[blockIdx.z];
slaswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
__global__ void slaswp_columnserial_kernel( int n, magmaFloat_ptr dA, int lda, int k1, int k2, magma_int_t* dipiv )
{
slaswp_columnserial_devfunc(n, dA, lda, k1, k2, dipiv);
}
/******************************************************************************/
// serial swap that swaps one column at a time
// K1, K2 are in Fortran indexing
extern "C" void
magma_slaswp_columnserial(
magma_int_t n, magmaFloat_ptr dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t *dipiv, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, SLASWP_COL_NTH );
dim3 grid(blocks, 1, 1);
slaswp_columnserial_kernel<<< grid, SLASWP_COL_NTH, 0, queue->cuda_stream() >>>
(n, dA, lda, k1, k2, dipiv);
}
extern "C" void
magma_slaswp_columnserial_batched(magma_int_t n, float** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, SLASWP_COL_NTH );
dim3 grid(blocks, 1, batchCount);
slaswp_columnserial_kernel_batched
<<< grid, min(SLASWP_COL_NTH,n), 0, queue->cuda_stream() >>>
(n, dA_array, lda, k1, k2, ipiv_array);
}
|
31c868a437d455c2947cbe1c85e9d403e22bd1c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_saxpy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float a = 2;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(float));
float *s = NULL;
hipMalloc(&s, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
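// Round the matrix dimensions up to the next multiples of the block dimensions so the grid
// computed below covers the whole XSIZE x YSIZE problem.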
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gpu_saxpy), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,x,y,s);
hipDeviceSynchronize();
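// Warm-up: one synchronized launch above plus 10 untimed launches below; the timed region
// then issues 1000 launches with no device synchronization inside it, so it mostly measures
// kernel launch/queueing overhead.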
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gpu_saxpy), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,x,y,s);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gpu_saxpy), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,x,y,s);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
31c868a437d455c2947cbe1c85e9d403e22bd1c7.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_saxpy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float a = 2;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
float *s = NULL;
cudaMalloc(&s, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
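// Round the matrix dimensions up to the next multiples of the block dimensions so the grid
// computed below covers the whole XSIZE x YSIZE problem.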
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_saxpy<<<gridBlock,threadBlock>>>(n,a,x,y,s);
cudaDeviceSynchronize();
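// Warm-up: one synchronized launch above plus 10 untimed launches below; the timed region
// then issues 1000 launches with no device synchronization inside it, so it mostly measures
// kernel launch/queueing overhead.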
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_saxpy<<<gridBlock,threadBlock>>>(n,a,x,y,s);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_saxpy<<<gridBlock,threadBlock>>>(n,a,x,y,s);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1ccb4b74ebf5b76f97fdf8ce88e7982bd7ef3980.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "codegen/embedding_forward_template_helpers.cuh"
namespace nbit {
using namespace at;
using namespace fbgemm_gpu;
// Keep in sync with split_embedding_configs.py:SparseType
enum class SparseType : uint8_t {
FP32 = 0,
FP16 = 1,
INT8 = 2,
INT4 = 3,
INT2 = 4,
};
__forceinline__ __host__ __device__ uint32_t round_up(uint32_t a, uint32_t b) {
return ((a + b - 1) / b) * b;
}
__forceinline__ __host__ __device__ uint32_t div_round_up(uint32_t a, uint32_t b) {
return ((a + b - 1) / b);
}
__host__ __device__ inline int32_t unpadded_row_size_in_bytes(int32_t dim, SparseType weight_ty) {
if (weight_ty == SparseType::FP16) { return dim * 2; }
if (weight_ty == SparseType::INT8) { return dim + 4; }
if (weight_ty == SparseType::INT4) { return dim / 2 + 4; }
if (weight_ty == SparseType::INT2) { return dim / 4 + 4; }
return 0;
}
__host__ __device__ inline int32_t padded_row_size_in_bytes(int32_t dim, SparseType weight_ty) {
auto r = unpadded_row_size_in_bytes(dim, weight_ty);
return round_up(r, 16);
}
// "Effective" number of elements in the row when we include the row-wise quantization parameters.
__device__ inline int32_t padded_D(int32_t dim, SparseType weight_ty) {
if (weight_ty == SparseType::FP16) { return dim; }
if (weight_ty == SparseType::INT8) { return dim + 4; }
if (weight_ty == SparseType::INT4) { return dim + 8; }
if (weight_ty == SparseType::INT2) { return dim + 16; }
return 0;
}
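// Worked example (hypothetical sizes, for illustration only): an INT4 row with dim = 128
// takes 128/2 + 4 = 68 bytes unpadded (the +4 bytes hold the row-wise quantization
// parameters), which round_up(68, 16) pads to 80 bytes; padded_D reports 128 + 8 = 136,
// counting those 4 bytes as 8 extra 4-bit elements.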
struct __align__(32) float8 {
__host__ __device__ float8() {}
float4 vals[2];
};
struct __align__(8) half4 {
__host__ __device__ half4() {}
half2 vals[2];
};
struct __align__(16) half8 {
__host__ __device__ half8() {}
half2 vals[4];
};
__device__ __forceinline__ float4 make_zero_float4() {
return make_float4(0, 0, 0, 0);
}
__device__ __forceinline__ float8 make_zero_float8() {
float8 t;
t.vals[0] = make_float4(0, 0, 0, 0);
t.vals[1] = make_float4(0, 0, 0, 0);
return t;
}
__device__ __forceinline__ float2 make_zero_float2() {
return make_float2(0, 0);
}
__device__ __forceinline__ half8 to_half8(float8 v) {
half8 t;
t.vals[0] = __float22half2_rn(make_float2(v.vals[0].x, v.vals[0].y));
t.vals[1] = __float22half2_rn(make_float2(v.vals[0].z, v.vals[0].w));
t.vals[2] = __float22half2_rn(make_float2(v.vals[1].x, v.vals[1].y));
t.vals[3] = __float22half2_rn(make_float2(v.vals[1].z, v.vals[1].w));
return t;
}
__device__ __forceinline__ half4 to_half4(float4 v) {
half4 t;
t.vals[0] = __float22half2_rn(make_float2(v.x, v.y));
t.vals[1] = __float22half2_rn(make_float2(v.z, v.w));
return t;
}
__device__ __forceinline__ __half2 to_half2(float2 v) {
return __float22half2_rn(v);
}
__forceinline__ __device__ __half2 hfma2(const __half2 a, const __half2 b, const __half2 c) {
#if __CUDA_ARCH__ >= 530 && __CUDA_ARCH__ != 610
return __hfma2(a, b, c);
#else
float2 fa, fb, fc;
fa = __half22float2(a);
fb = __half22float2(b);
fc = __half22float2(c);
fc.x = fa.x * fb.x + fc.x;
fc.y = fa.y * fb.y + fc.y;
return __float22half2_rn(fc);
#endif
}
__forceinline__ __device__ half hmul(half a, half b) {
#if __CUDA_ARCH__ >= 530 && __CUDA_ARCH__ != 610
return __hmul(a, b);
#else
return __float2half(__half2float(a) * __half2float(b));
#endif
}
// Reinterpret a pair of uint16_t (packed into a uint32_t) as half2, and multiply by rhs.
__device__ __forceinline__ __half2 hmul_short2(uint32_t lhs, __half rhs) {
#if __CUDA_ARCH__ >= 530 && __CUDA_ARCH__ != 610
__half2 ret;
__half2 rhsp = make_half2(rhs, rhs);
asm("mul.f16x2 %0, %1, %2;" : "=r"(__HALF2_TO_UI(ret)) : "r"(__HALF2_TO_CUI(lhs)), "r"(__HALF2_TO_CUI(rhsp)));
return ret;
#else
__half2 lhs_h2;
__HALF2_TO_UI(lhs_h2) = lhs;
float2 fx = __half22float2(lhs_h2);
float2 fy = __half22float2(make_half2(rhs, rhs));
float2 fr;
fr.x = fx.x * fy.x;
fr.y = fx.y * fy.y;
return __float22half2_rn(fr);
#endif
}
__forceinline__ __device__ half8 dequantize_permuted_int4(uint32_t packedVals, __half2 shift_scale) {
half8 res;
uint32_t v = packedVals;
// What's going on here, you might ask? We extract 4-bit pairs of integers as 2xuint16 packed into an int32
// via the mask operation, and then we convert them to half precision values.
// As these are all integers in [0, 15], we can actually just interpret the 4-bit integer values as half-precision values.
// We multiply by 4096 x 4096 to go from the 4-bit representation to the equivalent fp16 value,
// or alternatively 32768 * 512 (or 32 when we have shifted the 4-bit value up).
// See e.g. https://gist.github.com/ajtulloch/021254a291a95966bc509db4e34ffeff for a NumPy implementation.
// We do this dance because:
// a) doing bitwise operations on each 4-bit value is expensive on the ALU, and 4-bit to half is expensive on the XU.
// b) doing a 256-entry shared memory LUT on 8-bit pairs is expensive on SMEM throughput.
// Credit to @jhj.
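// Worked example of the constants above (illustrative): a 4-bit value n sitting in the low
// mantissa bits of an fp16 pattern is the subnormal n * 2^-24; multiplying by 32768 (2^15)
// in hmul_short2 gives n * 2^-9, and the later hfma2 by shift_scale.x * 512 (2^9) recovers
// n * scale + shift. For the nibble already shifted up by 4 bits the intermediate is
// n * 2^-5, hence the factor 32 (2^5) instead of 512; 4096 * 4096 = 32768 * 512 = 2^24.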
res.vals[0] = hmul_short2(v & 0x000F000F, 32768);
res.vals[1] = hmul_short2(v & 0x00F000F0, 32768);
v >>= 8;
res.vals[2] = hmul_short2(v & 0x000F000F, 32768);
res.vals[3] = hmul_short2(v & 0x00F000F0, 32768);
res.vals[0] =
hfma2(res.vals[0], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
res.vals[1] =
hfma2(res.vals[1], __half2(hmul(shift_scale.x, 32), hmul(shift_scale.x, 32)),
__half2(shift_scale.y, shift_scale.y));
res.vals[2] =
hfma2(res.vals[2], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
res.vals[3] =
hfma2(res.vals[3], __half2(hmul(shift_scale.x, 32), hmul(shift_scale.x, 32)),
__half2(shift_scale.y, shift_scale.y));
return res;
}
__forceinline__ __device__ half4 dequantize_permuted_int8(uint32_t packedVals, __half2 shift_scale) {
half4 res;
uint32_t v = packedVals;
// See comment above, this is a minor variation.
res.vals[0] = hmul_short2(v & 0x00FF00FF, 32768);
v >>= 8;
res.vals[1] = hmul_short2(v & 0x00FF00FF, 32768);
res.vals[0] =
hfma2(res.vals[0], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
res.vals[1] =
hfma2(res.vals[1], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
return res;
}
__forceinline__ __device__ float2 accumulate_fp16(float2 acc, __half2 vals) {
float2 v = __half22float2(vals);
acc.x += v.x;
acc.y += v.y;
return acc;
}
__forceinline__ __device__ float2 accumulate_weighted_fp16(float2 acc, __half2 vals, float weight) {
float2 v = __half22float2(vals);
acc.x = fmaf(v.x, weight, acc.x);
acc.y = fmaf(v.y, weight, acc.y);
return acc;
}
__forceinline__ __device__ float8 accumulate_packed_int4(float8 acc,
uint32_t packedVals,
__half2 shift_scale) {
half8 res = dequantize_permuted_int4(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
float2 v2 = __half22float2(res.vals[2]);
float2 v3 = __half22float2(res.vals[3]);
// Twiddle after permutations.
acc.vals[0].x += v0.x;
acc.vals[0].y += v1.x;
acc.vals[0].z += v2.x;
acc.vals[0].w += v3.x;
acc.vals[1].x += v0.y;
acc.vals[1].y += v1.y;
acc.vals[1].z += v2.y;
acc.vals[1].w += v3.y;
return acc;
}
__forceinline__ __device__ float8 accumulate_weighted_packed_int4(float8 acc,
uint32_t packedVals,
__half2 shift_scale,
float weight) {
half8 res = dequantize_permuted_int4(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
float2 v2 = __half22float2(res.vals[2]);
float2 v3 = __half22float2(res.vals[3]);
// Twiddle after permutations.
acc.vals[0].x = fmaf(v0.x, weight, acc.vals[0].x);
acc.vals[0].y = fmaf(v1.x, weight, acc.vals[0].y);
acc.vals[0].z = fmaf(v2.x, weight, acc.vals[0].z);
acc.vals[0].w = fmaf(v3.x, weight, acc.vals[0].w);
acc.vals[1].x = fmaf(v0.y, weight, acc.vals[1].x);
acc.vals[1].y = fmaf(v1.y, weight, acc.vals[1].y);
acc.vals[1].z = fmaf(v2.y, weight, acc.vals[1].z);
acc.vals[1].w = fmaf(v3.y, weight, acc.vals[1].w);
return acc;
}
__forceinline__ __device__ float4 accumulate_packed_int8(float4 acc,
uint32_t packedVals,
__half2 shift_scale) {
half4 res = dequantize_permuted_int8(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
// Twiddle after permutations.
acc.x += v0.x;
acc.y += v1.x;
acc.z += v0.y;
acc.w += v1.y;
return acc;
}
__forceinline__ __device__ float4 accumulate_weighted_packed_int8(float4 acc,
uint32_t packedVals,
__half2 shift_scale,
float weight) {
half4 res = dequantize_permuted_int8(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
// Twiddle after permutations.
acc.x = fmaf(v0.x, weight, acc.x);
acc.y = fmaf(v1.x, weight, acc.y);
acc.z = fmaf(v0.y, weight, acc.z);
acc.w = fmaf(v1.y, weight, acc.w);
return acc;
}
// ---------------------- start cp.async helpers, copied from CUTLASS
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) {
// We prefer to use the new CVTA intrinsics if they are available, otherwise we will fall back to
// the previous internal intrinsics if they are available.
#if (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ >= 11)
//
// This NVVM intrinsic converts an address in shared memory to a plain
// unsigned integer. This is necessary to pass to shared memory instructions
// in inline PTX.
//
// In CUDA 11 and beyond, this replaces __nvvm_get_smem_pointer() [only available in 10.2].
//
//__device__ size_t __cvta_generic_to_shared(void* ptr);
/// CUTLASS helper to get SMEM pointer
return static_cast<unsigned>(__cvta_generic_to_shared(ptr));
#elif (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)
return __nvvm_get_smem_pointer(ptr);
#elif defined(__CUDA_ARCH__)
uint32_t smem_ptr;
asm(
"{ .reg .u64 smem_ptr; cvta.to.shared.u64 smem_ptr, %1; cvt.u32.u64 %0, smem_ptr; }\n"
: "=r"(smem_ptr) : "l"(ptr));
return smem_ptr;
#else
return 0;
#endif
}
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) {
return cutlass_get_smem_pointer(const_cast<void *>(ptr));
}
__device__ __forceinline__ void cp_async_fence() {
#if __CUDA_ARCH__ >= 800
asm volatile("cp.async.commit_group;\n" ::);
#endif
}
/// Partial specialization
/// Blocks until all but <N> previous cp.async.commit_group operations have committed.
template <int N>
__device__ __forceinline__ void cp_async_wait() {
#if __CUDA_ARCH__ >= 800
asm volatile("cp.async.wait_group %0;\n" ::"n"(N));
#endif
}
/// Blocks until all previous cp.async.commit_group operations have committed.
template <>
__device__ __forceinline__ void cp_async_wait<0>() {
#if __CUDA_ARCH__ >= 800
asm volatile("cp.async.wait_all;\n" ::);
#endif
}
/// Partial specialization
template <int SizeInBytes>
__device__ __forceinline__
void cp_async_zfill_cg(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if __CUDA_ARCH__ >= 800
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
"cp.async.cg.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
static_assert(SizeInBytes == 16, "");
using AccessType = uint4;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
} else {
AccessType zeros;
zeros.x = 0;
zeros.y = 0;
zeros.z = 0;
zeros.w = 0;
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
/// Copy with zero fill
template <int SizeInBytes>
__device__ __forceinline__
void cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if __CUDA_ARCH__ >= 800
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
"cp.async.ca.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
static_assert(SizeInBytes == 16, "");
using AccessType = uint4;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
} else {
AccessType zeros;
zeros.x = 0;
zeros.y = 0;
zeros.z = 0;
zeros.w = 0;
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
// TODO: increase code sharing (templates for accumulator_ty, accumulation, outputs per thread, etc?)
template<typename index_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows>
__launch_bounds__(WarpsPerBlock * 32)
__global__ void fp16_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L(
const PackedTensorAccessor64<uint8_t, 1, RestrictPtrTraits> dev_weights,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<uint8_t, 1, RestrictPtrTraits> weights_tys,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<float, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
PackedTensorAccessor32<Half, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t bb_t = blockIdx.x * blockDim.y + threadIdx.y;
if (bb_t >= div_round_up(B, OutputRowsPerThread) * T) {
return;
}
uint32_t t = bb_t / div_round_up(B, OutputRowsPerThread);
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
SparseType weight_ty = static_cast<SparseType>(weights_tys[t]);
if (weight_ty != SparseType::FP16) {
return;
}
const int32_t D_bytes = padded_row_size_in_bytes(D, weight_ty);
if (D_bytes <= MinNum128BRows * 128 || D_bytes > MaxNum128BRows * 128) {
return;
}
uint32_t bb = bb_t % div_round_up(B, OutputRowsPerThread);
int64_t weights_offset = weights_offsets[t];
const int32_t D_total = padded_D(D, weight_ty);
const int32_t D_padding = D_total - D;
uint32_t warp_idx = threadIdx.y;
int32_t indices_starts[OutputRowsPerThread];
int32_t Ls[OutputRowsPerThread];
int32_t max_Ls = 0;
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
indices_starts[i] = indices_start;
Ls[i] = indices_end - indices_start;
max_Ls = max(max_Ls, Ls[i]);
}
const uint8_t* __restrict__ weights = &dev_weights[weights_offset];
constexpr size_t kOutputsPerThread = 2;
constexpr uint32_t NumUint4PerRow = MaxNum128BRows * 128 / sizeof(uint4);
const uint32_t uint4_loads_per_row = div_round_up(D_bytes, sizeof(uint4));
float2 accumulators[OutputRowsPerThread][MaxNum128BRows];
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
accumulators[i][j] = make_zero_float2();
}
}
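// Accumulation loop (summary of the code below): indices are consumed in batches of up to
// InputRowsInFlight rows per bag; each batch is staged into the shared-memory `buffers`
// tile with zero-filling cp.async loads, then every warp accumulates its
// OutputRowsPerThread x MaxNum128BRows slice of the fp16 rows in fp32 registers.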
for (uint32_t L_start = 0; L_start < max_Ls; L_start += InputRowsInFlight) {
uint32_t input_rows_in_flight = min(static_cast<uint32_t>(InputRowsInFlight), max_Ls - L_start);
typedef uint4 AllBuffers[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight][NumUint4PerRow];
__shared__ AllBuffers buffers;
{% if weighted %}
typedef float AllIndiceWeights[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight];
__shared__ AllIndiceWeights buffers_indice_weights;
{% endif %}
for (uint32_t load_idx = threadIdx.x; load_idx < input_rows_in_flight * uint4_loads_per_row; load_idx += kWarpSize) {
uint32_t row_load_idx = load_idx % uint4_loads_per_row;
uint32_t input_row_idx = (load_idx / uint4_loads_per_row);
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
int32_t idx = valid ? indices[indices_starts[i] + L_start + input_row_idx] : -1;
const uint4* row = valid ? reinterpret_cast<const uint4*>(&weights[static_cast<int64_t>(idx) * D_bytes]) : reinterpret_cast<const uint4*>(&weights[0]);
cp_async_zfill_cg<sizeof(uint4)>(&buffers[warp_idx][i][input_row_idx][row_load_idx], &row[row_load_idx], valid);
{% if weighted %}
buffers_indice_weights[warp_idx][i][input_row_idx] = valid ? indice_weights[indices_starts[i] + L_start + input_row_idx] : 0.0;
{% endif %}
}
}
// equivalent to fence + wait.
cp_async_wait<0>();
__syncwarp();
for (uint32_t input_row_idx = 0; input_row_idx < input_rows_in_flight; ++input_row_idx) {
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
const uint32_t* row = reinterpret_cast<const uint32_t*>(&buffers[warp_idx][i][input_row_idx][0]);
{% if weighted %}
float row_weight = buffers_indice_weights[warp_idx][i][input_row_idx];
{% endif %}
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
__half2 v = reinterpret_cast<const __half2*>(row)[kWarpSize * j + threadIdx.x];
{% if weighted %}
accumulators[i][j] = valid ? accumulate_weighted_fp16(accumulators[i][j], v, row_weight) : accumulators[i][j];
{% else %}
accumulators[i][j] = valid ? accumulate_fp16(accumulators[i][j], v) : accumulators[i][j];
{% endif %}
}
}
}
}
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
int32_t output_d = kWarpSize * j * kOutputsPerThread + threadIdx.x * kOutputsPerThread - D_padding;
if (pooling_mode == MEAN && Ls[i] != 0) {
float inv_L = static_cast<float>(1.0) / static_cast<float>(Ls[i]);
accumulators[i][j].x *= inv_L;
accumulators[i][j].y *= inv_L;
}
half2 val = to_half2(accumulators[i][j]);
if (output_d >= 0 && output_d < D) {
*reinterpret_cast<int1*>(&output[b][D_start + output_d]) = *reinterpret_cast<const int1*>(&val);
}
}
}
}
template<typename index_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows>
__launch_bounds__(WarpsPerBlock * 32)
__global__ void int_4bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L(
const PackedTensorAccessor64<uint8_t, 1, RestrictPtrTraits> dev_weights,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<uint8_t, 1, RestrictPtrTraits> weights_tys,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<float, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
PackedTensorAccessor32<Half, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t bb_t = blockIdx.x * blockDim.y + threadIdx.y;
if (bb_t >= div_round_up(B, OutputRowsPerThread) * T) {
return;
}
uint32_t t = bb_t / div_round_up(B, OutputRowsPerThread);
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
SparseType weight_ty = static_cast<SparseType>(weights_tys[t]);
if (weight_ty != SparseType::INT4) {
return;
}
const int32_t D_bytes = padded_row_size_in_bytes(D, weight_ty);
if (D_bytes <= MinNum128BRows * 128 || D_bytes > MaxNum128BRows * 128) {
return;
}
uint32_t bb = bb_t % div_round_up(B, OutputRowsPerThread);
int64_t weights_offset = weights_offsets[t];
const int32_t D_total = padded_D(D, weight_ty);
const int32_t D_padding = D_total - D;
uint32_t warp_idx = threadIdx.y;
int32_t indices_starts[OutputRowsPerThread];
int32_t Ls[OutputRowsPerThread];
int32_t max_Ls = 0;
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
indices_starts[i] = indices_start;
Ls[i] = indices_end - indices_start;
max_Ls = max(max_Ls, Ls[i]);
}
const uint8_t* __restrict__ weights = &dev_weights[weights_offset];
constexpr size_t kOutputsPerThread = 8;
constexpr uint32_t NumUint4PerRow = MaxNum128BRows * 128 / sizeof(uint4);
const uint32_t uint4_loads_per_row = div_round_up(D_bytes, sizeof(uint4));
float8 accumulators[OutputRowsPerThread][MaxNum128BRows];
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
accumulators[i][j] = make_zero_float8();
}
}
for (uint32_t L_start = 0; L_start < max_Ls; L_start += InputRowsInFlight) {
uint32_t input_rows_in_flight = min(static_cast<uint32_t>(InputRowsInFlight), max_Ls - L_start);
typedef uint4 AllBuffers[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight][NumUint4PerRow];
__shared__ AllBuffers buffers;
{% if weighted %}
typedef float AllIndiceWeights[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight];
__shared__ AllIndiceWeights buffers_indice_weights;
{% endif %}
for (uint32_t load_idx = threadIdx.x; load_idx < input_rows_in_flight * uint4_loads_per_row; load_idx += kWarpSize) {
uint32_t row_load_idx = load_idx % uint4_loads_per_row;
uint32_t input_row_idx = (load_idx / uint4_loads_per_row);
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
int32_t idx = valid ? indices[indices_starts[i] + L_start + input_row_idx] : -1;
const uint4* row = valid ? reinterpret_cast<const uint4*>(&weights[static_cast<int64_t>(idx) * D_bytes]) : reinterpret_cast<const uint4*>(&weights[0]);
cp_async_zfill_cg<sizeof(uint4)>(&buffers[warp_idx][i][input_row_idx][row_load_idx], &row[row_load_idx], valid);
{% if weighted %}
buffers_indice_weights[warp_idx][i][input_row_idx] = valid ? indice_weights[indices_starts[i] + L_start + input_row_idx] : 0.0;
{% endif %}
}
}
// equivalent to fence + wait.
cp_async_wait<0>();
__syncwarp();
for (uint32_t input_row_idx = 0; input_row_idx < input_rows_in_flight; ++input_row_idx) {
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
const uint32_t* row = reinterpret_cast<const uint32_t*>(&buffers[warp_idx][i][input_row_idx][0]);
half2 shift_scale = reinterpret_cast<const half2*>(row)[0];
{% if weighted %}
float row_weight = buffers_indice_weights[warp_idx][i][input_row_idx];
{% endif %}
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
uint32_t v = reinterpret_cast<const uint32_t*>(row)[kWarpSize * j + threadIdx.x];
{% if weighted %}
accumulators[i][j] = valid ? accumulate_weighted_packed_int4(accumulators[i][j], v, shift_scale, row_weight) : accumulators[i][j];
{% else %}
accumulators[i][j] = valid ? accumulate_packed_int4(accumulators[i][j], v, shift_scale) : accumulators[i][j];
{% endif %}
}
}
}
}
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
int32_t output_d = kWarpSize * j * kOutputsPerThread + threadIdx.x * kOutputsPerThread - D_padding;
bool aligned_16b = intptr_t(&output[b][D_start + output_d]) % 16 == 0;
bool aligned_8b = intptr_t(&output[b][D_start + output_d]) % 8 == 0;
if (pooling_mode == MEAN && Ls[i] != 0) {
float inv_L = static_cast<float>(1.0) / static_cast<float>(Ls[i]);
accumulators[i][j].vals[0].x *= inv_L;
accumulators[i][j].vals[0].y *= inv_L;
accumulators[i][j].vals[0].z *= inv_L;
accumulators[i][j].vals[0].w *= inv_L;
accumulators[i][j].vals[1].x *= inv_L;
accumulators[i][j].vals[1].y *= inv_L;
accumulators[i][j].vals[1].z *= inv_L;
accumulators[i][j].vals[1].w *= inv_L;
}
half8 val = to_half8(accumulators[i][j]);
if (output_d >= 0 && output_d < D) {
if (aligned_16b) {
*reinterpret_cast<int4*>(&output[b][D_start + output_d]) = *reinterpret_cast<const int4*>(&val);
} else if (aligned_8b) {
auto v = *reinterpret_cast<const int4*>(&val);
*reinterpret_cast<int2*>(&output[b][D_start + output_d + 0]) = make_int2(v.x, v.y);
*reinterpret_cast<int2*>(&output[b][D_start + output_d + 4]) = make_int2(v.z, v.w);
} else {
auto v = *reinterpret_cast<const int4*>(&val);
*reinterpret_cast<int*>(&output[b][D_start + output_d + 0]) = v.x;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 2]) = v.y;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 4]) = v.z;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 6]) = v.w;
}
}
}
}
}
template<typename index_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows>
__launch_bounds__(WarpsPerBlock * 32)
__global__ void int_8bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L(
const PackedTensorAccessor64<uint8_t, 1, RestrictPtrTraits> dev_weights,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<uint8_t, 1, RestrictPtrTraits> weights_tys,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<float, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
PackedTensorAccessor32<Half, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t bb_t = blockIdx.x * blockDim.y + threadIdx.y;
if (bb_t >= div_round_up(B, OutputRowsPerThread) * T) {
return;
}
uint32_t t = bb_t / div_round_up(B, OutputRowsPerThread);
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
SparseType weight_ty = static_cast<SparseType>(weights_tys[t]);
if (weight_ty != SparseType::INT8) {
return;
}
const int32_t D_bytes = padded_row_size_in_bytes(D, weight_ty);
if (D_bytes <= MinNum128BRows * 128 || D_bytes > MaxNum128BRows * 128) {
return;
}
uint32_t bb = bb_t % div_round_up(B, OutputRowsPerThread);
int64_t weights_offset = weights_offsets[t];
const int32_t D_total = padded_D(D, weight_ty);
const int32_t D_padding = D_total - D;
uint32_t warp_idx = threadIdx.y;
int32_t indices_starts[OutputRowsPerThread];
int32_t Ls[OutputRowsPerThread];
int32_t max_Ls = 0;
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
indices_starts[i] = indices_start;
Ls[i] = indices_end - indices_start;
max_Ls = max(max_Ls, Ls[i]);
}
const uint8_t* __restrict__ weights = &dev_weights[weights_offset];
constexpr size_t kOutputsPerThread = 4;
constexpr uint32_t NumUint4PerRow = MaxNum128BRows * 128 / sizeof(uint4);
const uint32_t uint4_loads_per_row = div_round_up(D_bytes, sizeof(uint4));
float4 accumulators[OutputRowsPerThread][MaxNum128BRows];
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
accumulators[i][j] = make_zero_float4();
}
}
for (uint32_t L_start = 0; L_start < max_Ls; L_start += InputRowsInFlight) {
uint32_t input_rows_in_flight = min(static_cast<uint32_t>(InputRowsInFlight), max_Ls - L_start);
typedef uint4 AllBuffers[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight][NumUint4PerRow];
__shared__ AllBuffers buffers;
{% if weighted %}
typedef float AllIndiceWeights[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight];
__shared__ AllIndiceWeights buffers_indice_weights;
{% endif %}
for (uint32_t load_idx = threadIdx.x; load_idx < input_rows_in_flight * uint4_loads_per_row; load_idx += kWarpSize) {
uint32_t row_load_idx = load_idx % uint4_loads_per_row;
uint32_t input_row_idx = (load_idx / uint4_loads_per_row);
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
int32_t idx = valid ? indices[indices_starts[i] + L_start + input_row_idx] : -1;
const uint4* row = valid ? reinterpret_cast<const uint4*>(&weights[static_cast<int64_t>(idx) * D_bytes]) : reinterpret_cast<const uint4*>(&weights[0]);
cp_async_zfill_cg<sizeof(uint4)>(&buffers[warp_idx][i][input_row_idx][row_load_idx], &row[row_load_idx], valid);
{% if weighted %}
buffers_indice_weights[warp_idx][i][input_row_idx] = valid ? indice_weights[indices_starts[i] + L_start + input_row_idx] : 0.0;
{% endif %}
}
}
// equivalent to fence + wait.
cp_async_wait<0>();
__syncwarp();
for (uint32_t input_row_idx = 0; input_row_idx < input_rows_in_flight; ++input_row_idx) {
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
const uint32_t* row = reinterpret_cast<const uint32_t*>(&buffers[warp_idx][i][input_row_idx][0]);
half2 shift_scale = reinterpret_cast<const half2*>(row)[0];
{% if weighted %}
float row_weight = buffers_indice_weights[warp_idx][i][input_row_idx];
{% endif %}
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
uint32_t v = reinterpret_cast<const uint32_t*>(row)[kWarpSize * j + threadIdx.x];
{% if weighted %}
accumulators[i][j] = valid ? accumulate_weighted_packed_int8(accumulators[i][j], v, shift_scale, row_weight) : accumulators[i][j];
{% else %}
accumulators[i][j] = valid ? accumulate_packed_int8(accumulators[i][j], v, shift_scale) : accumulators[i][j];
{% endif %}
}
}
}
}
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
int32_t output_d = kWarpSize * j * kOutputsPerThread + threadIdx.x * kOutputsPerThread - D_padding;
bool aligned_8b = intptr_t(&output[b][D_start + output_d]) % 8 == 0;
if (pooling_mode == MEAN && Ls[i] != 0) {
float inv_L = static_cast<float>(1.0) / static_cast<float>(Ls[i]);
accumulators[i][j].x *= inv_L;
accumulators[i][j].y *= inv_L;
accumulators[i][j].z *= inv_L;
accumulators[i][j].w *= inv_L;
}
half4 val = to_half4(accumulators[i][j]);
if (output_d >= 0 && output_d < D) {
if (aligned_8b) {
*reinterpret_cast<int2*>(&output[b][D_start + output_d]) = *reinterpret_cast<const int2*>(&val);
} else {
auto v = *reinterpret_cast<const int2*>(&val);
*reinterpret_cast<int*>(&output[b][D_start + output_d + 0]) = v.x;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 2]) = v.y;
}
}
}
}
}
__device__ inline uint32_t pruned_hash_function(uint32_t h) {
// MurmurHash3 32-bit mixing function.
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
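// Used below to pick the initial probe position: slot_start = pruned_hash_function((uint32_t)idx) % capacity;
// each 4-thread sub-warp then checks 4 consecutive slots per iteration (open addressing, linear probing in steps of 4).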
__global__ void int_nbit_split_embedding_codegen_forward_pruned_hashmap_lookup_{{ wdesc }}_kernel(
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> offsets,
const PackedTensorAccessor64<int32_t, 2, RestrictPtrTraits> hash_table,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> hash_table_offsets,
int32_t B,
int32_t T,
PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> dense_indices) {
// uint32_t capacity = hash_table.size(0);
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t t = b_t / B;
int32_t b = b_t % B;
if (b_t >= B * T) {
return;
}
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
int64_t table_start = hash_table_offsets[t];
int64_t table_end = hash_table_offsets[t + 1];
int64_t capacity = table_end - table_start;
if (capacity == 0) {
// No pruning applied on the indices associated with this table.
for (int32_t l = threadIdx.x; l < L; l += blockDim.x) {
dense_indices[indices_start + l] = indices[indices_start + l];
}
return;
}
uint32_t subwarp_id = threadIdx.x / 4;
uint32_t subwarp_tid = threadIdx.x % 4;
uint32_t subwarp_mask = static_cast<uint32_t>(0xF) << (4 * subwarp_id);
for (int32_t l_start = 0; l_start + subwarp_id < L; l_start += kWarpSize / 4) {
int32_t idx = indices[indices_start + l_start + subwarp_id];
uint32_t slot_start = pruned_hash_function(static_cast<uint32_t>(idx)) % capacity;
while (true) {
uint32_t slot = (slot_start + subwarp_tid) % capacity;
int2 val = *reinterpret_cast<const int2*>(&hash_table[table_start + static_cast<int64_t>(slot)][0]);
int32_t slot_sparse_idx = val.x;
int32_t slot_dense_idx = val.y;
bool found = false;
bool empty = false;
if (slot_sparse_idx == -1) {
empty = true;
} else if (slot_sparse_idx == idx) {
found = true;
dense_indices[indices_start + l_start + subwarp_id] = slot_dense_idx;
}
if (__any_sync(subwarp_mask, found)) {
break;
} else if (__any_sync(subwarp_mask, empty)) {
dense_indices[indices_start + l_start + subwarp_id] = -1;
break;
}
slot_start += 4;
}
}
}
}
at::Tensor int_nbit_split_embedding_codegen_forward_{{ wdesc }}_cuda(
at::Tensor dev_weights,
at::Tensor weights_offsets,
at::Tensor weights_tys,
at::Tensor D_offsets,
int64_t total_D,
int64_t max_int2_D,
int64_t max_int4_D,
int64_t max_int8_D,
int64_t max_float16_D,
at::Tensor indices,
at::Tensor offsets,
int64_t pooling_mode,
{% if weighted %}
at::Tensor indice_weights,
{% endif %}
int64_t unused
) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(dev_weights.get_device());
int32_t T = D_offsets.numel() - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
int32_t B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
TORCH_CHECK(total_D > 0);
TORCH_CHECK(max_int2_D == 0);
auto output = at::empty({B, total_D}, dev_weights.options().dtype(at::kHalf));
using index_t = int32_t;
// launch 4-bit kernel
constexpr int32_t kWarpsPerBlock = 4;
#define X(OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \
hipLaunchKernelGGL((nbit::int_4bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows>), \
nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \
dim3(nbit::kWarpSize, kWarpsPerBlock), \
0, \
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
pooling_mode, \
{% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \
output.packed_accessor32<at::Half, 2, at::RestrictPtrTraits>() \
); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
if (max_int4_D > 0) {
auto max_int4_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int4_D, nbit::SparseType::INT4), 128);
TORCH_CHECK(max_int4_128b_rows <= 4);
if (max_int4_128b_rows > 0) {
X(2, 8, 0, 1);
}
if (max_int4_128b_rows > 1) {
X(2, 4, 1, 2);
}
if (max_int4_128b_rows > 2) {
X(1, 4, 2, 4);
}
}
#undef X
#define X(OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \
hipLaunchKernelGGL((nbit::int_8bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows>), \
nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \
dim3(nbit::kWarpSize, kWarpsPerBlock), \
0, \
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
pooling_mode, \
{% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \
output.packed_accessor32<at::Half, 2, at::RestrictPtrTraits>() \
); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
if (max_int8_D > 0) {
auto max_int8_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int8_D, nbit::SparseType::INT8), 128);
TORCH_CHECK(max_int8_128b_rows <= 8);
if (max_int8_128b_rows > 0) {
X(2, 8, 0, 1);
}
if (max_int8_128b_rows > 1) {
X(2, 4, 1, 2);
}
if (max_int8_128b_rows > 2) {
X(2, 4, 2, 4);
}
if (max_int8_128b_rows > 4) {
X(2, 4, 4, 8);
}
}
#undef X
#define X(OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \
hipLaunchKernelGGL((nbit::fp16_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows>), \
nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \
dim3(nbit::kWarpSize, kWarpsPerBlock), \
0, \
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
pooling_mode, \
{% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \
output.packed_accessor32<at::Half, 2, at::RestrictPtrTraits>() \
); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
if (max_float16_D > 0) {
auto max_fp16_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float16_D, nbit::SparseType::FP16), 128);
TORCH_CHECK(max_fp16_128b_rows <= 16);
if (max_fp16_128b_rows > 0) {
X(2, 8, 0, 2);
}
if (max_fp16_128b_rows > 2) {
X(2, 8, 2, 4);
}
if (max_fp16_128b_rows > 4) {
X(2, 4, 4, 8);
}
if (max_fp16_128b_rows > 8) {
X(2, 2, 8, 16);
}
}
#undef X
// TODO: 2-bit kernels.
return output;
}
at::Tensor pruned_hashmap_lookup_{{ wdesc }}_cuda(
at::Tensor indices,
at::Tensor offsets,
at::Tensor hash_table,
at::Tensor hash_table_offsets) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(indices.get_device());
auto dense_indices = at::empty_like(indices);
int32_t T = hash_table_offsets.size(0) - 1;
int32_t B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
TORCH_CHECK(hash_table.size(0) < std::numeric_limits<int32_t>::max());
constexpr size_t kForwardMaxThreads = 256;
hipLaunchKernelGGL((nbit::int_nbit_split_embedding_codegen_forward_pruned_hashmap_lookup_{{ wdesc }}_kernel),
dim3(nbit::div_round_up(B * T + 1, kForwardMaxThreads / 32)),
dim3(32, kForwardMaxThreads / 32),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
indices.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
hash_table.packed_accessor64<int32_t, 2, at::RestrictPtrTraits>(),
hash_table_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
B,
T,
dense_indices.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>()
);
C10_HIP_KERNEL_LAUNCH_CHECK();
return dense_indices;
}
|
1ccb4b74ebf5b76f97fdf8ce88e7982bd7ef3980.cu
|
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "codegen/embedding_forward_template_helpers.cuh"
namespace nbit {
using namespace at;
using namespace fbgemm_gpu;
// Keep in sync with split_embedding_configs.py:SparseType
enum class SparseType : uint8_t {
FP32 = 0,
FP16 = 1,
INT8 = 2,
INT4 = 3,
INT2 = 4,
};
__forceinline__ __host__ __device__ uint32_t round_up(uint32_t a, uint32_t b) {
return ((a + b - 1) / b) * b;
}
__forceinline__ __host__ __device__ uint32_t div_round_up(uint32_t a, uint32_t b) {
return ((a + b - 1) / b);
}
__host__ __device__ inline int32_t unpadded_row_size_in_bytes(int32_t dim, SparseType weight_ty) {
if (weight_ty == SparseType::FP16) { return dim * 2; }
if (weight_ty == SparseType::INT8) { return dim + 4; }
if (weight_ty == SparseType::INT4) { return dim / 2 + 4; }
if (weight_ty == SparseType::INT2) { return dim / 4 + 4; }
return 0;
}
__host__ __device__ inline int32_t padded_row_size_in_bytes(int32_t dim, SparseType weight_ty) {
auto r = unpadded_row_size_in_bytes(dim, weight_ty);
return round_up(r, 16);
}
// "Effective" number of elements in the row when we include the row-wise quantization parameters.
__device__ inline int32_t padded_D(int32_t dim, SparseType weight_ty) {
if (weight_ty == SparseType::FP16) { return dim; }
if (weight_ty == SparseType::INT8) { return dim + 4; }
if (weight_ty == SparseType::INT4) { return dim + 8; }
if (weight_ty == SparseType::INT2) { return dim + 16; }
return 0;
}
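// For example, with D = 128: FP16 rows occupy 256 B (padded 256 B), INT8 rows 132 B (padded 144 B),
// INT4 rows 68 B (padded 80 B), and INT2 rows 36 B (padded 48 B); padded_D then reports 128, 132,
// 136, and 144 "effective" elements respectively. The extra 4 bytes on the quantized types hold the
// row-wise half2 (scale, shift) parameters that the kernels below read as
// reinterpret_cast<const half2*>(row)[0].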
struct __align__(32) float8 {
__host__ __device__ float8() {}
float4 vals[2];
};
struct __align__(8) half4 {
__host__ __device__ half4() {}
half2 vals[2];
};
struct __align__(16) half8 {
__host__ __device__ half8() {}
half2 vals[4];
};
__device__ __forceinline__ float4 make_zero_float4() {
return make_float4(0, 0, 0, 0);
}
__device__ __forceinline__ float8 make_zero_float8() {
float8 t;
t.vals[0] = make_float4(0, 0, 0, 0);
t.vals[1] = make_float4(0, 0, 0, 0);
return t;
}
__device__ __forceinline__ float2 make_zero_float2() {
return make_float2(0, 0);
}
__device__ __forceinline__ half8 to_half8(float8 v) {
half8 t;
t.vals[0] = __float22half2_rn(make_float2(v.vals[0].x, v.vals[0].y));
t.vals[1] = __float22half2_rn(make_float2(v.vals[0].z, v.vals[0].w));
t.vals[2] = __float22half2_rn(make_float2(v.vals[1].x, v.vals[1].y));
t.vals[3] = __float22half2_rn(make_float2(v.vals[1].z, v.vals[1].w));
return t;
}
__device__ __forceinline__ half4 to_half4(float4 v) {
half4 t;
t.vals[0] = __float22half2_rn(make_float2(v.x, v.y));
t.vals[1] = __float22half2_rn(make_float2(v.z, v.w));
return t;
}
__device__ __forceinline__ __half2 to_half2(float2 v) {
return __float22half2_rn(v);
}
__forceinline__ __device__ __half2 hfma2(const __half2 a, const __half2 b, const __half2 c) {
#if __CUDA_ARCH__ >= 530 && __CUDA_ARCH__ != 610
return __hfma2(a, b, c);
#else
float2 fa, fb, fc;
fa = __half22float2(a);
fb = __half22float2(b);
fc = __half22float2(c);
fc.x = fa.x * fb.x + fc.x;
fc.y = fa.y * fb.y + fc.y;
return __float22half2_rn(fc);
#endif
}
__forceinline__ __device__ half hmul(half a, half b) {
#if __CUDA_ARCH__ >= 530 && __CUDA_ARCH__ != 610
return __hmul(a, b);
#else
return __float2half(__half2float(a) * __half2float(b));
#endif
}
// Reinterpret a pair of uint16_t (packed into a uint32_t) as half2, and multiply by rhs.
__device__ __forceinline__ __half2 hmul_short2(uint32_t lhs, __half rhs) {
#if __CUDA_ARCH__ >= 530 && __CUDA_ARCH__ != 610
__half2 ret;
__half2 rhsp = make_half2(rhs, rhs);
asm("mul.f16x2 %0, %1, %2;" : "=r"(__HALF2_TO_UI(ret)) : "r"(__HALF2_TO_CUI(lhs)), "r"(__HALF2_TO_CUI(rhsp)));
return ret;
#else
__half2 lhs_h2;
__HALF2_TO_UI(lhs_h2) = lhs;
float2 fx = __half22float2(lhs_h2);
float2 fy = __half22float2(make_half2(rhs, rhs));
float2 fr;
fr.x = fx.x * fy.x;
fr.y = fx.y * fy.y;
return __float22half2_rn(fr);
#endif
}
__forceinline__ __device__ half8 dequantize_permuted_int4(uint32_t packedVals, __half2 shift_scale) {
half8 res;
uint32_t v = packedVals;
// What's going on here, you might ask? We extract 4-bit pairs of integers as 2x uint16 packed into an int32
// via the mask operation, and then we convert them to half precision values.
// As these are all integers in [0, 15], we can actually just interpret the 4-bit integer values as half-precision values.
// We multiply by 4096 x 4096 to go from the 4-bit representation to the equivalent fp16 value,
// or alternatively 32768 * 512 (or 32 when we have shifted the 4-bit value up).
// See e.g. https://gist.github.com/ajtulloch/021254a291a95966bc509db4e34ffeff for a NumPy implementation.
// We do this dance because:
// a) doing bitwise operations on each 4-bit value is expensive on the ALU, and 4-bit to half is expensive on the XU.
// b) doing a 256-entry shared memory LUT on 8-bit pairs is expensive on SMEM throughput.
// Credit to @jhj.
res.vals[0] = hmul_short2(v & 0x000F000F, 32768);
res.vals[1] = hmul_short2(v & 0x00F000F0, 32768);
v >>= 8;
res.vals[2] = hmul_short2(v & 0x000F000F, 32768);
res.vals[3] = hmul_short2(v & 0x00F000F0, 32768);
res.vals[0] =
hfma2(res.vals[0], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
res.vals[1] =
hfma2(res.vals[1], __half2(hmul(shift_scale.x, 32), hmul(shift_scale.x, 32)),
__half2(shift_scale.y, shift_scale.y));
res.vals[2] =
hfma2(res.vals[2], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
res.vals[3] =
hfma2(res.vals[3], __half2(hmul(shift_scale.x, 32), hmul(shift_scale.x, 32)),
__half2(shift_scale.y, shift_scale.y));
return res;
}
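// Scalar reference for the bit tricks above (a sketch for clarity, not used by the kernels):
// a nibble n in [0, 15] sitting in the low bits of a half is the subnormal value n * 2^-24, so
//   (n      as half) * 32768 * (shift_scale.x * 512) == n * shift_scale.x   // low-nibble lanes, mask 0x000F000F
//   (n * 16 as half) * 32768 * (shift_scale.x * 32)  == n * shift_scale.x   // high-nibble lanes, mask 0x00F000F0
// and the trailing hfma2 adds shift_scale.y, i.e. every dequantized element is n * scale + shift.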
__forceinline__ __device__ half4 dequantize_permuted_int8(uint32_t packedVals, __half2 shift_scale) {
half4 res;
uint32_t v = packedVals;
// See comment above, this is a minor variation.
res.vals[0] = hmul_short2(v & 0x00FF00FF, 32768);
v >>= 8;
res.vals[1] = hmul_short2(v & 0x00FF00FF, 32768);
res.vals[0] =
hfma2(res.vals[0], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
res.vals[1] =
hfma2(res.vals[1], __half2(hmul(shift_scale.x, 512), hmul(shift_scale.x, 512)),
__half2(shift_scale.y, shift_scale.y));
return res;
}
__forceinline__ __device__ float2 accumulate_fp16(float2 acc, __half2 vals) {
float2 v = __half22float2(vals);
acc.x += v.x;
acc.y += v.y;
return acc;
}
__forceinline__ __device__ float2 accumulate_weighted_fp16(float2 acc, __half2 vals, float weight) {
float2 v = __half22float2(vals);
acc.x = fmaf(v.x, weight, acc.x);
acc.y = fmaf(v.y, weight, acc.y);
return acc;
}
__forceinline__ __device__ float8 accumulate_packed_int4(float8 acc,
uint32_t packedVals,
__half2 shift_scale) {
half8 res = dequantize_permuted_int4(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
float2 v2 = __half22float2(res.vals[2]);
float2 v3 = __half22float2(res.vals[3]);
// Twiddle after permutations.
acc.vals[0].x += v0.x;
acc.vals[0].y += v1.x;
acc.vals[0].z += v2.x;
acc.vals[0].w += v3.x;
acc.vals[1].x += v0.y;
acc.vals[1].y += v1.y;
acc.vals[1].z += v2.y;
acc.vals[1].w += v3.y;
return acc;
}
__forceinline__ __device__ float8 accumulate_weighted_packed_int4(float8 acc,
uint32_t packedVals,
__half2 shift_scale,
float weight) {
half8 res = dequantize_permuted_int4(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
float2 v2 = __half22float2(res.vals[2]);
float2 v3 = __half22float2(res.vals[3]);
// Twiddle after permutations.
acc.vals[0].x = fmaf(v0.x, weight, acc.vals[0].x);
acc.vals[0].y = fmaf(v1.x, weight, acc.vals[0].y);
acc.vals[0].z = fmaf(v2.x, weight, acc.vals[0].z);
acc.vals[0].w = fmaf(v3.x, weight, acc.vals[0].w);
acc.vals[1].x = fmaf(v0.y, weight, acc.vals[1].x);
acc.vals[1].y = fmaf(v1.y, weight, acc.vals[1].y);
acc.vals[1].z = fmaf(v2.y, weight, acc.vals[1].z);
acc.vals[1].w = fmaf(v3.y, weight, acc.vals[1].w);
return acc;
}
__forceinline__ __device__ float4 accumulate_packed_int8(float4 acc,
uint32_t packedVals,
__half2 shift_scale) {
half4 res = dequantize_permuted_int8(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
// Twiddle after permutations.
acc.x += v0.x;
acc.y += v1.x;
acc.z += v0.y;
acc.w += v1.y;
return acc;
}
__forceinline__ __device__ float4 accumulate_weighted_packed_int8(float4 acc,
uint32_t packedVals,
__half2 shift_scale,
float weight) {
half4 res = dequantize_permuted_int8(packedVals, shift_scale);
// Accumulate in float32.
float2 v0 = __half22float2(res.vals[0]);
float2 v1 = __half22float2(res.vals[1]);
// Twiddle after permutations.
acc.x = fmaf(v0.x, weight, acc.x);
acc.y = fmaf(v1.x, weight, acc.y);
acc.z = fmaf(v0.y, weight, acc.z);
acc.w = fmaf(v1.y, weight, acc.w);
return acc;
}
// ---------------------- start cp.async helpers, copied from CUTLASS
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) {
// We prefer to use the new CVTA intrinsics if they are available, otherwise we will fall back to
// the previous internal intrinsics if they are available.
#if (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ >= 11)
//
// This NVVM intrinsic converts an address in shared memory to a plain
// unsigned integer. This is necessary to pass to shared memory instructions
// in inline PTX.
//
// In CUDA 11 and beyond, this replaces __nvvm_get_smem_pointer() [only available in 10.2].
//
//__device__ size_t __cvta_generic_to_shared(void* ptr);
/// CUTLASS helper to get SMEM pointer
return static_cast<unsigned>(__cvta_generic_to_shared(ptr));
#elif (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)
return __nvvm_get_smem_pointer(ptr);
#elif defined(__CUDA_ARCH__)
uint32_t smem_ptr;
asm(
"{ .reg .u64 smem_ptr; cvta.to.shared.u64 smem_ptr, %1; cvt.u32.u64 %0, smem_ptr; }\n"
: "=r"(smem_ptr) : "l"(ptr));
return smem_ptr;
#else
return 0;
#endif
}
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) {
return cutlass_get_smem_pointer(const_cast<void *>(ptr));
}
__device__ __forceinline__ void cp_async_fence() {
#if __CUDA_ARCH__ >= 800
asm volatile("cp.async.commit_group;\n" ::);
#endif
}
/// Partial specialization
/// Blocks until all but <N> previous cp.async.commit_group operations have committed.
template <int N>
__device__ __forceinline__ void cp_async_wait() {
#if __CUDA_ARCH__ >= 800
asm volatile("cp.async.wait_group %0;\n" ::"n"(N));
#endif
}
/// Blocks until all previous cp.async.commit_group operations have committed.
template <>
__device__ __forceinline__ void cp_async_wait<0>() {
#if __CUDA_ARCH__ >= 800
asm volatile("cp.async.wait_all;\n" ::);
#endif
}
/// Partial specialization
template <int SizeInBytes>
__device__ __forceinline__
void cp_async_zfill_cg(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if __CUDA_ARCH__ >= 800
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
"cp.async.cg.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
static_assert(SizeInBytes == 16, "");
using AccessType = uint4;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
} else {
AccessType zeros;
zeros.x = 0;
zeros.y = 0;
zeros.z = 0;
zeros.w = 0;
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
/// Copy with zero fill
template <int SizeInBytes>
__device__ __forceinline__
void cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if __CUDA_ARCH__ >= 800
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
"cp.async.ca.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
static_assert(SizeInBytes == 16, "");
using AccessType = uint4;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
} else {
AccessType zeros;
zeros.x = 0;
zeros.y = 0;
zeros.z = 0;
zeros.w = 0;
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
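// Typical usage pattern in the kernels below (sketch; the buffer/row names are illustrative):
//   cp_async_zfill_cg<sizeof(uint4)>(&smem_buffer[...], &global_row[...], valid); // predicated 16 B copy, zero-filled when !valid
//   cp_async_wait<0>();                                                           // wait for all outstanding copies
//   __syncwarp();                                                                 // then read smem_buffer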
// TODO: increase code sharing (templates for accumulator_ty, accumulation, outputs per thread, etc?)
template<typename index_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows>
__launch_bounds__(WarpsPerBlock * 32)
__global__ void fp16_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L(
const PackedTensorAccessor64<uint8_t, 1, RestrictPtrTraits> dev_weights,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<uint8_t, 1, RestrictPtrTraits> weights_tys,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<float, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
PackedTensorAccessor32<Half, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t bb_t = blockIdx.x * blockDim.y + threadIdx.y;
if (bb_t >= div_round_up(B, OutputRowsPerThread) * T) {
return;
}
uint32_t t = bb_t / div_round_up(B, OutputRowsPerThread);
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
SparseType weight_ty = static_cast<SparseType>(weights_tys[t]);
if (weight_ty != SparseType::FP16) {
return;
}
const int32_t D_bytes = padded_row_size_in_bytes(D, weight_ty);
if (D_bytes <= MinNum128BRows * 128 || D_bytes > MaxNum128BRows * 128) {
return;
}
uint32_t bb = bb_t % div_round_up(B, OutputRowsPerThread);
int64_t weights_offset = weights_offsets[t];
const int32_t D_total = padded_D(D, weight_ty);
const int32_t D_padding = D_total - D;
uint32_t warp_idx = threadIdx.y;
int32_t indices_starts[OutputRowsPerThread];
int32_t Ls[OutputRowsPerThread];
int32_t max_Ls = 0;
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
indices_starts[i] = indices_start;
Ls[i] = indices_end - indices_start;
max_Ls = max(max_Ls, Ls[i]);
}
const uint8_t* __restrict__ weights = &dev_weights[weights_offset];
constexpr size_t kOutputsPerThread = 2;
constexpr uint32_t NumUint4PerRow = MaxNum128BRows * 128 / sizeof(uint4);
const uint32_t uint4_loads_per_row = div_round_up(D_bytes, sizeof(uint4));
float2 accumulators[OutputRowsPerThread][MaxNum128BRows];
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
accumulators[i][j] = make_zero_float2();
}
}
for (uint32_t L_start = 0; L_start < max_Ls; L_start += InputRowsInFlight) {
uint32_t input_rows_in_flight = min(static_cast<uint32_t>(InputRowsInFlight), max_Ls - L_start);
typedef uint4 AllBuffers[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight][NumUint4PerRow];
__shared__ AllBuffers buffers;
{% if weighted %}
typedef float AllIndiceWeights[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight];
__shared__ AllIndiceWeights buffers_indice_weights;
{% endif %}
for (uint32_t load_idx = threadIdx.x; load_idx < input_rows_in_flight * uint4_loads_per_row; load_idx += kWarpSize) {
uint32_t row_load_idx = load_idx % uint4_loads_per_row;
uint32_t input_row_idx = (load_idx / uint4_loads_per_row);
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
int32_t idx = valid ? indices[indices_starts[i] + L_start + input_row_idx] : -1;
const uint4* row = valid ? reinterpret_cast<const uint4*>(&weights[static_cast<int64_t>(idx) * D_bytes]) : reinterpret_cast<const uint4*>(&weights[0]);
cp_async_zfill_cg<sizeof(uint4)>(&buffers[warp_idx][i][input_row_idx][row_load_idx], &row[row_load_idx], valid);
{% if weighted %}
buffers_indice_weights[warp_idx][i][input_row_idx] = valid ? indice_weights[indices_starts[i] + L_start + input_row_idx] : 0.0;
{% endif %}
}
}
// equivalent to fence + wait.
cp_async_wait<0>();
__syncwarp();
for (uint32_t input_row_idx = 0; input_row_idx < input_rows_in_flight; ++input_row_idx) {
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
const uint32_t* row = reinterpret_cast<const uint32_t*>(&buffers[warp_idx][i][input_row_idx][0]);
{% if weighted %}
float row_weight = buffers_indice_weights[warp_idx][i][input_row_idx];
{% endif %}
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
__half2 v = reinterpret_cast<const __half2*>(row)[kWarpSize * j + threadIdx.x];
{% if weighted %}
accumulators[i][j] = valid ? accumulate_weighted_fp16(accumulators[i][j], v, row_weight) : accumulators[i][j];
{% else %}
accumulators[i][j] = valid ? accumulate_fp16(accumulators[i][j], v) : accumulators[i][j];
{% endif %}
}
}
}
}
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
int32_t output_d = kWarpSize * j * kOutputsPerThread + threadIdx.x * kOutputsPerThread - D_padding;
if (pooling_mode == MEAN && Ls[i] != 0) {
float inv_L = static_cast<float>(1.0) / static_cast<float>(Ls[i]);
accumulators[i][j].x *= inv_L;
accumulators[i][j].y *= inv_L;
}
half2 val = to_half2(accumulators[i][j]);
if (output_d >= 0 && output_d < D) {
*reinterpret_cast<int1*>(&output[b][D_start + output_d]) = *reinterpret_cast<const int1*>(&val);
}
}
}
}
template<typename index_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows>
__launch_bounds__(WarpsPerBlock * 32)
__global__ void int_4bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L(
const PackedTensorAccessor64<uint8_t, 1, RestrictPtrTraits> dev_weights,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<uint8_t, 1, RestrictPtrTraits> weights_tys,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<float, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
PackedTensorAccessor32<Half, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t bb_t = blockIdx.x * blockDim.y + threadIdx.y;
if (bb_t >= div_round_up(B, OutputRowsPerThread) * T) {
return;
}
uint32_t t = bb_t / div_round_up(B, OutputRowsPerThread);
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
SparseType weight_ty = static_cast<SparseType>(weights_tys[t]);
if (weight_ty != SparseType::INT4) {
return;
}
const int32_t D_bytes = padded_row_size_in_bytes(D, weight_ty);
if (D_bytes <= MinNum128BRows * 128 || D_bytes > MaxNum128BRows * 128) {
return;
}
uint32_t bb = bb_t % div_round_up(B, OutputRowsPerThread);
int64_t weights_offset = weights_offsets[t];
const int32_t D_total = padded_D(D, weight_ty);
const int32_t D_padding = D_total - D;
uint32_t warp_idx = threadIdx.y;
int32_t indices_starts[OutputRowsPerThread];
int32_t Ls[OutputRowsPerThread];
int32_t max_Ls = 0;
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
indices_starts[i] = indices_start;
Ls[i] = indices_end - indices_start;
max_Ls = max(max_Ls, Ls[i]);
}
const uint8_t* __restrict__ weights = &dev_weights[weights_offset];
constexpr size_t kOutputsPerThread = 8;
constexpr uint32_t NumUint4PerRow = MaxNum128BRows * 128 / sizeof(uint4);
const uint32_t uint4_loads_per_row = div_round_up(D_bytes, sizeof(uint4));
float8 accumulators[OutputRowsPerThread][MaxNum128BRows];
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
accumulators[i][j] = make_zero_float8();
}
}
for (uint32_t L_start = 0; L_start < max_Ls; L_start += InputRowsInFlight) {
uint32_t input_rows_in_flight = min(static_cast<uint32_t>(InputRowsInFlight), max_Ls - L_start);
typedef uint4 AllBuffers[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight][NumUint4PerRow];
__shared__ AllBuffers buffers;
{% if weighted %}
typedef float AllIndiceWeights[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight];
__shared__ AllIndiceWeights buffers_indice_weights;
{% endif %}
for (uint32_t load_idx = threadIdx.x; load_idx < input_rows_in_flight * uint4_loads_per_row; load_idx += kWarpSize) {
uint32_t row_load_idx = load_idx % uint4_loads_per_row;
uint32_t input_row_idx = (load_idx / uint4_loads_per_row);
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
int32_t idx = valid ? indices[indices_starts[i] + L_start + input_row_idx] : -1;
const uint4* row = valid ? reinterpret_cast<const uint4*>(&weights[static_cast<int64_t>(idx) * D_bytes]) : reinterpret_cast<const uint4*>(&weights[0]);
cp_async_zfill_cg<sizeof(uint4)>(&buffers[warp_idx][i][input_row_idx][row_load_idx], &row[row_load_idx], valid);
{% if weighted %}
buffers_indice_weights[warp_idx][i][input_row_idx] = valid ? indice_weights[indices_starts[i] + L_start + input_row_idx] : 0.0;
{% endif %}
}
}
// equivalent to fence + wait.
cp_async_wait<0>();
__syncwarp();
for (uint32_t input_row_idx = 0; input_row_idx < input_rows_in_flight; ++input_row_idx) {
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
const uint32_t* row = reinterpret_cast<const uint32_t*>(&buffers[warp_idx][i][input_row_idx][0]);
half2 shift_scale = reinterpret_cast<const half2*>(row)[0];
{% if weighted %}
float row_weight = buffers_indice_weights[warp_idx][i][input_row_idx];
{% endif %}
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
uint32_t v = reinterpret_cast<const uint32_t*>(row)[kWarpSize * j + threadIdx.x];
{% if weighted %}
accumulators[i][j] = valid ? accumulate_weighted_packed_int4(accumulators[i][j], v, shift_scale, row_weight) : accumulators[i][j];
{% else %}
accumulators[i][j] = valid ? accumulate_packed_int4(accumulators[i][j], v, shift_scale) : accumulators[i][j];
{% endif %}
}
}
}
}
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
int32_t output_d = kWarpSize * j * kOutputsPerThread + threadIdx.x * kOutputsPerThread - D_padding;
bool aligned_16b = intptr_t(&output[b][D_start + output_d]) % 16 == 0;
bool aligned_8b = intptr_t(&output[b][D_start + output_d]) % 8 == 0;
if (pooling_mode == MEAN && Ls[i] != 0) {
float inv_L = static_cast<float>(1.0) / static_cast<float>(Ls[i]);
accumulators[i][j].vals[0].x *= inv_L;
accumulators[i][j].vals[0].y *= inv_L;
accumulators[i][j].vals[0].z *= inv_L;
accumulators[i][j].vals[0].w *= inv_L;
accumulators[i][j].vals[1].x *= inv_L;
accumulators[i][j].vals[1].y *= inv_L;
accumulators[i][j].vals[1].z *= inv_L;
accumulators[i][j].vals[1].w *= inv_L;
}
half8 val = to_half8(accumulators[i][j]);
if (output_d >= 0 && output_d < D) {
if (aligned_16b) {
*reinterpret_cast<int4*>(&output[b][D_start + output_d]) = *reinterpret_cast<const int4*>(&val);
} else if (aligned_8b) {
auto v = *reinterpret_cast<const int4*>(&val);
*reinterpret_cast<int2*>(&output[b][D_start + output_d + 0]) = make_int2(v.x, v.y);
*reinterpret_cast<int2*>(&output[b][D_start + output_d + 4]) = make_int2(v.z, v.w);
} else {
auto v = *reinterpret_cast<const int4*>(&val);
*reinterpret_cast<int*>(&output[b][D_start + output_d + 0]) = v.x;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 2]) = v.y;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 4]) = v.z;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 6]) = v.w;
}
}
}
}
}
template<typename index_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows>
__launch_bounds__(WarpsPerBlock * 32)
__global__ void int_8bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L(
const PackedTensorAccessor64<uint8_t, 1, RestrictPtrTraits> dev_weights,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<uint8_t, 1, RestrictPtrTraits> weights_tys,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<float, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
PackedTensorAccessor32<Half, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t bb_t = blockIdx.x * blockDim.y + threadIdx.y;
if (bb_t >= div_round_up(B, OutputRowsPerThread) * T) {
return;
}
uint32_t t = bb_t / div_round_up(B, OutputRowsPerThread);
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
SparseType weight_ty = static_cast<SparseType>(weights_tys[t]);
if (weight_ty != SparseType::INT8) {
return;
}
const int32_t D_bytes = padded_row_size_in_bytes(D, weight_ty);
if (D_bytes <= MinNum128BRows * 128 || D_bytes > MaxNum128BRows * 128) {
return;
}
uint32_t bb = bb_t % div_round_up(B, OutputRowsPerThread);
int64_t weights_offset = weights_offsets[t];
const int32_t D_total = padded_D(D, weight_ty);
const int32_t D_padding = D_total - D;
uint32_t warp_idx = threadIdx.y;
int32_t indices_starts[OutputRowsPerThread];
int32_t Ls[OutputRowsPerThread];
int32_t max_Ls = 0;
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
indices_starts[i] = indices_start;
Ls[i] = indices_end - indices_start;
max_Ls = max(max_Ls, Ls[i]);
}
const uint8_t* __restrict__ weights = &dev_weights[weights_offset];
constexpr size_t kOutputsPerThread = 4;
constexpr uint32_t NumUint4PerRow = MaxNum128BRows * 128 / sizeof(uint4);
const uint32_t uint4_loads_per_row = div_round_up(D_bytes, sizeof(uint4));
float4 accumulators[OutputRowsPerThread][MaxNum128BRows];
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
accumulators[i][j] = make_zero_float4();
}
}
for (uint32_t L_start = 0; L_start < max_Ls; L_start += InputRowsInFlight) {
uint32_t input_rows_in_flight = min(static_cast<uint32_t>(InputRowsInFlight), max_Ls - L_start);
typedef uint4 AllBuffers[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight][NumUint4PerRow];
__shared__ AllBuffers buffers;
{% if weighted %}
typedef float AllIndiceWeights[WarpsPerBlock][OutputRowsPerThread][InputRowsInFlight];
__shared__ AllIndiceWeights buffers_indice_weights;
{% endif %}
for (uint32_t load_idx = threadIdx.x; load_idx < input_rows_in_flight * uint4_loads_per_row; load_idx += kWarpSize) {
uint32_t row_load_idx = load_idx % uint4_loads_per_row;
uint32_t input_row_idx = (load_idx / uint4_loads_per_row);
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
int32_t idx = valid ? indices[indices_starts[i] + L_start + input_row_idx] : -1;
const uint4* row = valid ? reinterpret_cast<const uint4*>(&weights[static_cast<int64_t>(idx) * D_bytes]) : reinterpret_cast<const uint4*>(&weights[0]);
cp_async_zfill_cg<sizeof(uint4)>(&buffers[warp_idx][i][input_row_idx][row_load_idx], &row[row_load_idx], valid);
{% if weighted %}
buffers_indice_weights[warp_idx][i][input_row_idx] = valid ? indice_weights[indices_starts[i] + L_start + input_row_idx] : 0.0;
{% endif %}
}
}
// equivalent to fence + wait.
cp_async_wait<0>();
__syncwarp();
for (uint32_t input_row_idx = 0; input_row_idx < input_rows_in_flight; ++input_row_idx) {
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
bool valid = L_start + input_row_idx < Ls[i];
const uint32_t* row = reinterpret_cast<const uint32_t*>(&buffers[warp_idx][i][input_row_idx][0]);
half2 shift_scale = reinterpret_cast<const half2*>(row)[0];
{% if weighted %}
float row_weight = buffers_indice_weights[warp_idx][i][input_row_idx];
{% endif %}
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
uint32_t v = reinterpret_cast<const uint32_t*>(row)[kWarpSize * j + threadIdx.x];
{% if weighted %}
accumulators[i][j] = valid ? accumulate_weighted_packed_int8(accumulators[i][j], v, shift_scale, row_weight) : accumulators[i][j];
{% else %}
accumulators[i][j] = valid ? accumulate_packed_int8(accumulators[i][j], v, shift_scale) : accumulators[i][j];
{% endif %}
}
}
}
}
#pragma unroll OutputRowsPerThread
for (uint32_t i = 0; i < OutputRowsPerThread; ++i) {
uint32_t b = min(static_cast<uint32_t>(bb * OutputRowsPerThread + i), static_cast<uint32_t>(B - 1));
#pragma unroll MaxNum128BRows
for (uint32_t j = 0; j < MaxNum128BRows; ++j) {
int32_t output_d = kWarpSize * j * kOutputsPerThread + threadIdx.x * kOutputsPerThread - D_padding;
bool aligned_8b = intptr_t(&output[b][D_start + output_d]) % 8 == 0;
if (pooling_mode == MEAN && Ls[i] != 0) {
float inv_L = static_cast<float>(1.0) / static_cast<float>(Ls[i]);
accumulators[i][j].x *= inv_L;
accumulators[i][j].y *= inv_L;
accumulators[i][j].z *= inv_L;
accumulators[i][j].w *= inv_L;
}
half4 val = to_half4(accumulators[i][j]);
if (output_d >= 0 && output_d < D) {
if (aligned_8b) {
*reinterpret_cast<int2*>(&output[b][D_start + output_d]) = *reinterpret_cast<const int2*>(&val);
} else {
auto v = *reinterpret_cast<const int2*>(&val);
*reinterpret_cast<int*>(&output[b][D_start + output_d + 0]) = v.x;
*reinterpret_cast<int*>(&output[b][D_start + output_d + 2]) = v.y;
}
}
}
}
}
__device__ inline uint32_t pruned_hash_function(uint32_t h) {
// MurmurHash3 32-bit mixing function.
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
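// Used below to pick the initial probe position: slot_start = pruned_hash_function((uint32_t)idx) % capacity;
// each 4-thread sub-warp then checks 4 consecutive slots per iteration (open addressing, linear probing in steps of 4).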
__global__ void int_nbit_split_embedding_codegen_forward_pruned_hashmap_lookup_{{ wdesc }}_kernel(
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> offsets,
const PackedTensorAccessor64<int32_t, 2, RestrictPtrTraits> hash_table,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> hash_table_offsets,
int32_t B,
int32_t T,
PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> dense_indices) {
// uint32_t capacity = hash_table.size(0);
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t t = b_t / B;
int32_t b = b_t % B;
if (b_t >= B * T) {
return;
}
int32_t indices_start = offsets[t * B + b];
int32_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
int64_t table_start = hash_table_offsets[t];
int64_t table_end = hash_table_offsets[t + 1];
int64_t capacity = table_end - table_start;
if (capacity == 0) {
// No pruning applied on the indices associated with this table.
for (int32_t l = threadIdx.x; l < L; l += blockDim.x) {
dense_indices[indices_start + l] = indices[indices_start + l];
}
return;
}
uint32_t subwarp_id = threadIdx.x / 4;
uint32_t subwarp_tid = threadIdx.x % 4;
uint32_t subwarp_mask = static_cast<uint32_t>(0xF) << (4 * subwarp_id);
for (int32_t l_start = 0; l_start + subwarp_id < L; l_start += kWarpSize / 4) {
int32_t idx = indices[indices_start + l_start + subwarp_id];
uint32_t slot_start = pruned_hash_function(static_cast<uint32_t>(idx)) % capacity;
while (true) {
uint32_t slot = (slot_start + subwarp_tid) % capacity;
int2 val = *reinterpret_cast<const int2*>(&hash_table[table_start + static_cast<int64_t>(slot)][0]);
int32_t slot_sparse_idx = val.x;
int32_t slot_dense_idx = val.y;
bool found = false;
bool empty = false;
if (slot_sparse_idx == -1) {
empty = true;
} else if (slot_sparse_idx == idx) {
found = true;
dense_indices[indices_start + l_start + subwarp_id] = slot_dense_idx;
}
if (__any_sync(subwarp_mask, found)) {
break;
} else if (__any_sync(subwarp_mask, empty)) {
dense_indices[indices_start + l_start + subwarp_id] = -1;
break;
}
slot_start += 4;
}
}
}
}
at::Tensor int_nbit_split_embedding_codegen_forward_{{ wdesc }}_cuda(
at::Tensor dev_weights,
at::Tensor weights_offsets,
at::Tensor weights_tys,
at::Tensor D_offsets,
int64_t total_D,
int64_t max_int2_D,
int64_t max_int4_D,
int64_t max_int8_D,
int64_t max_float16_D,
at::Tensor indices,
at::Tensor offsets,
int64_t pooling_mode,
{% if weighted %}
at::Tensor indice_weights,
{% endif %}
int64_t unused
) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(dev_weights.get_device());
int32_t T = D_offsets.numel() - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
int32_t B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
TORCH_CHECK(total_D > 0);
TORCH_CHECK(max_int2_D == 0);
auto output = at::empty({B, total_D}, dev_weights.options().dtype(at::kHalf));
using index_t = int32_t;
// launch 4-bit kernel
constexpr int32_t kWarpsPerBlock = 4;
#define X(OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \
nbit::int_4bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows><<< \
nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \
dim3(nbit::kWarpSize, kWarpsPerBlock), \
0, \
at::cuda::getCurrentCUDAStream()>>>( \
dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
pooling_mode, \
{% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \
output.packed_accessor32<at::Half, 2, at::RestrictPtrTraits>() \
); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
if (max_int4_D > 0) {
auto max_int4_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int4_D, nbit::SparseType::INT4), 128);
TORCH_CHECK(max_int4_128b_rows <= 4);
if (max_int4_128b_rows > 0) {
X(2, 8, 0, 1);
}
if (max_int4_128b_rows > 1) {
X(2, 4, 1, 2);
}
if (max_int4_128b_rows > 2) {
X(1, 4, 2, 4);
}
}
#undef X
#define X(OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \
nbit::int_8bit_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows><<< \
nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \
dim3(nbit::kWarpSize, kWarpsPerBlock), \
0, \
at::cuda::getCurrentCUDAStream()>>>( \
dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
pooling_mode, \
{% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \
output.packed_accessor32<at::Half, 2, at::RestrictPtrTraits>() \
); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
if (max_int8_D > 0) {
auto max_int8_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int8_D, nbit::SparseType::INT8), 128);
TORCH_CHECK(max_int8_128b_rows <= 8);
if (max_int8_128b_rows > 0) {
X(2, 8, 0, 1);
}
if (max_int8_128b_rows > 1) {
X(2, 4, 1, 2);
}
if (max_int8_128b_rows > 2) {
X(2, 4, 2, 4);
}
if (max_int8_128b_rows > 4) {
X(2, 4, 4, 8);
}
}
#undef X
#define X(OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \
nbit::fp16_split_embedding_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows><<< \
nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \
dim3(nbit::kWarpSize, kWarpsPerBlock), \
0, \
at::cuda::getCurrentCUDAStream()>>>( \
dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \
weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \
indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \
pooling_mode, \
{% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \
output.packed_accessor32<at::Half, 2, at::RestrictPtrTraits>() \
); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
if (max_float16_D > 0) {
auto max_fp16_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float16_D, nbit::SparseType::FP16), 128);
TORCH_CHECK(max_fp16_128b_rows <= 16);
if (max_fp16_128b_rows > 0) {
X(2, 8, 0, 2);
}
if (max_fp16_128b_rows > 2) {
X(2, 8, 2, 4);
}
if (max_fp16_128b_rows > 4) {
X(2, 4, 4, 8);
}
if (max_fp16_128b_rows > 8) {
X(2, 2, 8, 16);
}
}
#undef X
// TODO: 2-bit kernels.
return output;
}
at::Tensor pruned_hashmap_lookup_{{ wdesc }}_cuda(
at::Tensor indices,
at::Tensor offsets,
at::Tensor hash_table,
at::Tensor hash_table_offsets) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(indices.get_device());
auto dense_indices = at::empty_like(indices);
int32_t T = hash_table_offsets.size(0) - 1;
int32_t B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
TORCH_CHECK(hash_table.size(0) < std::numeric_limits<int32_t>::max());
constexpr size_t kForwardMaxThreads = 256;
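// Launch one 32-thread warp per (table, sample) pair: blocks are 32 x 8
// threads, so each block covers kForwardMaxThreads / 32 = 8 pairs.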
nbit::int_nbit_split_embedding_codegen_forward_pruned_hashmap_lookup_{{ wdesc }}_kernel<<<
nbit::div_round_up(B * T + 1, kForwardMaxThreads / 32),
dim3(32, kForwardMaxThreads / 32),
0,
at::cuda::getCurrentCUDAStream()>>>(
indices.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
hash_table.packed_accessor64<int32_t, 2, at::RestrictPtrTraits>(),
hash_table_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
B,
T,
dense_indices.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>()
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return dense_indices;
}
|
a655ea3700599e85786004865010d3bfbb7146d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2015, Julian Straub <[email protected]> Licensed
* under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <nvidia/helper_cuda.h>
#define PI 3.141592653589793f
#define BLOCK_SIZE 256
#define N_PER_T 16
// step size of the normals
// for PointXYZI
#define X_STEP 3 // 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
__constant__ float c_rgbForMFaxes[7];
extern void loadRGBvaluesForMFaxes()
{
float h_rgbForMFaxes[7];
for (uint32_t k=0; k<7; ++k)
{
union{
uint8_t asByte[4];
uint32_t asInt;
float asFloat;
} rgb;
rgb.asInt = 0;
if(k ==0)
rgb.asInt = (255 << 16) | (0 << 8) | 0;
else if (k ==1)
rgb.asInt = (255 << 16) | (100 << 8) | 100;
else if (k ==2)
rgb.asInt = (0 << 16) | (255 << 8) | 0;
else if (k ==3)
rgb.asInt = (100 << 16) | (255 << 8) | 100;
else if (k ==4)
rgb.asInt = (0 << 16) | (0 << 8) | 255;
else if (k ==5)
rgb.asInt = (100 << 16) | (100 << 8) | 255;
else if (k ==6)
rgb.asInt = (200 << 16) | (200 << 8) | 200;
h_rgbForMFaxes[k] = rgb.asFloat; //*(float *)(&rgb);
}
hipMemcpyToSymbol(c_rgbForMFaxes, h_rgbForMFaxes , 7* sizeof(float));
}
/*
* Given assignments z of normals x to MF axes compute the costfunction value
*/
__global__ void robustSquaredAngleCostFct(float *cost, float *x,
uint32_t *z, float *mu, float sigma_sq, int N)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
//const int tid = threadIdx.x;
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
// xi[tid*3] = x[tid];
// xi[tid*3+1] = x[tid+Nx];
// xi[tid*3+2] = x[tid+Nx*2];
uint32_t k = z[id];
if (k<6)
{
// k==6 means that xi is nan
float xiTy = x[id*X_STEP+X_OFFSET]*mui[k] + x[id*X_STEP+X_OFFSET+1]*mui[k+6]
+ x[id*X_STEP+X_OFFSET+2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
//float errSq = err*err;
rho[tid] += (err*err)/(err*err+sigma_sq);
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
rho[tid] += rho[tid + s];
__syncthreads();
}
if(tid==0 && rho[0]!=0 ) {
atomicAdd(&cost[0],rho[0]);
}
}
/*
* compute the Jacobian of robust squared cost function
*/
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
uint32_t *z, float *mu, float sigma_sq, int N)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one partial 3x3 Jacobian per thread, stored as nine BLOCK_SIZE-strided slots:
// slots 0-2 hold the first column of J, slots 3-5 the second, slots 6-8 the third
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
uint32_t k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
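// Closed form: with t = xiTy, e = acos(t), a = sqrt(1 - t*t) and
// b = sigma_sq + e*e, the branch below evaluates
// d/dt[ e*e / (e*e + sigma_sq) ] = -2*e*sigma_sq / (a*b*b),
// whose limit for t -> 1 is -2/sigma_sq.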
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
}
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] += sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] += sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] += sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
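// The strided reduction above stops at s == 2, so two partial sums remain per
// Jacobian entry; they are combined inside the atomicAdd below.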
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]+J_shared[k*BLOCK_SIZE+1]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
/*
* compute normal assignments as well as the costfunction value under that
 * assignment. Normal assignments are computed based on the nearest
* distance in the arclength sense.
*/
__global__ void robustSquaredAngleCostFctAssignment(float *cost, uint32_t* W,
float *x, uint32_t *z, float *mu, float sigma_sq, int N)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Wi[BLOCK_SIZE];
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Wi[tid] = 0;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
uint32_t k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// if nan
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (uint32_t k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] += (err_min*err_min)/(err_min*err_min+sigma_sq);
Wi[tid] += 1;
}
z[id] = k_min;
// errs[id] = err_min;
// if(X_STEP == 8)
// {
// x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
// x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
// x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
//// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
// }
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Wi[tid] += Wi[tid + s];
}
__syncthreads();
}
if(tid==0) {
atomicAdd(&cost[0],rho[0]+rho[1]);
}
if(tid==1) {
atomicAdd(W,Wi[0]+Wi[1]);
}
}
#include "optimizationSO3_weighted.cu"
|
a655ea3700599e85786004865010d3bfbb7146d6.cu
|
/* Copyright (c) 2015, Julian Straub <[email protected]> Licensed
* under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <nvidia/helper_cuda.h>
#define PI 3.141592653589793f
#define BLOCK_SIZE 256
#define N_PER_T 16
// step size of the normals
// for PointXYZI
#define X_STEP 3 // 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
__constant__ float c_rgbForMFaxes[7];
extern void loadRGBvaluesForMFaxes()
{
float h_rgbForMFaxes[7];
for (uint32_t k=0; k<7; ++k)
{
union{
uint8_t asByte[4];
uint32_t asInt;
float asFloat;
} rgb;
rgb.asInt = 0;
if(k ==0)
rgb.asInt = (255 << 16) | (0 << 8) | 0;
else if (k ==1)
rgb.asInt = (255 << 16) | (100 << 8) | 100;
else if (k ==2)
rgb.asInt = (0 << 16) | (255 << 8) | 0;
else if (k ==3)
rgb.asInt = (100 << 16) | (255 << 8) | 100;
else if (k ==4)
rgb.asInt = (0 << 16) | (0 << 8) | 255;
else if (k ==5)
rgb.asInt = (100 << 16) | (100 << 8) | 255;
else if (k ==6)
rgb.asInt = (200 << 16) | (200 << 8) | 200;
h_rgbForMFaxes[k] = rgb.asFloat; //*(float *)(&rgb);
}
cudaMemcpyToSymbol(c_rgbForMFaxes, h_rgbForMFaxes , 7* sizeof(float));
}
/*
* Given assignments z of normals x to MF axes compute the costfunction value
*/
__global__ void robustSquaredAngleCostFct(float *cost, float *x,
uint32_t *z, float *mu, float sigma_sq, int N)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
//const int tid = threadIdx.x;
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
// xi[tid*3] = x[tid];
// xi[tid*3+1] = x[tid+Nx];
// xi[tid*3+2] = x[tid+Nx*2];
uint32_t k = z[id];
if (k<6)
{
// k==6 means that xi is nan
float xiTy = x[id*X_STEP+X_OFFSET]*mui[k] + x[id*X_STEP+X_OFFSET+1]*mui[k+6]
+ x[id*X_STEP+X_OFFSET+2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
//float errSq = err*err;
rho[tid] += (err*err)/(err*err+sigma_sq);
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
rho[tid] += rho[tid + s];
__syncthreads();
}
if(tid==0 && rho[0]!=0 ) {
atomicAdd(&cost[0],rho[0]);
}
}
/*
* compute the Jacobian of robust squared cost function
*/
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
uint32_t *z, float *mu, float sigma_sq, int N)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one partial 3x3 Jacobian per thread, stored as nine BLOCK_SIZE-strided slots:
// slots 0-2 hold the first column of J, slots 3-5 the second, slots 6-8 the third
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
uint32_t k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
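// Closed form: with t = xiTy, e = acos(t), a = sqrt(1 - t*t) and
// b = sigma_sq + e*e, the branch below evaluates
// d/dt[ e*e / (e*e + sigma_sq) ] = -2*e*sigma_sq / (a*b*b),
// whose limit for t -> 1 is -2/sigma_sq.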
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
}
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] += sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] += sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] += sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
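// The strided reduction above stops at s == 2, so two partial sums remain per
// Jacobian entry; they are combined inside the atomicAdd below.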
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]+J_shared[k*BLOCK_SIZE+1]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
/*
* compute normal assignments as well as the costfunction value under that
 * assignment. Normal assignments are computed based on the nearest
* distance in the arclength sense.
*/
__global__ void robustSquaredAngleCostFctAssignment(float *cost, uint32_t* W,
float *x, uint32_t *z, float *mu, float sigma_sq, int N)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Wi[BLOCK_SIZE];
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Wi[tid] = 0;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
uint32_t k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// if nan
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (uint32_t k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] += (err_min*err_min)/(err_min*err_min+sigma_sq);
Wi[tid] += 1;
}
z[id] = k_min;
// errs[id] = err_min;
// if(X_STEP == 8)
// {
// x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
// x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
// x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
//// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
// }
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Wi[tid] += Wi[tid + s];
}
__syncthreads();
}
if(tid==0) {
atomicAdd(&cost[0],rho[0]+rho[1]);
}
if(tid==1) {
atomicAdd(W,Wi[0]+Wi[1]);
}
}
#include "optimizationSO3_weighted.cu"
|
55011f7df96bf47a378206a2c6835cdfbd2da5e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/channel_stats_op.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math/reduce.cuh"
namespace caffe2 {
namespace {
template <typename T, int kBlockDimX, int kBlockDimY>
__global__ void ChannelStatsNCHWCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
T* sum,
T* sumsq) {
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage m_storage;
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage v_storage;
const int c = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int n = threadIdx.x; n < N; n += blockDim.x) {
for (int hw = threadIdx.y; hw < HxW; hw += blockDim.y) {
const int index = (n * C + c) * HxW + hw;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + index);
v_val += __ldg(X + index) * __ldg(X + index);
#else
m_val += X[index];
v_val += X[index] * X[index];
#endif
}
}
m_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(m_storage).Sum(m_val);
v_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(v_storage).Sum(v_val);
if (threadIdx.x == 0 && threadIdx.y == 0) {
sum[c] = m_val;
sumsq[c] = v_val;
}
}
template <typename T>
__global__ void ChannelStatsNHWCCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
T* sum,
T* sumsq) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const int inner_size = N * HxW;
const int c = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int i = threadIdx.x; i < inner_size; i += blockDim.x) {
const int index = i * C + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + index);
v_val += __ldg(X + index) * __ldg(X + index);
#else
m_val += X[index];
v_val += X[index] * X[index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
sum[c] = m_val;
sumsq[c] = v_val;
}
}
} // namespace
template <>
template <>
bool ChannelStatsOp<CUDAContext>::ComputeChannelStatsNCHW<float>(
const int N,
const int C,
const int HxW,
const float* X,
float* sum,
float* sumsq) {
DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1(
HxW,
ChannelStatsNCHWCUDAKernel,
float,
C,
context_.cuda_stream(),
N,
C,
HxW,
X,
sum,
sumsq);
return true;
}
template <>
template <>
bool ChannelStatsOp<CUDAContext>::ComputeChannelStatsNHWC<float>(
const int N,
const int C,
const int HxW,
const float* X,
float* sum,
float* sumsq) {
hipLaunchKernelGGL(( ChannelStatsNHWCCUDAKernel<float>)
, dim3(C), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, C, HxW, X, sum, sumsq);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(ChannelStats, ChannelStatsOp<CUDAContext>);
} // namespace caffe2
|
55011f7df96bf47a378206a2c6835cdfbd2da5e4.cu
|
#include "caffe2/operators/channel_stats_op.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math/reduce.cuh"
namespace caffe2 {
namespace {
template <typename T, int kBlockDimX, int kBlockDimY>
__global__ void ChannelStatsNCHWCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
T* sum,
T* sumsq) {
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage m_storage;
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage v_storage;
const int c = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int n = threadIdx.x; n < N; n += blockDim.x) {
for (int hw = threadIdx.y; hw < HxW; hw += blockDim.y) {
const int index = (n * C + c) * HxW + hw;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + index);
v_val += __ldg(X + index) * __ldg(X + index);
#else
m_val += X[index];
v_val += X[index] * X[index];
#endif
}
}
m_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(m_storage).Sum(m_val);
v_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(v_storage).Sum(v_val);
if (threadIdx.x == 0 && threadIdx.y == 0) {
sum[c] = m_val;
sumsq[c] = v_val;
}
}
template <typename T>
__global__ void ChannelStatsNHWCCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
T* sum,
T* sumsq) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const int inner_size = N * HxW;
const int c = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int i = threadIdx.x; i < inner_size; i += blockDim.x) {
const int index = i * C + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + index);
v_val += __ldg(X + index) * __ldg(X + index);
#else
m_val += X[index];
v_val += X[index] * X[index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
sum[c] = m_val;
sumsq[c] = v_val;
}
}
} // namespace
template <>
template <>
bool ChannelStatsOp<CUDAContext>::ComputeChannelStatsNCHW<float>(
const int N,
const int C,
const int HxW,
const float* X,
float* sum,
float* sumsq) {
DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1(
HxW,
ChannelStatsNCHWCUDAKernel,
float,
C,
context_.cuda_stream(),
N,
C,
HxW,
X,
sum,
sumsq);
return true;
}
template <>
template <>
bool ChannelStatsOp<CUDAContext>::ComputeChannelStatsNHWC<float>(
const int N,
const int C,
const int HxW,
const float* X,
float* sum,
float* sumsq) {
ChannelStatsNHWCCUDAKernel<float>
<<<C, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, C, HxW, X, sum, sumsq);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(ChannelStats, ChannelStatsOp<CUDAContext>);
} // namespace caffe2
|
1d980dcecb6a74b22e331f5f95d6f26c49e6aaf6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipsparse.h>
#include "rmm_utils.h"
#include "cusparse_helper.h"
namespace cugraph
{
hipsparseHandle_t Cusparse::m_handle = 0;
template <typename ValueType>
CusparseCsrMV<ValueType>::CusparseCsrMV() {
if (sizeof(ValueType) == 4)
cuda_type = HIP_R_32F;
else
cuda_type = HIP_R_64F;
CHECK_CUSPARSE(hipsparseCreateMatDescr(&descrA));
CHECK_CUSPARSE(hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO));
CHECK_CUSPARSE(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL ));
//alg = CUSPARSE_ALG_NAIVE;
alg = CUSPARSE_ALG_MERGE_PATH;
stream = nullptr;
}
template <typename ValueType>
CusparseCsrMV<ValueType>::~CusparseCsrMV() {
ALLOC_FREE_TRY(spmv_d_temp_storage, stream);
}
template <typename ValueType>
void CusparseCsrMV<ValueType>::setup(int m,
int n,
int nnz,
const ValueType* alpha,
const ValueType* csrValA,
const int* csrRowPtrA,
const int* csrColIndA,
const ValueType* x,
const ValueType* beta,
ValueType* y) {
CHECK_CUSPARSE (cusparseCsrmvEx_bufferSize(Cusparse::get_handle(),
alg,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
nnz,
alpha,
cuda_type,
descrA,
csrValA,
cuda_type,
csrRowPtrA,
csrColIndA,
x,
cuda_type,
beta,
cuda_type,
y,
cuda_type,
cuda_type,
&spmv_temp_storage_bytes));
ALLOC_TRY ((void**)&spmv_d_temp_storage, spmv_temp_storage_bytes, stream);
}
template <typename ValueType>
void CusparseCsrMV<ValueType>::run(int m,
int n,
int nnz,
const ValueType* alpha,
const ValueType* csrValA,
const int* csrRowPtrA,
const int* csrColIndA,
const ValueType* x,
const ValueType* beta,
ValueType* y) {
CHECK_CUSPARSE(cusparseCsrmvEx(Cusparse::get_handle(),
alg,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
nnz,
alpha,
cuda_type,
descrA,
csrValA,
cuda_type,
csrRowPtrA,
csrColIndA,
x,
cuda_type,
beta,
cuda_type,
y,
cuda_type,
cuda_type,
spmv_d_temp_storage));
}
template class CusparseCsrMV<double>;
template class CusparseCsrMV<float>;
} //namespace
|
1d980dcecb6a74b22e331f5f95d6f26c49e6aaf6.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse.h>
#include "rmm_utils.h"
#include "cusparse_helper.h"
namespace cugraph
{
cusparseHandle_t Cusparse::m_handle = 0;
template <typename ValueType>
CusparseCsrMV<ValueType>::CusparseCsrMV() {
if (sizeof(ValueType) == 4)
cuda_type = CUDA_R_32F;
else
cuda_type = CUDA_R_64F;
CHECK_CUSPARSE(cusparseCreateMatDescr(&descrA));
CHECK_CUSPARSE(cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO));
CHECK_CUSPARSE(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL ));
//alg = CUSPARSE_ALG_NAIVE;
alg = CUSPARSE_ALG_MERGE_PATH;
stream = nullptr;
}
template <typename ValueType>
CusparseCsrMV<ValueType>::~CusparseCsrMV() {
ALLOC_FREE_TRY(spmv_d_temp_storage, stream);
}
template <typename ValueType>
void CusparseCsrMV<ValueType>::setup(int m,
int n,
int nnz,
const ValueType* alpha,
const ValueType* csrValA,
const int* csrRowPtrA,
const int* csrColIndA,
const ValueType* x,
const ValueType* beta,
ValueType* y) {
CHECK_CUSPARSE (cusparseCsrmvEx_bufferSize(Cusparse::get_handle(),
alg,
CUSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
nnz,
alpha,
cuda_type,
descrA,
csrValA,
cuda_type,
csrRowPtrA,
csrColIndA,
x,
cuda_type,
beta,
cuda_type,
y,
cuda_type,
cuda_type,
&spmv_temp_storage_bytes));
ALLOC_TRY ((void**)&spmv_d_temp_storage, spmv_temp_storage_bytes, stream);
}
template <typename ValueType>
void CusparseCsrMV<ValueType>::run(int m,
int n,
int nnz,
const ValueType* alpha,
const ValueType* csrValA,
const int* csrRowPtrA,
const int* csrColIndA,
const ValueType* x,
const ValueType* beta,
ValueType* y) {
CHECK_CUSPARSE(cusparseCsrmvEx(Cusparse::get_handle(),
alg,
CUSPARSE_OPERATION_NON_TRANSPOSE,
m,
n,
nnz,
alpha,
cuda_type,
descrA,
csrValA,
cuda_type,
csrRowPtrA,
csrColIndA,
x,
cuda_type,
beta,
cuda_type,
y,
cuda_type,
cuda_type,
spmv_d_temp_storage));
}
template class CusparseCsrMV<double>;
template class CusparseCsrMV<float>;
} //namespace
|
b2f42fbbd1f0603b711e794e6176fd5d2b8e5b28.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <float.h> //FLT_MAX
#include "KMeans.h"
__constant__ Vector2 Clusters[3];
__global__ void KMeansKernel( Datapoint* data, long n, int k )
{
//Assignment of each data point to a cluster
int threadID = (blockIdx.x * blockDim.x) + threadIdx.x;
if(threadID < n)
{
float Min_Dist = FLT_MAX;
int nearest_cluster = 0;
data[threadID].altered = false;
for(int j=0;j<k;j++)
{
if(data[threadID].p.distSq(Clusters[j]) < Min_Dist)
{
Min_Dist = data[threadID].p.distSq(Clusters[j]);
nearest_cluster = j;
}
}
if(nearest_cluster != data[threadID].cluster)
{
data[threadID].cluster = nearest_cluster;
data[threadID].altered = true;
}
}
}
bool KMeansGPU( Datapoint* data, long n, Vector2* clusters, int k )
{
hipError_t status;
bool exit = false;
int count;
Vector2 Center;
int bytes1 = k * sizeof(Vector2);
// Clusters is a statically allocated __constant__ symbol; it must not be
// passed to hipMalloc. Copying into it with hipMemcpyToSymbol is sufficient.
hipMemcpyToSymbol(Clusters, clusters, bytes1, 0, hipMemcpyHostToDevice);
Datapoint* DataSet;
int bytes2 = n * sizeof(Datapoint);
hipMalloc((void**) &DataSet, bytes2);
//iterates until no data point changes its cluster
while(!exit)
{
count = 0;
exit = true;
hipMemcpy(DataSet, data, bytes2, hipMemcpyHostToDevice);
dim3 dimBlock(768, 1);
dim3 dimGrid((int)ceil((float)n/768), 1);
hipLaunchKernelGGL(( KMeansKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, DataSet, n, k);
// Wait for completion
hipDeviceSynchronize();
// Check for errors
status = hipGetLastError();
if (status != hipSuccess)
{
std::cout << "Kernel failed: " << hipGetErrorString(status) << std::endl;
hipFree(DataSet);
return false;
}
// Retrieve the result matrix
hipMemcpy(data, DataSet, bytes2, hipMemcpyDeviceToHost);
//calculation of new centers for all k clusters
for(int i=0;i<k;i++)
{
count = 0;
Center.x = 0;
Center.y = 0;
for(int j=0;j<n;j++)
{
if(data[j].cluster == i)
{
Center.x += data[j].p.x;
Center.y += data[j].p.y;
count++;
}
}
if(count >0)
{
clusters[i].x = (Center.x)/count;
clusters[i].y = (Center.y)/count;
}
}
hipMemcpyToSymbol(Clusters, clusters, bytes1, 0, hipMemcpyHostToDevice);
for(int i=0;i<n;i++)
{
if(data[i].altered == true)
{
data[i].altered = false;
exit = false;
}
}
}
hipFree(DataSet);
// Success
return true;
}
|
b2f42fbbd1f0603b711e794e6176fd5d2b8e5b28.cu
|
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <float.h> //FLT_MAX
#include "KMeans.h"
__constant__ Vector2 Clusters[3];
__global__ void KMeansKernel( Datapoint* data, long n, int k )
{
//Assignment of each data point to a cluster
int threadID = (blockIdx.x * blockDim.x) + threadIdx.x;
if(threadID < n)
{
float Min_Dist = FLT_MAX;
int nearest_cluster = 0;
data[threadID].altered = false;
for(int j=0;j<k;j++)
{
if(data[threadID].p.distSq(Clusters[j]) < Min_Dist)
{
Min_Dist = data[threadID].p.distSq(Clusters[j]);
nearest_cluster = j;
}
}
if(nearest_cluster != data[threadID].cluster)
{
data[threadID].cluster = nearest_cluster;
data[threadID].altered = true;
}
}
}
bool KMeansGPU( Datapoint* data, long n, Vector2* clusters, int k )
{
cudaError_t status;
bool exit = false;
int count;
Vector2 Center;
int bytes1 = k * sizeof(Vector2);
// Clusters is a statically allocated __constant__ symbol; it must not be
// passed to cudaMalloc. Copying into it with cudaMemcpyToSymbol is sufficient.
cudaMemcpyToSymbol(Clusters, clusters, bytes1, 0, cudaMemcpyHostToDevice);
Datapoint* DataSet;
int bytes2 = n * sizeof(Datapoint);
cudaMalloc((void**) &DataSet, bytes2);
//iterates until no data point changes its cluster
while(!exit)
{
count = 0;
exit = true;
cudaMemcpy(DataSet, data, bytes2, cudaMemcpyHostToDevice);
dim3 dimBlock(768, 1);
dim3 dimGrid((int)ceil((float)n/768), 1);
KMeansKernel<<<dimGrid, dimBlock>>>(DataSet, n, k);
// Wait for completion
cudaThreadSynchronize();
// Check for errors
status = cudaGetLastError();
if (status != cudaSuccess)
{
std::cout << "Kernel failed: " << cudaGetErrorString(status) << std::endl;
cudaFree(DataSet);
return false;
}
// Retrieve the result matrix
cudaMemcpy(data, DataSet, bytes2, cudaMemcpyDeviceToHost);
//calculation of new centers for all k clusters
for(int i=0;i<k;i++)
{
count = 0;
Center.x = 0;
Center.y = 0;
for(int j=0;j<n;j++)
{
if(data[j].cluster == i)
{
Center.x += data[j].p.x;
Center.y += data[j].p.y;
count++;
}
}
if(count >0)
{
clusters[i].x = (Center.x)/count;
clusters[i].y = (Center.y)/count;
}
}
cudaMemcpyToSymbol(Clusters, clusters, bytes1, 0, cudaMemcpyHostToDevice);
for(int i=0;i<n;i++)
{
if(data[i].altered == true)
{
data[i].altered = false;
exit = false;
}
}
}
cudaFree(DataSet);
// Success
return true;
}
|
ef902f6426895128fd62f7b4d7fbe5516a9e0a56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2013, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/oskar_mem_set_value_real_cuda.h"
/* Kernels. ================================================================ */
/* Single precision. */
__global__
void oskar_mem_set_value_real_cudak_r_f(const int num, float* data,
const float value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = value;
}
__global__
void oskar_mem_set_value_real_cudak_c_f(const int num, float2* data,
const float value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = make_float2(value, 0.0f);
}
__global__
void oskar_mem_set_value_real_cudak_m_f(const int num, float4c* data,
const float value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i].a = make_float2(value, 0.0f);
data[i].b = make_float2(0.0f, 0.0f);
data[i].c = make_float2(0.0f, 0.0f);
data[i].d = make_float2(value, 0.0f);
}
/* Double precision. */
__global__
void oskar_mem_set_value_real_cudak_r_d(const int num, double* data,
const double value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = value;
}
__global__
void oskar_mem_set_value_real_cudak_c_d(const int num, double2* data,
const double value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = make_double2(value, 0.0);
}
__global__
void oskar_mem_set_value_real_cudak_m_d(const int num, double4c* data,
const double value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i].a = make_double2(value, 0.0);
data[i].b = make_double2(0.0, 0.0);
data[i].c = make_double2(0.0, 0.0);
data[i].d = make_double2(value, 0.0);
}
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_mem_set_value_real_cuda_r_f(int num, float* data, float value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_r_f OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_c_f(int num, float2* data, float value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_c_f OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_m_f(int num, float4c* data, float value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_m_f OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
/* Double precision. */
void oskar_mem_set_value_real_cuda_r_d(int num, double* data, double value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_r_d OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_c_d(int num, double2* data, double value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_c_d OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_m_d(int num, double4c* data, double value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_m_d OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
#ifdef __cplusplus
}
#endif
|
ef902f6426895128fd62f7b4d7fbe5516a9e0a56.cu
|
/*
* Copyright (c) 2013, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/oskar_mem_set_value_real_cuda.h"
/* Kernels. ================================================================ */
/* Single precision. */
__global__
void oskar_mem_set_value_real_cudak_r_f(const int num, float* data,
const float value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = value;
}
__global__
void oskar_mem_set_value_real_cudak_c_f(const int num, float2* data,
const float value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = make_float2(value, 0.0f);
}
__global__
void oskar_mem_set_value_real_cudak_m_f(const int num, float4c* data,
const float value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i].a = make_float2(value, 0.0f);
data[i].b = make_float2(0.0f, 0.0f);
data[i].c = make_float2(0.0f, 0.0f);
data[i].d = make_float2(value, 0.0f);
}
/* Double precision. */
__global__
void oskar_mem_set_value_real_cudak_r_d(const int num, double* data,
const double value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = value;
}
__global__
void oskar_mem_set_value_real_cudak_c_d(const int num, double2* data,
const double value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i] = make_double2(value, 0.0);
}
__global__
void oskar_mem_set_value_real_cudak_m_d(const int num, double4c* data,
const double value)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num) return;
data[i].a = make_double2(value, 0.0);
data[i].b = make_double2(0.0, 0.0);
data[i].c = make_double2(0.0, 0.0);
data[i].d = make_double2(value, 0.0);
}
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_mem_set_value_real_cuda_r_f(int num, float* data, float value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_r_f OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_c_f(int num, float2* data, float value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_c_f OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_m_f(int num, float4c* data, float value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_m_f OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
/* Double precision. */
void oskar_mem_set_value_real_cuda_r_d(int num, double* data, double value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_r_d OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_c_d(int num, double2* data, double value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_c_d OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
void oskar_mem_set_value_real_cuda_m_d(int num, double4c* data, double value)
{
int num_blocks, num_threads = 256;
num_blocks = (num + num_threads - 1) / num_threads;
oskar_mem_set_value_real_cudak_m_d OSKAR_CUDAK_CONF(num_blocks,
num_threads) (num, data, value);
}
#ifdef __cplusplus
}
#endif
|
d586de1bf3775d23112a9aaf6d485e4d32f86273.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void subtractCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : 0;
}
}
|
d586de1bf3775d23112a9aaf6d485e4d32f86273.cu
|
#include "includes.h"
__global__ void subtractCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : 0;
}
}
|
e5cad73266143e8838d7f20ac83a6ca23c5b95db.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
FNELEM-GPU MAIN FILE
Performs finite element structural analysis using an 4-node membrane, matrix inversion
was calculated using a CUDA algorithm (Gauss Jordan inversion).
@author ppizarror
@date 19/11/2018
@license
MIT License
Copyright (c) 2018 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// CUDA library imports
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
// FNELEM library imports
#include "fnelem/math/fematrix.cpp"
#include "fnelem/math/fematrix_utils.cpp"
#include "fnelem/math/matrix_inversion_cpu.cpp"
#include "fnelem/math/matrix_inversion_cuda.cu"
#include "fnelem/analysis/static_analysis.cpp"
#include "fnelem/model/base/model.cpp"
#include "fnelem/model/base/model_component.cpp"
#include "fnelem/model/elements/element.cpp"
#include "fnelem/model/elements/membrane.cpp"
#include "fnelem/model/loads/load.cpp"
#include "fnelem/model/loads/load_membrane_distributed.cpp"
#include "fnelem/model/loads/load_node.cpp"
#include "fnelem/model/loads/load_pattern.cpp"
#include "fnelem/model/loads/load_pattern_constant.cpp"
#include "fnelem/model/nodes/node.cpp"
#include "fnelem/model/restraints/restraint.cpp"
#include "fnelem/model/restraints/restraint_node.cpp"
#include "test/test_suite.h"
int main() {
// test_suite(); // Test all
test_analysis(); // Test analysis
return 0;
}
|
e5cad73266143e8838d7f20ac83a6ca23c5b95db.cu
|
/**
FNELEM-GPU MAIN FILE
Performs finite element structural analysis using an 4-node membrane, matrix inversion
was calculated using a CUDA algorithm (Gauss Jordan inversion).
@author ppizarror
@date 19/11/2018
@license
MIT License
Copyright (c) 2018 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// CUDA library imports
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
// FNELEM library imports
#include "fnelem/math/fematrix.cpp"
#include "fnelem/math/fematrix_utils.cpp"
#include "fnelem/math/matrix_inversion_cpu.cpp"
#include "fnelem/math/matrix_inversion_cuda.cu"
#include "fnelem/analysis/static_analysis.cpp"
#include "fnelem/model/base/model.cpp"
#include "fnelem/model/base/model_component.cpp"
#include "fnelem/model/elements/element.cpp"
#include "fnelem/model/elements/membrane.cpp"
#include "fnelem/model/loads/load.cpp"
#include "fnelem/model/loads/load_membrane_distributed.cpp"
#include "fnelem/model/loads/load_node.cpp"
#include "fnelem/model/loads/load_pattern.cpp"
#include "fnelem/model/loads/load_pattern_constant.cpp"
#include "fnelem/model/nodes/node.cpp"
#include "fnelem/model/restraints/restraint.cpp"
#include "fnelem/model/restraints/restraint_node.cpp"
#include "test/test_suite.h"
int main() {
// test_suite(); // Test all
test_analysis(); // Test analysis
return 0;
}
|
8a5674fb79e80b5743c469362698221365118395.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
// Define your kernels in this file; you may use more than one kernel if you
// need to.
// INSERT KERNEL(S) HERE
__global__ void partialScan(float *out, float *in, float *out_b, unsigned in_size) {
__shared__ float buf[BLOCK_SIZE * 2];
int tx = threadIdx.x, offset = blockIdx.x * blockDim.x * 2;
if (blockIdx.x == 0 && threadIdx.x == 0) {
buf[0] = 0;
if (BLOCK_SIZE < in_size) buf[BLOCK_SIZE] = in[BLOCK_SIZE - 1];
}
else {
if (tx + offset - 1 < in_size) buf[tx] = in[tx + offset - 1];
if (tx + offset + BLOCK_SIZE - 1 < in_size) buf[tx + BLOCK_SIZE] = in[tx + offset + BLOCK_SIZE - 1];
}
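// up-sweep (reduction) phase of the work-efficient Brent-Kung scan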
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
__syncthreads();
int i = (tx + 1) * stride * 2 - 1;
if (i < BLOCK_SIZE * 2) buf[i] += buf[i - stride];
}
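// down-sweep phase: propagate the partial sums back down the tree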
for (int stride = BLOCK_SIZE/2; stride >= 1; stride /= 2) {
__syncthreads();
int i = (tx + 1) * stride * 2 - 1;
if (i + stride < BLOCK_SIZE * 2) buf[i + stride] += buf[i];
}
__syncthreads();
// copy to out_b
if (!tx) out_b[blockIdx.x] = buf[BLOCK_SIZE * 2 - 1];
// copy to out
if (tx + offset < in_size) out[tx + offset] = buf[tx];
if (tx + offset + BLOCK_SIZE < in_size) out[tx + offset + BLOCK_SIZE] = buf[tx + BLOCK_SIZE];
}
__global__ void addVec(float *out, float *toAdd, unsigned in_size) {
int tx = threadIdx.x, offset = blockIdx.x * blockDim.x * 2;
if (tx + offset < in_size) out[tx + offset] += toAdd[blockIdx.x];
if (tx + offset + BLOCK_SIZE < in_size) out[tx + offset + BLOCK_SIZE] += toAdd[blockIdx.x];
}
/******************************************************************************
Set up and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void preScan(float *out, float *in, unsigned in_size)
{
// INSERT CODE HERE
hipError_t cuda_ret;
float *out_b;
int gridLen = (in_size - 1) / (2 * BLOCK_SIZE) + 1;
dim3 gridDim(gridLen, 1, 1);
dim3 blockDim(BLOCK_SIZE, 1, 1);
// allocate space for last value in first n - 1 block
cuda_ret = hipMalloc((void**)&out_b, gridLen * sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
// launch kernel
hipLaunchKernelGGL(( partialScan) , dim3(gridDim), dim3(blockDim), 0, 0, out, in, out_b, in_size);
// preScan and plus back out_d if needed
if (gridLen > 1) {
float *out_bscaned;
cuda_ret = hipMalloc((void**)&out_bscaned, gridLen * sizeof(float));
if (cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
preScan(out_bscaned, out_b, gridLen);
hipLaunchKernelGGL(( addVec) , dim3(gridDim), dim3(blockDim), 0, 0, out, out_bscaned, in_size);
hipFree(out_bscaned);
}
hipFree(out_b);
}
|
8a5674fb79e80b5743c469362698221365118395.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
// Define your kernels in this file; you may use more than one kernel if you
// need to.
// INSERT KERNEL(S) HERE
__global__ void partialScan(float *out, float *in, float *out_b, unsigned in_size) {
__shared__ float buf[BLOCK_SIZE * 2];
int tx = threadIdx.x, offset = blockIdx.x * blockDim.x * 2;
if (blockIdx.x == 0 && threadIdx.x == 0) {
buf[0] = 0;
if (BLOCK_SIZE < in_size) buf[BLOCK_SIZE] = in[BLOCK_SIZE - 1];
}
else {
if (tx + offset - 1 < in_size) buf[tx] = in[tx + offset - 1];
if (tx + offset + BLOCK_SIZE - 1 < in_size) buf[tx + BLOCK_SIZE] = in[tx + offset + BLOCK_SIZE - 1];
}
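// up-sweep (reduction) phase of the work-efficient Brent-Kung scan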
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
__syncthreads();
int i = (tx + 1) * stride * 2 - 1;
if (i < BLOCK_SIZE * 2) buf[i] += buf[i - stride];
}
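// down-sweep phase: propagate the partial sums back down the tree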
for (int stride = BLOCK_SIZE/2; stride >= 1; stride /= 2) {
__syncthreads();
int i = (tx + 1) * stride * 2 - 1;
if (i + stride < BLOCK_SIZE * 2) buf[i + stride] += buf[i];
}
__syncthreads();
// copy to out_b
if (!tx) out_b[blockIdx.x] = buf[BLOCK_SIZE * 2 - 1];
// copy to out
if (tx + offset < in_size) out[tx + offset] = buf[tx];
if (tx + offset + BLOCK_SIZE < in_size) out[tx + offset + BLOCK_SIZE] = buf[tx + BLOCK_SIZE];
}
__global__ void addVec(float *out, float *toAdd, unsigned in_size) {
int tx = threadIdx.x, offset = blockIdx.x * blockDim.x * 2;
if (tx + offset < in_size) out[tx + offset] += toAdd[blockIdx.x];
if (tx + offset + BLOCK_SIZE < in_size) out[tx + offset + BLOCK_SIZE] += toAdd[blockIdx.x];
}
/******************************************************************************
Set up and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void preScan(float *out, float *in, unsigned in_size)
{
// INSERT CODE HERE
cudaError_t cuda_ret;
float *out_b;
int gridLen = (in_size - 1) / (2 * BLOCK_SIZE) + 1;
dim3 gridDim(gridLen, 1, 1);
dim3 blockDim(BLOCK_SIZE, 1, 1);
// allocate space for last value in first n - 1 block
cuda_ret = cudaMalloc((void**)&out_b, gridLen * sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
// launch kernel
partialScan <<<gridDim, blockDim>>> (out, in, out_b, in_size);
// preScan and plus back out_d if needed
if (gridLen > 1) {
float *out_bscaned;
cuda_ret = cudaMalloc((void**)&out_bscaned, gridLen * sizeof(float));
if (cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
preScan(out_bscaned, out_b, gridLen);
addVec <<<gridDim, blockDim>>> (out, out_bscaned, in_size);
cudaFree(out_bscaned);
}
cudaFree(out_b);
}
|
35138ffb13f716fe582dcd1d15b887365db87e84.hip
|
// !!! This is a file automatically generated by hipify!!!
/*********************************************************************
run-cg.cu
Main program. Tests the reduction and runs cg.
**********************************************************************/
#define MAIN_PROGRAM
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "global.h"
#include "geometry.h"
#include "linalg.h"
#include "cg.h"
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int nBytes, status, N;
double *w, *v, *x;
double iStart, iElaps;
N=32;
int dimx = 256;
int dimy = 1;
if (argc>1)
{
N=atoi(argv[1]);
}
if (argc>3)
{
dimx=atoi(argv[2]);
dimy=atoi(argv[3]);
}
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// Set global variables:
// Number of interior points in the x and y directions
Nx=N;
Ny=N;
// Total number of grid points
npts=(Nx+2)*(Ny+2);
// Array of active points
active_pts();
// Memory required per vector in bytes
nBytes=npts*sizeof(double);
// Allocate memory for the vectors
w=(double*)malloc(nBytes);
v=(double*)malloc(nBytes);
// set to zero
memset(w, 0, nBytes);
memset(v, 0, nBytes);
// Print the active points
if ((Nx<=16)&&(Ny<=16))
print_active();
random_vector(w);
random_vector(v);
double *d_v, *d_w, *d_x;
CHECK(hipMalloc((void **)&d_v, nBytes));
CHECK(hipMalloc((void **)&d_w, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_v, v, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_w, w, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
block.x=dimx;
block.y=dimy;
block.z=1;
grid.x=(Nx + block.x - 1) / block.x;
grid.y=(Ny + block.y - 1) / block.y;
grid.z=1;
// Test reduction
int Nunroll=8;
if (npts>256 && Nunroll>1)
{
double cpu_sum=0.0;
iStart = seconds();
for (int i = 0; i < npts; i++) cpu_sum += v[i];
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %f\n", iElaps, cpu_sum);
dim3 block2 (256,1);
int nblk = (npts + (block2.x*Nunroll) - 1)/(block2.x*Nunroll);
dim3 grid2 (nblk,1);
CHECK(hipMalloc((void **)&d_x, nblk*sizeof(double)));
CHECK(hipMemset(d_x,0,nblk*sizeof(double)));
x=(double*)malloc(nblk*sizeof(double));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling), dim3(grid2), dim3(block2), 0, 0, d_v, d_x, npts);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(x, d_x, nblk * sizeof(double),hipMemcpyDeviceToHost));
double gpu_sum = 0.0;
for (int i = 0; i < grid2.x; i++) gpu_sum += x[i];
printf("gpu Unrolling elapsed %f sec gpu_sum: %f <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid2.x, block2.x);
assert(abs((gpu_sum-cpu_sum)/cpu_sum)<sqrt(npts)*DBL_EPSILON);
}
// Unit vector
memset(v, 0, nBytes);
v[coord2index(Nx/2,Nx/2)]=1.0; // v=0 except at grid point (Nx/2+1,Ny/2+1)
print_vector("v",v,1);
iStart = seconds();
cg(w,v,1000,1e-10,&status);
iElaps = seconds() - iStart;
printf("cpu cg elapsed %f sec \n", iElaps);
print_vector("x",w,0);
free(active);
free(w);
free(v);
return (0);
}
|
35138ffb13f716fe582dcd1d15b887365db87e84.cu
|
/*********************************************************************
run-cg.cu
Main program. Tests the reduction and calls cg.
**********************************************************************/
#define MAIN_PROGRAM
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "global.h"
#include "geometry.h"
#include "linalg.h"
#include "cg.h"
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int nBytes, status, N;
double *w, *v, *x;
double iStart, iElaps;
N=32;
int dimx = 256;
int dimy = 1;
if (argc>1)
{
N=atoi(argv[1]);
}
if (argc>3)
{
dimx=atoi(argv[2]);
dimy=atoi(argv[3]);
}
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// Set global variables:
// Number of interior points in the x and y directions
Nx=N;
Ny=N;
// Total number of grid points
npts=(Nx+2)*(Ny+2);
// Array of active points
active_pts();
// Memory required per vector in bytes
nBytes=npts*sizeof(double);
// Allocate memory for the vectors
w=(double*)malloc(nBytes);
v=(double*)malloc(nBytes);
// set to zero
memset(w, 0, nBytes);
memset(v, 0, nBytes);
// Print the active points
if ((Nx<=16)&&(Ny<=16))
print_active();
random_vector(w);
random_vector(v);
double *d_v, *d_w, *d_x;
CHECK(cudaMalloc((void **)&d_v, nBytes));
CHECK(cudaMalloc((void **)&d_w, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_v, v, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_w, w, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
block.x=dimx;
block.y=dimy;
block.z=1;
grid.x=(Nx + block.x - 1) / block.x;
grid.y=(Ny + block.y - 1) / block.y;
grid.z=1;
// Test reduction
int Nunroll=8;
if (npts>256 && Nunroll>1)
{
double cpu_sum=0.0;
iStart = seconds();
for (int i = 0; i < npts; i++) cpu_sum += v[i];
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %f\n", iElaps, cpu_sum);
dim3 block2 (256,1);
int nblk = (npts + (block2.x*Nunroll) - 1)/(block2.x*Nunroll);
dim3 grid2 (nblk,1);
CHECK(cudaMalloc((void **)&d_x, nblk*sizeof(double)));
CHECK(cudaMemset(d_x,0,nblk*sizeof(double)));
x=(double*)malloc(nblk*sizeof(double));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling<<<grid2, block2>>>(d_v, d_x, npts);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(x, d_x, nblk * sizeof(double),cudaMemcpyDeviceToHost));
double gpu_sum = 0.0;
for (int i = 0; i < grid2.x; i++) gpu_sum += x[i];
printf("gpu Unrolling elapsed %f sec gpu_sum: %f <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid2.x, block2.x);
assert(abs((gpu_sum-cpu_sum)/cpu_sum)<sqrt(npts)*DBL_EPSILON);
}
// Unit vector
memset(v, 0, nBytes);
v[coord2index(Nx/2,Nx/2)]=1.0; // v=0 except at grid point (Nx/2+1,Ny/2+1)
print_vector("v",v,1);
iStart = seconds();
cg(w,v,1000,1e-10,&status);
iElaps = seconds() - iStart;
printf("cpu cg elapsed %f sec \n", iElaps);
print_vector("x",w,0);
free(active);
free(w);
free(v);
return (0);
}
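/* The reduceUnrolling kernel used above is provided elsewhere in this code
   base (declared via linalg.h) and is not defined in this file. The kernel
   below is only an illustrative sketch, under a hypothetical name, of what an
   unroll-by-8 block reduction of this kind typically looks like. It assumes
   blockDim.x is a power of two, reduces in place (overwriting g_idata), and
   writes one partial sum per block. */
__global__ void reduceUnrolling8_sketch(double *g_idata, double *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    double *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // each thread first accumulates up to eight elements spaced blockDim.x apart
    if (idx < n)
    {
        double sum = 0.0;
        for (int k = 0; k < 8; k++)
        {
            unsigned int i = idx + k * blockDim.x;
            if (i < n) sum += g_idata[i];
        }
        g_idata[idx] = sum;
    }
    __syncthreads();
    // in-place tree reduction over the first blockDim.x elements of this block's chunk
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride && idx + stride < n) idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}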
|
c0186c1029a695f383c2e93d26d5ca62756cb227.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "book.h"
#define BlockNum 10
#define ThreadNum 10
using namespace std;
__global__ void count(float *dnumbers)
{
dnumbers[blockIdx.x*blockDim.x+threadIdx.x]=blockIdx.x*blockDim.x+threadIdx.x;
}
__global__ void add(int a, int b, int *c)
{
*c = a + b;
}
int main()
{
/*
int size = BlockNum * ThreadNum * sizeof(float);
float *numbers, * dnumbers;
numbers = (float *)malloc(size);
hipMalloc(&dnumbers,size);
count<<<BlockNum,ThreadNum>>>(dnumbers);
hipMemcpy(numbers,dnumbers,size,hipMemcpyDeviceToHost);
for(int i=0;i<BlockNum * ThreadNum;++i)
{
cout<<numbers[i]<<endl;
}
*/
int c;
int *dev_c;
HANDLE_ERROR( hipMalloc( (void**)&dev_c, sizeof(int) ) );
hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, 2, 7, dev_c);
HANDLE_ERROR( hipMemcpy( &c,
dev_c,
sizeof(int),
hipMemcpyDeviceToHost ) );
printf( "2 + 7 = %d\n", c );
hipFree(dev_c);
return 0;
}
|
c0186c1029a695f383c2e93d26d5ca62756cb227.cu
|
#include <iostream>
#include "book.h"
#define BlockNum 10
#define ThreadNum 10
using namespace std;
__global__ void count(float *dnumbers)
{
dnumbers[blockIdx.x*blockDim.x+threadIdx.x]=blockIdx.x*blockDim.x+threadIdx.x;
}
__global__ void add(int a, int b, int *c)
{
*c = a + b;
}
int main()
{
/*
int size = BlockNum * ThreadNum * sizeof(float);
float *numbers, * dnumbers;
numbers = (float *)malloc(size);
cudaMalloc(&dnumbers,size);
count<<<BlockNum,ThreadNum>>>(dnumbers);
cudaMemcpy(numbers,dnumbers,size,cudaMemcpyDeviceToHost);
for(int i=0;i<BlockNum * ThreadNum;++i)
{
cout<<numbers[i]<<endl;
}
*/
int c;
int *dev_c;
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, sizeof(int) ) );
add<<<1,1>>>(2,7,dev_c);
HANDLE_ERROR( cudaMemcpy( &c,
dev_c,
sizeof(int),
cudaMemcpyDeviceToHost ) );
printf( "2 + 7 = %d\n", c );
cudaFree(dev_c);
return 0;
}
|
217eb6b55c5aff6b63b13900d5e9a37301eadf3d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../gpu_inc/cost.cuh"
__global__ void cu_Build_cost_table(uchar *d_ll, uchar *d_rr,
uint64_t *d_cost_table_l,
uint64_t *d_cost_table_r,
int img_w, int img_h,
int win_w, int win_h)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
int col = index % img_w;
int row = index / img_w;
uint64_t value_l = 0, value_r = 0;
uchar ctr_pixel_l = d_ll[row*img_w + col];
uchar ctr_pixel_r = d_rr[row*img_w + col];
for (int i = -win_h / 2; i <= win_h / 2; i++)
{
int y = MAX(row + i, 0); // check border
y = MIN(y, img_h - 1);
for (int j = -win_w / 2; j <= win_w / 2; j++)
{
if (i == 0 && j == 0)
continue;
int x = MAX(col + j, 0);
x = MIN(x, img_w - 1);
int index_ = y * img_w + x;
value_l = (value_l | (d_ll[index_] > ctr_pixel_l)) << 1;
value_r = (value_r | (d_rr[index_] > ctr_pixel_r)) << 1;
}
}
d_cost_table_l[row*img_w + col] = value_l;
d_cost_table_r[row*img_w + col] = value_r;
return;
}
__global__ void cu_Build_dsi_from_table(uint64_t *d_cost_table_l,
uint64_t *d_cost_table_r,
float *d_cost,
int img_w, int img_h, int max_disp)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
int col = index % img_w;
int row = index / img_w;
for (int d = 0; d < max_disp; d++)
{
int dst_index = row * img_w * max_disp + col * max_disp + d;
uint64_t ct_l = d_cost_table_l[row*img_w + col];
uint64_t ct_r = d_cost_table_r[row*img_w + MAX(col - d, 0)];
d_cost[dst_index] = cu_hamming_cost(ct_l, ct_r);
}
}
__device__ int cu_hamming_cost(uint64_t ct_l, uint64_t ct_r)
{
uint64_t not_the_same = ct_l ^ ct_r;
// find the number of '1', log(N)
int cnt = 0;
while (not_the_same)
{
//std::cout << not_the_same << std::endl;
cnt += (not_the_same & 1);
not_the_same >>= 1;
}
return cnt;
}
|
217eb6b55c5aff6b63b13900d5e9a37301eadf3d.cu
|
#include "../gpu_inc/cost.cuh"
__global__ void cu_Build_cost_table(uchar *d_ll, uchar *d_rr,
uint64_t *d_cost_table_l,
uint64_t *d_cost_table_r,
int img_w, int img_h,
int win_w, int win_h)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
int col = index % img_w;
int row = index / img_w;
uint64_t value_l = 0, value_r = 0;
uchar ctr_pixel_l = d_ll[row*img_w + col];
uchar ctr_pixel_r = d_rr[row*img_w + col];
for (int i = -win_h / 2; i <= win_h / 2; i++)
{
int y = MAX(row + i, 0); // check border
y = MIN(y, img_h - 1);
for (int j = -win_w / 2; j <= win_w / 2; j++)
{
if (i == 0 && j == 0)
continue;
int x = MAX(col + j, 0);
x = MIN(x, img_w - 1);
int index_ = y * img_w + x;
value_l = (value_l | (d_ll[index_] > ctr_pixel_l)) << 1;
value_r = (value_r | (d_rr[index_] > ctr_pixel_r)) << 1;
}
}
d_cost_table_l[row*img_w + col] = value_l;
d_cost_table_r[row*img_w + col] = value_r;
return;
}
__global__ void cu_Build_dsi_from_table(uint64_t *d_cost_table_l,
uint64_t *d_cost_table_r,
float *d_cost,
int img_w, int img_h, int max_disp)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
int col = index % img_w;
int row = index / img_w;
for (int d = 0; d < max_disp; d++)
{
int dst_index = row * img_w * max_disp + col * max_disp + d;
uint64_t ct_l = d_cost_table_l[row*img_w + col];
uint64_t ct_r = d_cost_table_r[row*img_w + MAX(col - d, 0)];
d_cost[dst_index] = cu_hamming_cost(ct_l, ct_r);
}
}
__device__ int cu_hamming_cost(uint64_t ct_l, uint64_t ct_r)
{
uint64_t not_the_same = ct_l ^ ct_r;
// find the number of '1', log(N)
int cnt = 0;
while (not_the_same)
{
//std::cout << not_the_same << std::endl;
cnt += (not_the_same & 1);
not_the_same >>= 1;
}
return cnt;
}
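/* Added note: an equivalent Hamming cost can be computed with the __popcll
   population-count intrinsic instead of the bit loop above; sketched here
   under a hypothetical name for illustration. */
__device__ int cu_hamming_cost_popc(uint64_t ct_l, uint64_t ct_r)
{
    return __popcll(ct_l ^ ct_r);
}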
|
0a9ea441f5745f170be1b122190df79469b7a23a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* File: nw_gpu.cu
* Author: Da Li
* Email: [email protected]
* Organization: Networking and Parallel Systems Lab (http://nps.missouri.edu/)
*
* Description: This file defines all the wrapper functions GPU implementations.
*
*/
#include <hip/hip_runtime_api.h>
#include "nw_gpu.h"
#include "nw_kernel_diagonal.cu"
#include "nw_kernel_tile.cu"
inline void cudaCheckError(int line, hipError_t ce)
{
if (ce != hipSuccess){
printf("Error: line %d %s\n", line, hipGetErrorString(ce));
exit(1);
}
}
void nw_gpu_allocate(int stream_num)
{
/* GPU memory allocation */
int i = stream_num;
cudaCheckError( __LINE__, hipMalloc( (void**)&d_sequence_set1[i], sizeof(char)*pos1[i][pair_num[i]] ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_sequence_set2[i], sizeof(char)*pos2[i][pair_num[i]] ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_score_matrix[i], sizeof(int)*pos_matrix[i][pair_num[i]]) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos1[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos2[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos_matrix[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_dim_matrix[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
/* Memcpy to device */
cudaCheckError( __LINE__, hipMemcpy( d_sequence_set1[i], sequence_set1[i], sizeof(char)*pos1[i][pair_num[i]], hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_sequence_set2[i], sequence_set2[i], sizeof(char)*pos2[i][pair_num[i]], hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_pos1[i], pos1[i], sizeof(unsigned int)*(pair_num[i]+1), hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_pos2[i], pos2[i], sizeof(unsigned int)*(pair_num[i]+1), hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_pos_matrix[i], pos_matrix[i], sizeof(unsigned int)*(pair_num[i]+1), hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_dim_matrix[i], dim_matrix[i], sizeof(unsigned int)*(pair_num[i]+1), hipMemcpyHostToDevice ) );
}
void nw_gpu_destroy(int stream_num)
{
/* GPU memory deallocation */
int i = stream_num;
cudaCheckError( __LINE__, hipFree(d_sequence_set1[i]) );
cudaCheckError( __LINE__, hipFree(d_sequence_set2[i]) );
cudaCheckError( __LINE__, hipFree(d_score_matrix[i]) );
cudaCheckError( __LINE__, hipFree(d_pos1[i]) );
cudaCheckError( __LINE__, hipFree(d_pos2[i]) );
cudaCheckError( __LINE__, hipFree(d_pos_matrix[i]) );
cudaCheckError( __LINE__, hipFree(d_dim_matrix[i]) );
}
void nw_gpu(char * sequence_set1, char * sequence_set2, unsigned int * pos1, unsigned int * pos2,
int * score_matrix, unsigned int * pos_matrix, unsigned int pair_num,
int * d_score_matrix, hipStream_t stream, int stream_num, int kernel_type)
{
hipError_t ce;
//printf("Kernel type: %d\n", kernel_type);
switch(kernel_type) {
case 0: nw_cuda_diagonal(stream, stream_num);
break;
case 1: nw_cuda_tile(stream, stream_num);
break;
default:
break;
}
ce = hipGetLastError();
if ( ce != hipSuccess) {
fprintf(stdout, "Error: %s\n", hipGetErrorString(ce));
}
}
void nw_gpu_copyback(int *score_matrix, int *d_score_matrix, unsigned int *pos_matrix, unsigned int pair_num, hipStream_t stream, int stream_num)
{
int i = stream_num;
/* Memcpy to host */
if (DEBUG) {
printf("Dataset %d : %d pairs\n", i, pair_num);
}
cudaCheckError(__LINE__,hipMemcpyAsync(score_matrix,d_score_matrix,sizeof(int)*pos_matrix[pair_num],hipMemcpyDeviceToHost, stream ) );
}
void nw_cuda_diagonal( hipStream_t stream, int stream_num)
{
int i = stream_num;
hipLaunchKernelGGL(( needleman_cuda_diagonal), dim3(config.num_blocks), dim3(config.num_threads), 0, stream,
d_sequence_set1[i], d_sequence_set2[i], d_pos1[i], d_pos2[i],
d_score_matrix[i], d_pos_matrix[i], pair_num[i], config.penalty);
}
void nw_cuda_tile( hipStream_t stream, int stream_num)
{
int maxLength = config.length;
int i = stream_num;
int tile_size = TILE_SIZE;
int iteration = maxLength / tile_size + 1;
if ( maxLength%tile_size==0 )
iteration--;
dim3 dimGrid(1,1);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
hipLaunchKernelGGL(( needleman_cuda_init), dim3(pair_num[i]), dim3(256), 0, stream, d_score_matrix[i], d_pos_matrix[i], d_dim_matrix[i], config.penalty);
//process top-left matrix
for( int j = 1; j <= iteration; ++j) {
dimGrid.x = pair_num[i];
dimGrid.y = j;
hipLaunchKernelGGL(( needleman_cuda_tile_upleft), dim3(config.num_blocks), dim3(config.num_threads), 0, stream,
d_sequence_set1[i], d_sequence_set2[i], d_pos1[i], d_pos2[i],
d_score_matrix[i], d_pos_matrix[i], d_dim_matrix[i], pair_num[i], j, config.penalty);
}
//process bottom-right matrix
for( int j = iteration - 1; j >= 1 ; j--){
dimGrid.x = pair_num[i];
dimGrid.y = j;
hipLaunchKernelGGL(( needleman_cuda_tile_bottomright), dim3(config.num_blocks), dim3(config.num_threads), 0, stream,
d_sequence_set1[i], d_sequence_set2[i], d_pos1[i], d_pos2[i],
d_score_matrix[i], d_pos_matrix[i], d_dim_matrix[i], pair_num[i], j, config.penalty);
}
}
|
0a9ea441f5745f170be1b122190df79469b7a23a.cu
|
/*
* File: nw_gpu.cu
* Author: Da Li
* Email: [email protected]
* Organization: Networking and Parallel Systems Lab (http://nps.missouri.edu/)
*
* Description: This file defines all the wrapper functions GPU implementations.
*
*/
#include <cuda_runtime_api.h>
#include "nw_gpu.h"
#include "nw_kernel_diagonal.cu"
#include "nw_kernel_tile.cu"
inline void cudaCheckError(int line, cudaError_t ce)
{
if (ce != cudaSuccess){
printf("Error: line %d %s\n", line, cudaGetErrorString(ce));
exit(1);
}
}
void nw_gpu_allocate(int stream_num)
{
/* GPU memory allocation */
int i = stream_num;
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_sequence_set1[i], sizeof(char)*pos1[i][pair_num[i]] ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_sequence_set2[i], sizeof(char)*pos2[i][pair_num[i]] ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_score_matrix[i], sizeof(int)*pos_matrix[i][pair_num[i]]) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos1[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos2[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos_matrix[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_dim_matrix[i], sizeof(unsigned int)*(pair_num[i]+1) ) );
/* Memcpy to device */
cudaCheckError( __LINE__, cudaMemcpy( d_sequence_set1[i], sequence_set1[i], sizeof(char)*pos1[i][pair_num[i]], cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_sequence_set2[i], sequence_set2[i], sizeof(char)*pos2[i][pair_num[i]], cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_pos1[i], pos1[i], sizeof(unsigned int)*(pair_num[i]+1), cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_pos2[i], pos2[i], sizeof(unsigned int)*(pair_num[i]+1), cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_pos_matrix[i], pos_matrix[i], sizeof(unsigned int)*(pair_num[i]+1), cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_dim_matrix[i], dim_matrix[i], sizeof(unsigned int)*(pair_num[i]+1), cudaMemcpyHostToDevice ) );
}
void nw_gpu_destroy(int stream_num)
{
/* GPU memory deallocation */
int i = stream_num;
cudaCheckError( __LINE__, cudaFree(d_sequence_set1[i]) );
cudaCheckError( __LINE__, cudaFree(d_sequence_set2[i]) );
cudaCheckError( __LINE__, cudaFree(d_score_matrix[i]) );
cudaCheckError( __LINE__, cudaFree(d_pos1[i]) );
cudaCheckError( __LINE__, cudaFree(d_pos2[i]) );
cudaCheckError( __LINE__, cudaFree(d_pos_matrix[i]) );
cudaCheckError( __LINE__, cudaFree(d_dim_matrix[i]) );
}
void nw_gpu(char * sequence_set1, char * sequence_set2, unsigned int * pos1, unsigned int * pos2,
int * score_matrix, unsigned int * pos_matrix, unsigned int pair_num,
int * d_score_matrix, cudaStream_t stream, int stream_num, int kernel_type)
{
cudaError_t ce;
//printf("Kernel type: %d\n", kernel_type);
switch(kernel_type) {
case 0: nw_cuda_diagonal(stream, stream_num);
break;
case 1: nw_cuda_tile(stream, stream_num);
break;
default:
break;
}
ce = cudaGetLastError();
if ( ce != cudaSuccess) {
fprintf(stdout, "Error: %s\n", cudaGetErrorString(ce));
}
}
void nw_gpu_copyback(int *score_matrix, int *d_score_matrix, unsigned int *pos_matrix, unsigned int pair_num, cudaStream_t stream, int stream_num)
{
int i = stream_num;
/* Memcpy to host */
if (DEBUG) {
printf("Dataset %d : %d pairs\n", i, pair_num);
}
cudaCheckError(__LINE__,cudaMemcpyAsync(score_matrix,d_score_matrix,sizeof(int)*pos_matrix[pair_num],cudaMemcpyDeviceToHost, stream ) );
}
void nw_cuda_diagonal( cudaStream_t stream, int stream_num)
{
int i = stream_num;
needleman_cuda_diagonal<<<config.num_blocks, config.num_threads, 0, stream>>>(
d_sequence_set1[i], d_sequence_set2[i], d_pos1[i], d_pos2[i],
d_score_matrix[i], d_pos_matrix[i], pair_num[i], config.penalty);
}
void nw_cuda_tile( cudaStream_t stream, int stream_num)
{
int maxLength = config.length;
int i = stream_num;
int tile_size = TILE_SIZE;
int iteration = maxLength / tile_size + 1;
if ( maxLength%tile_size==0 )
iteration--;
dim3 dimGrid(1,1);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
needleman_cuda_init<<< pair_num[i], 256, 0, stream>>>(d_score_matrix[i], d_pos_matrix[i], d_dim_matrix[i], config.penalty);
//process top-left matrix
for( int j = 1; j <= iteration; ++j) {
dimGrid.x = pair_num[i];
dimGrid.y = j;
needleman_cuda_tile_upleft<<<config.num_blocks, config.num_threads, 0, stream>>>(
d_sequence_set1[i], d_sequence_set2[i], d_pos1[i], d_pos2[i],
d_score_matrix[i], d_pos_matrix[i], d_dim_matrix[i], pair_num[i], j, config.penalty);
}
//process bottom-right matrix
for( int j = iteration - 1; j >= 1 ; j--){
dimGrid.x = pair_num[i];
dimGrid.y = j;
needleman_cuda_tile_bottomright<<<config.num_blocks, config.num_threads, 0, stream>>>(
d_sequence_set1[i], d_sequence_set2[i], d_pos1[i], d_pos2[i],
d_score_matrix[i], d_pos_matrix[i], d_dim_matrix[i], pair_num[i], j, config.penalty);
}
}
|
b62f0e5c64f91851ee8b9294248a0f589694b85e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix.h"
template <typename T>
__host__ void Matrix<T>::fill(std::normal_distribution<float> distribution) {
std::default_random_engine generator(0);
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = distribution(generator);
}
}
template <typename T>
__host__ void Matrix<T>::fill(std::bernoulli_distribution distribution) {
std::default_random_engine generator(0);
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = distribution(generator)*2 - 1;
}
}
template <typename T>
__host__ void Matrix<T>::fill(std::uniform_int_distribution<> distribution) {
std::default_random_engine generator(0);
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = distribution(generator);
}
}
template <typename T>
__host__ __device__ void Matrix<T>::fill(T val) {
if (this->device==0){
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = val;
}
} else {
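// Added note: hipMemset fills bytes, not elements of T, so this branch only
// produces the intended result when val is 0 (or another repeated-byte pattern).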
hipMemset(this->data, val, (this->numRows * this->numCols)*sizeof(T));
}
}
template <typename T>
Matrix<T> Matrix<T>::transpose() {
// int temp = this->numRows;
// this->numRows = this->numCols;
// this->numCols = temp;
// T* data = new T[this->numCols* this->numRows];
Matrix<T> thisT = Matrix<T>(this->numCols, this->numRows);
for (int r = 0; r < this->numRows; r++) {
for (int c = 0; c < this->numCols; c++) {
thisT.data[thisT.index(c,r)] = this->data[this->index(r,c)];
}
}
return thisT;
}
template <typename T>
__host__ __device__ int Matrix<T>::index(int row, int col) {
return row*(this->numCols) + col;
}
// Move matrix between CPU and device
template <typename T>
__host__ Matrix<T> Matrix<T>::toDevice(int device) {
if (this->device == 0 && device != 0) {
// assert(device != this->device);
int dataBytes = (this->numRows * this->numCols) * sizeof(T);
T *dataRaw;
hipMalloc(&dataRaw, dataBytes);
hipMemcpy(dataRaw, this->data, dataBytes, hipMemcpyHostToDevice);
Matrix<T> ret = Matrix<T>(dataRaw, this->numRows, this->numCols, device);
return ret;
} else if (this->device != 0 && device == 0) {
// Move back to CPU
// assert(device != this->device);
int dataBytes = (this->numRows * this->numCols) * sizeof(T);
T *dataRaw = new T[this->numRows * this->numCols];
hipMemcpy(dataRaw, this->data, dataBytes, hipMemcpyDeviceToHost);
Matrix<T> ret = Matrix<T>(dataRaw, this->numRows, this->numCols, device);
return ret;
} else {
throw NotImplementedException("Matrix<T>::toDevice()");
}
}
// Removes and returns column from data
template <typename T>
__host__ pair<Matrix<T>, Matrix<T>> Matrix<T>::popColumn(int columnIndex) {
if (columnIndex < 0){
columnIndex = this->numCols + columnIndex;
}
float *data = new float[this->numRows * (this->numCols - 1)];
float *column = new float[this->numRows];
// Get specific elements from data and store in column
for(int row = 0; row < this->numRows; row++) {
column[row] = this->data[this->index(row, columnIndex)];
}
// Copy this->data minus the popped column to a new data matrix
// Copy first row up to columnIndex
auto start = this->data;
auto end = start + columnIndex;
auto destination = data;
copy(start, end, destination);
for(int row = 1; row < this->numRows-1; row++) {
// Adjust copy start and end as well as destination locations
start = end+1;
end += this->numCols;
destination += this->numCols - 1;
// Copy from [row-1, columnIndex+1] to (row, columnIndex)
copy(start, end, destination);
}
// Adjust copy start and end as well as destination locations
// Set end location to the end of the data matrix
start = end+1;
end = this->data + (this->numRows * this->numCols);
destination += this->numCols - 1;
// Copy from [last row, columnIndex+1] to (last row, last column)
copy(start, end, destination);
// mat.numCols--;
return make_pair(Matrix(column, this->numRows, 1), Matrix(data, this->numRows, this->numCols-1));
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulSeq(Matrix<T> &left, Matrix<G> &right) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = right.numCols;
assert(dimCenter == right.numRows);
Matrix result = Matrix<decltype(std::declval<T&>() * std::declval<G&>())>(dimLeft, dimRight);
result.fill(0);
// Matrix Mult
for (int i = 0; i < dimLeft; i++) {
for (int j = 0; j < dimRight; j++) {
for (int k = 0; k < dimCenter; k++) {
result.data[result.index(i, j)] += left.data[left.index(i, k)] * right.data[right.index(k, j)];
}
}
}
return result;
}
#define TILE_WIDTH 32
template <typename T, typename G>
__global__ void matMulGPUKernel2DShmem(Matrix<T> left, Matrix<G> right, Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result, int dimLeft, int dimRight, int dimCenter) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ T leftCache[TILE_WIDTH][TILE_WIDTH];
__shared__ T rightCache[TILE_WIDTH][TILE_WIDTH];
decltype(std::declval<T&>() * std::declval<G&>()) matmulValue = 0;
for (int m = 0; m < (TILE_WIDTH + dimLeft - 1)/TILE_WIDTH; m++) {
leftCache[threadIdx.x][threadIdx.y] = left.data[left.index(i, (m * TILE_WIDTH + threadIdx.y))];
rightCache[threadIdx.x][threadIdx.y] = right.data[right.index((m * TILE_WIDTH + threadIdx.x), j)];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++) {
matmulValue += leftCache[threadIdx.x][k] * rightCache[k][threadIdx.y];
}
}
//printf("SHMEM Matmul value: %f\n", matmulValue);
//printf("dimleft: %d, Block idx: %d\n", dimLeft, blockIdx.x);
result.data[result.index(i, j)] = matmulValue;
}
template <typename T, typename G>
__global__ void matMulGPUKernel2D(Matrix<T> left, Matrix<G> right, Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result, int dimLeft, int dimRight, int dimCenter) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
decltype(std::declval<T&>() * std::declval<G&>()) matmulValue = 0;
for (int k = 0; k < dimCenter; k++) {
matmulValue += left.data[left.index(i, k)] * right.data[right.index(k, j)];
}
result.data[result.index(i, j)] = matmulValue;
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulGPU(Matrix<T> &left, Matrix<G> &right) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = right.numCols;
assert(dimCenter == right.numRows);
printf("Matmul with %d x %d matrix and %d x %d matrix\n", dimLeft, dimCenter, right.numRows, dimRight);
assert(left.device == right.device);
assert(left.device != 0);
Matrix result = Matrix<decltype(std::declval<T&>() * std::declval<G&>())>(dimLeft, dimRight).toDevice(left.device); // TODO: improve this
result.fill(0);
// Launching a 2D kernel
int xBlock = (int)ceil(((float)dimLeft/512.0f));
int yBlock = (int)ceil(((float)dimRight/512.0f));
dim3 blockSize(xBlock, yBlock);
int bx = (dimLeft + blockSize.x - 1)/blockSize.x;
int by = (dimRight + blockSize.y - 1)/blockSize.y;
dim3 gridSize = dim3(bx, by);
hipEventRecord(start);
hipLaunchKernelGGL(( matMulGPUKernel2D), dim3(gridSize), dim3(blockSize), 0, 0, left, right, result, dimLeft, dimRight, dimCenter);
hipEventRecord(stop);
/*
//int blockDim = 32;
dim3 blockSize(TILE_WIDTH, TILE_WIDTH);
int xGrid = (int)ceil(((float)dimLeft/(float)TILE_WIDTH));
int yGrid = (int)ceil(((float)dimRight/(float)TILE_WIDTH));
dim3 gridSize(xGrid, yGrid);
hipEventRecord(start);
matMulGPUKernel2DShmem<<<gridSize, blockSize>>>(left, right, result, dimLeft, dimRight, dimCenter);
hipEventRecord(stop);
*/
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("GPU matmul took %f ms\n", milliseconds);
return result;
}
template <typename T, typename G>
__global__ void matMulDiagGPUKernel(Matrix<T> left, Matrix<G> diag, Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result, int dimCenter) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int k = 0; k < dimCenter; k++) {
result.data[result.index(i, k)] = diag.data[k] * left.data[left.index(i, k)];
}
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulDiagGPU(Matrix<T> &left, Matrix<G> &diag) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
assert(dimCenter == diag.numRows);
assert(diag.numCols == 1);
// Mult mat x D
Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result = Matrix<T>(dimLeft, dimCenter).toDevice(left.device);
int blockSize = (int)ceil(((float)dimLeft/512.0f));
hipLaunchKernelGGL(( matMulDiagGPUKernel), dim3(blockSize), dim3(512), 0, 0, left, diag, result, dimCenter);
hipDeviceSynchronize();
return result;
}
template <typename T>
__global__ void matMulWalshHadamardGPUKernel(Matrix<T> left, Matrix<T> result, int dimLeft, int dimCenter) {
int pointIdx = blockIdx.x * blockDim.x + threadIdx.x;
int log2dim = ceil(log2(dimCenter));
int hShape = pow(2,log2dim);
int order = 1;
int stride = 2;
int split = stride/2;
Matrix<T> mats [] = {Matrix<T>(hShape, 1), Matrix<T>(hShape, 1)};
mats[0].fill(0);
mats[1].fill(0);
int newIdx = 0;
for (int i = 0; i < dimCenter; i++) {
mats[newIdx].data[i] = left.data[left.index(pointIdx, i)];
}
for (order = 2; order < log2dim; order++) { // cannot be parallelized: each order depends on the previous one
newIdx = !newIdx;
stride = pow(2, order);
split = stride/2;
for (int strideId = 0; strideId < hShape/stride; strideId++) {
for (int idx = 0; idx < split; idx++) {
// c0
mats[newIdx].data[strideId*stride+idx] = mats[!newIdx].data[strideId*stride+idx] + mats[!newIdx].data[strideId*stride+idx+(split/2)];
// c1
mats[newIdx].data[strideId*stride+idx+split] = mats[!newIdx].data[strideId*stride+idx+split] - mats[!newIdx].data[strideId*stride+idx+split+(split/2)];
}
}
}
for (int d = 0; d < dimCenter; d++) {
result.data[result.index(pointIdx, d)] = mats[newIdx].data[d];
}
//CLEANUP
delete [] mats[0].data;
delete [] mats[1].data;
}
template <typename T>
__host__ Matrix<T> Matrix<T>::matMulWalshHadamardGPU(Matrix<T> left) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
Matrix<T> result = Matrix<T>(dimLeft, dimCenter).toDevice(left.device);
assert(dimCenter > 1); // TODO support this
int blockSize = (int)ceil(((float)dimLeft/512.0f));
hipLaunchKernelGGL(( matMulWalshHadamardGPUKernel), dim3(blockSize), dim3(512), 0, 0, left, result, dimLeft, dimCenter);
hipDeviceSynchronize();
return result;
}
template <typename T, typename G>
__global__ void matMulWithOneHotGPUKernel(Matrix<T> left, Matrix<G> oneHot, Matrix<T> result, int dimRight) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for(int j = 0; j < dimRight; j++) {
int onehotdim = oneHot.data[j];
result.data[result.index(i, j)] = left.data[left.index(i, onehotdim)];
}
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulWithOneHotGPU(Matrix<T> left, Matrix<G> oneHot) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = oneHot.numCols;
assert(oneHot.numRows == 1);
Matrix<T> result = Matrix<T>(dimLeft, dimRight).toDevice(left.device);
int blockSize = (int)ceil(((float)dimLeft/512.0f));
hipLaunchKernelGGL(( matMulWithOneHotGPUKernel), dim3(blockSize), dim3(512), 0, 0, left, oneHot, result, dimRight);
hipDeviceSynchronize();
return result;
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulDiagSeq(Matrix<T> &left, Matrix<G> &diag) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
assert(dimCenter == diag.numRows);
assert(diag.numCols == 1);
// Mult mat x D
Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result = Matrix<T>(dimLeft, dimCenter);
for (int i = 0; i < dimLeft; i++) {
for (int k = 0; k < dimCenter; k++) {
result.data[result.index(i, k)] = diag.data[k] * left.data[left.index(i, k)];
}
}
return result;
}
template <typename T>
__host__ Matrix<T> Matrix<T>::matMulWalshHadamardSeq(Matrix<T> left) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
Matrix<T> result = Matrix<T>(dimLeft, dimCenter);
assert(dimCenter > 1); // TODO support this
int log2dim = ceil(log2(dimCenter));
int hShape = pow(2,log2dim);
for (int pointIdx=0; pointIdx < dimLeft; pointIdx++) { // should be parallelized
int order = 1;
int stride = 2;
int split = stride/2;
Matrix<T> mats [] = {Matrix<T>(hShape, 1), Matrix<T>(hShape, 1)};
mats[0].fill(0);
mats[1].fill(0);
int newIdx = 0;
for (int i = 0; i < dimCenter; i++) {
mats[newIdx].data[i] = left.data[left.index(pointIdx, i)];
}
for (order = 2; order < log2dim; order++) { // cannot be parallelized: each order depends on the previous one
newIdx = !newIdx;
stride = pow(2, order);
split = stride/2;
for (int strideId = 0; strideId < hShape/stride; strideId++) { // could be parallelized; these two loops together cover the original dimension
for (int idx = 0; idx < split; idx++) { // could be parallelized
// c0
mats[newIdx].data[strideId*stride+idx] = mats[!newIdx].data[strideId*stride+idx] + mats[!newIdx].data[strideId*stride+idx+(split/2)];
// c1
mats[newIdx].data[strideId*stride+idx+split] = mats[!newIdx].data[strideId*stride+idx+split] - mats[!newIdx].data[strideId*stride+idx+split+(split/2)];
}
}
// note: this simplified single loop did not produce correct results, so it is left disabled
// for (int idx = 0; idx < hShape; idx++) { // could be parallelized; covers the original dimension
// mats[newIdx].data[idx] = mats[!newIdx].data[idx] + mats[!newIdx].data[idx + (split/2)];
// }
}
for (int d = 0; d < dimCenter; d++) {
result.data[result.index(pointIdx, d)] = mats[newIdx].data[d];
}
//CLEANUP
delete [] mats[0].data;
delete [] mats[1].data;
}
return result;
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulWithOneHotSeq(Matrix<T> left, Matrix<G> oneHot) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = oneHot.numCols;
assert(oneHot.numRows == 1);
Matrix<T> result = Matrix<T>(dimLeft, dimRight);
for (int i = 0; i < dimLeft; i++) { // should be parallelized
for(int j = 0; j < dimRight; j++) {
int onehotdim = oneHot.data[j];
result.data[result.index(i, j)] = left.data[left.index(i, onehotdim)];
}
}
return result;
}
template <typename T>
__host__ __device__ float Matrix<T>::l2RowDistanceSeq(Matrix &left, int leftRow, Matrix &right, int rightRow) {
int dim = left.numCols;
assert(dim == right.numCols);
float currentDistance = 0.;
for (int d = 0; d < dim; d++) {
float term = left.data[left.index(leftRow, d)] - right.data[right.index(rightRow, d)];
currentDistance += term*term;
}
return currentDistance;
}
template <typename T>
__host__ __device__ void Matrix<T>::print() {
if (this->numCols != 1) {
printf("[\n");
for (int row = 0; row < this->numRows; row++) {
printf("[ ");
for (int col = 0; col < this->numCols; col++) {
printf("%s ", std::to_string(this->data[this->index(row, col)]).c_str());
// cout << this->data[this->index(row, col)] << " ";
}
printf("]\n");
}
printf("]\n");
} else {
printf("[");
for (int row = 0; row < this->numRows; row++) {
// cout << this->data[this->index(row, 0)] << " ";
printf("%s ", std::to_string(this->data[this->index(row, 0)]).c_str());
}
printf("]\n");
}
}
// template class Matrix<float>;
// template class Matrix<bool>;
// template class Matrix<int>;
|
b62f0e5c64f91851ee8b9294248a0f589694b85e.cu
|
#include "matrix.h"
template <typename T>
__host__ void Matrix<T>::fill(std::normal_distribution<float> distribution) {
std::default_random_engine generator(0);
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = distribution(generator);
}
}
template <typename T>
__host__ void Matrix<T>::fill(std::bernoulli_distribution distribution) {
std::default_random_engine generator(0);
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = distribution(generator)*2 - 1;
}
}
template <typename T>
__host__ void Matrix<T>::fill(std::uniform_int_distribution<> distribution) {
std::default_random_engine generator(0);
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = distribution(generator);
}
}
template <typename T>
__host__ __device__ void Matrix<T>::fill(T val) {
if (this->device==0){
for (int i = 0; i < (this->numRows * this->numCols); i++) {
this->data[i] = val;
}
} else {
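// Added note: cudaMemset fills bytes, not elements of T, so this branch only
// produces the intended result when val is 0 (or another repeated-byte pattern).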
cudaMemset(this->data, val, (this->numRows * this->numCols)*sizeof(T));
}
}
template <typename T>
Matrix<T> Matrix<T>::transpose() {
// int temp = this->numRows;
// this->numRows = this->numCols;
// this->numCols = temp;
// T* data = new T[this->numCols* this->numRows];
Matrix<T> thisT = Matrix<T>(this->numCols, this->numRows);
for (int r = 0; r < this->numRows; r++) {
for (int c = 0; c < this->numCols; c++) {
thisT.data[thisT.index(c,r)] = this->data[this->index(r,c)];
}
}
return thisT;
}
template <typename T>
__host__ __device__ int Matrix<T>::index(int row, int col) {
return row*(this->numCols) + col;
}
// Move matrix between CPU and device
template <typename T>
__host__ Matrix<T> Matrix<T>::toDevice(int device) {
if (this->device == 0 && device != 0) {
// assert(device != this->device);
int dataBytes = (this->numRows * this->numCols) * sizeof(T);
T *dataRaw;
cudaMalloc(&dataRaw, dataBytes);
cudaMemcpy(dataRaw, this->data, dataBytes, cudaMemcpyHostToDevice);
Matrix<T> ret = Matrix<T>(dataRaw, this->numRows, this->numCols, device);
return ret;
} else if (this->device != 0 && device == 0) {
// Move back to CPU
// assert(device != this->device);
int dataBytes = (this->numRows * this->numCols) * sizeof(T);
T *dataRaw = new T[this->numRows * this->numCols];
cudaMemcpy(dataRaw, this->data, dataBytes, cudaMemcpyDeviceToHost);
Matrix<T> ret = Matrix<T>(dataRaw, this->numRows, this->numCols, device);
return ret;
} else {
throw NotImplementedException("Matrix<T>::toDevice()");
}
}
// Removes and returns column from data
template <typename T>
__host__ pair<Matrix<T>, Matrix<T>> Matrix<T>::popColumn(int columnIndex) {
if (columnIndex < 0){
columnIndex = this->numCols + columnIndex;
}
float *data = new float[this->numRows * (this->numCols - 1)];
float *column = new float[this->numRows];
// Get specific elements from data and store in column
for(int row = 0; row < this->numRows; row++) {
column[row] = this->data[this->index(row, columnIndex)];
}
// Copy this->data minus the popped column to a new data matrix
// Copy first row up to columnIndex
auto start = this->data;
auto end = start + columnIndex;
auto destination = data;
copy(start, end, destination);
for(int row = 1; row < this->numRows-1; row++) {
// Adjust copy start and end as well as destination locations
start = end+1;
end += this->numCols;
destination += this->numCols - 1;
// Copy from [row-1, columnIndex+1] to (row, columnIndex)
copy(start, end, destination);
}
// Adjust copy start and end as well as destination locations
// Set end location to the end of the data matrix
start = end+1;
end = this->data + (this->numRows * this->numCols);
destination += this->numCols - 1;
// Copy from [last row, columnIndex+1] to (last row, last column)
copy(start, end, destination);
// mat.numCols--;
return make_pair(Matrix(column, this->numRows, 1), Matrix(data, this->numRows, this->numCols-1));
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulSeq(Matrix<T> &left, Matrix<G> &right) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = right.numCols;
assert(dimCenter == right.numRows);
Matrix result = Matrix<decltype(std::declval<T&>() * std::declval<G&>())>(dimLeft, dimRight);
result.fill(0);
// Matrix Mult
for (int i = 0; i < dimLeft; i++) {
for (int j = 0; j < dimRight; j++) {
for (int k = 0; k < dimCenter; k++) {
result.data[result.index(i, j)] += left.data[left.index(i, k)] * right.data[right.index(k, j)];
}
}
}
return result;
}
#define TILE_WIDTH 32
template <typename T, typename G>
__global__ void matMulGPUKernel2DShmem(Matrix<T> left, Matrix<G> right, Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result, int dimLeft, int dimRight, int dimCenter) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ T leftCache[TILE_WIDTH][TILE_WIDTH];
__shared__ T rightCache[TILE_WIDTH][TILE_WIDTH];
decltype(std::declval<T&>() * std::declval<G&>()) matmulValue = 0;
for (int m = 0; m < (TILE_WIDTH + dimLeft - 1)/TILE_WIDTH; m++) {
leftCache[threadIdx.x][threadIdx.y] = left.data[left.index(i, (m * TILE_WIDTH + threadIdx.y))];
rightCache[threadIdx.x][threadIdx.y] = right.data[right.index((m * TILE_WIDTH + threadIdx.x), j)];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++) {
matmulValue += leftCache[threadIdx.x][k] * rightCache[k][threadIdx.y];
}
}
//printf("SHMEM Matmul value: %f\n", matmulValue);
//printf("dimleft: %d, Block idx: %d\n", dimLeft, blockIdx.x);
result.data[result.index(i, j)] = matmulValue;
}
template <typename T, typename G>
__global__ void matMulGPUKernel2D(Matrix<T> left, Matrix<G> right, Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result, int dimLeft, int dimRight, int dimCenter) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
decltype(std::declval<T&>() * std::declval<G&>()) matmulValue = 0;
for (int k = 0; k < dimCenter; k++) {
matmulValue += left.data[left.index(i, k)] * right.data[right.index(k, j)];
}
result.data[result.index(i, j)] = matmulValue;
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulGPU(Matrix<T> &left, Matrix<G> &right) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = right.numCols;
assert(dimCenter == right.numRows);
printf("Matmul with %d x %d matrix and %d x %d matrix\n", dimLeft, dimCenter, right.numRows, dimRight);
assert(left.device == right.device);
assert(left.device != 0);
Matrix result = Matrix<decltype(std::declval<T&>() * std::declval<G&>())>(dimLeft, dimRight).toDevice(left.device); // TODO: improve this
result.fill(0);
// Launching a 2D kernel
int xBlock = (int)ceil(((float)dimLeft/512.0f));
int yBlock = (int)ceil(((float)dimRight/512.0f));
dim3 blockSize(xBlock, yBlock);
int bx = (dimLeft + blockSize.x - 1)/blockSize.x;
int by = (dimRight + blockSize.y - 1)/blockSize.y;
dim3 gridSize = dim3(bx, by);
cudaEventRecord(start);
matMulGPUKernel2D<<<gridSize, blockSize>>>(left, right, result, dimLeft, dimRight, dimCenter);
cudaEventRecord(stop);
/*
//int blockDim = 32;
dim3 blockSize(TILE_WIDTH, TILE_WIDTH);
int xGrid = (int)ceil(((float)dimLeft/(float)TILE_WIDTH));
int yGrid = (int)ceil(((float)dimRight/(float)TILE_WIDTH));
dim3 gridSize(xGrid, yGrid);
cudaEventRecord(start);
matMulGPUKernel2DShmem<<<gridSize, blockSize>>>(left, right, result, dimLeft, dimRight, dimCenter);
cudaEventRecord(stop);
*/
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("GPU matmul took %f ms\n", milliseconds);
return result;
}
template <typename T, typename G>
__global__ void matMulDiagGPUKernel(Matrix<T> left, Matrix<G> diag, Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result, int dimCenter) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int k = 0; k < dimCenter; k++) {
result.data[result.index(i, k)] = diag.data[k] * left.data[left.index(i, k)];
}
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulDiagGPU(Matrix<T> &left, Matrix<G> &diag) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
assert(dimCenter == diag.numRows);
assert(diag.numCols == 1);
// Mult mat x D
Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result = Matrix<T>(dimLeft, dimCenter).toDevice(left.device);
int blockSize = (int)ceil(((float)dimLeft/512.0f));
matMulDiagGPUKernel<<<blockSize, 512>>>(left, diag, result, dimCenter);
cudaDeviceSynchronize();
return result;
}
template <typename T>
__global__ void matMulWalshHadamardGPUKernel(Matrix<T> left, Matrix<T> result, int dimLeft, int dimCenter) {
int pointIdx = blockIdx.x * blockDim.x + threadIdx.x;
int log2dim = ceil(log2(dimCenter));
int hShape = pow(2,log2dim);
int order = 1;
int stride = 2;
int split = stride/2;
Matrix<T> mats [] = {Matrix<T>(hShape, 1), Matrix<T>(hShape, 1)};
mats[0].fill(0);
mats[1].fill(0);
int newIdx = 0;
for (int i = 0; i < dimCenter; i++) {
mats[newIdx].data[i] = left.data[left.index(pointIdx, i)];
}
for (order = 2; order < log2dim; order++) { // cannot be parallelized: each order depends on the previous one
newIdx = !newIdx;
stride = pow(2, order);
split = stride/2;
for (int strideId = 0; strideId < hShape/stride; strideId++) {
for (int idx = 0; idx < split; idx++) {
// c0
mats[newIdx].data[strideId*stride+idx] = mats[!newIdx].data[strideId*stride+idx] + mats[!newIdx].data[strideId*stride+idx+(split/2)];
// c1
mats[newIdx].data[strideId*stride+idx+split] = mats[!newIdx].data[strideId*stride+idx+split] - mats[!newIdx].data[strideId*stride+idx+split+(split/2)];
}
}
}
for (int d = 0; d < dimCenter; d++) {
result.data[result.index(pointIdx, d)] = mats[newIdx].data[d];
}
//CLEANUP
delete [] mats[0].data;
delete [] mats[1].data;
}
template <typename T>
__host__ Matrix<T> Matrix<T>::matMulWalshHadamardGPU(Matrix<T> left) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
Matrix<T> result = Matrix<T>(dimLeft, dimCenter).toDevice(left.device);
assert(dimCenter > 1); // TODO support this
int blockSize = (int)ceil(((float)dimLeft/512.0f));
matMulWalshHadamardGPUKernel<<<blockSize, 512>>>(left, result, dimLeft, dimCenter);
cudaDeviceSynchronize();
return result;
}
template <typename T, typename G>
__global__ void matMulWithOneHotGPUKernel(Matrix<T> left, Matrix<G> oneHot, Matrix<T> result, int dimRight) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for(int j = 0; j < dimRight; j++) {
int onehotdim = oneHot.data[j];
result.data[result.index(i, j)] = left.data[left.index(i, onehotdim)];
}
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulWithOneHotGPU(Matrix<T> left, Matrix<G> oneHot) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = oneHot.numCols;
assert(oneHot.numRows == 1);
Matrix<T> result = Matrix<T>(dimLeft, dimRight).toDevice(left.device);
int blockSize = (int)ceil(((float)dimLeft/512.0f));
matMulWithOneHotGPUKernel<<<blockSize, 512>>>(left, oneHot, result, dimRight);
cudaDeviceSynchronize();
return result;
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulDiagSeq(Matrix<T> &left, Matrix<G> &diag) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
assert(dimCenter == diag.numRows);
assert(diag.numCols == 1);
// Mult mat x D
Matrix<decltype(std::declval<T&>() * std::declval<G&>())> result = Matrix<T>(dimLeft, dimCenter);
for (int i = 0; i < dimLeft; i++) {
for (int k = 0; k < dimCenter; k++) {
result.data[result.index(i, k)] = diag.data[k] * left.data[left.index(i, k)];
}
}
return result;
}
template <typename T>
__host__ Matrix<T> Matrix<T>::matMulWalshHadamardSeq(Matrix<T> left) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
Matrix<T> result = Matrix<T>(dimLeft, dimCenter);
assert(dimCenter > 1); // TODO support this
int log2dim = ceil(log2(dimCenter));
int hShape = pow(2,log2dim);
for (int pointIdx=0; pointIdx < dimLeft; pointIdx++) { // should be parallelized
int order = 1;
int stride = 2;
int split = stride/2;
Matrix<T> mats [] = {Matrix<T>(hShape, 1), Matrix<T>(hShape, 1)};
mats[0].fill(0);
mats[1].fill(0);
int newIdx = 0;
for (int i = 0; i < dimCenter; i++) {
mats[newIdx].data[i] = left.data[left.index(pointIdx, i)];
}
for (order = 2; order < log2dim; order++) { // cannot be parallelized: each order depends on the previous one
newIdx = !newIdx;
stride = pow(2, order);
split = stride/2;
for (int strideId = 0; strideId < hShape/stride; strideId++) { // could be parallelized; these two loops together cover the original dimension
for (int idx = 0; idx < split; idx++) { // could be parallelized
// c0
mats[newIdx].data[strideId*stride+idx] = mats[!newIdx].data[strideId*stride+idx] + mats[!newIdx].data[strideId*stride+idx+(split/2)];
// c1
mats[newIdx].data[strideId*stride+idx+split] = mats[!newIdx].data[strideId*stride+idx+split] - mats[!newIdx].data[strideId*stride+idx+split+(split/2)];
}
}
// note: this simplified single loop did not produce correct results, so it is left disabled
// for (int idx = 0; idx < hShape; idx++) { // could be parallelized; covers the original dimension
// mats[newIdx].data[idx] = mats[!newIdx].data[idx] + mats[!newIdx].data[idx + (split/2)];
// }
}
for (int d = 0; d < dimCenter; d++) {
result.data[result.index(pointIdx, d)] = mats[newIdx].data[d];
}
//CLEANUP
delete [] mats[0].data;
delete [] mats[1].data;
}
return result;
}
template <typename T>
template <typename G>
__host__ Matrix<decltype(std::declval<T&>() * std::declval<G&>())> Matrix<T>::matMulWithOneHotSeq(Matrix<T> left, Matrix<G> oneHot) {
int dimLeft = left.numRows;
int dimCenter = left.numCols;
int dimRight = oneHot.numCols;
assert(oneHot.numRows == 1);
Matrix<T> result = Matrix<T>(dimLeft, dimRight);
for (int i = 0; i < dimLeft; i++) { // should be parallelized
for(int j = 0; j < dimRight; j++) {
int onehotdim = oneHot.data[j];
result.data[result.index(i, j)] = left.data[left.index(i, onehotdim)];
}
}
return result;
}
template <typename T>
__host__ __device__ float Matrix<T>::l2RowDistanceSeq(Matrix &left, int leftRow, Matrix &right, int rightRow) {
int dim = left.numCols;
assert(dim == right.numCols);
float currentDistance = 0.;
for (int d = 0; d < dim; d++) {
float term = left.data[left.index(leftRow, d)] - right.data[right.index(rightRow, d)];
currentDistance += term*term;
}
return currentDistance;
}
template <typename T>
__host__ __device__ void Matrix<T>::print() {
if (this->numCols != 1) {
printf("[\n");
for (int row = 0; row < this->numRows; row++) {
printf("[ ");
for (int col = 0; col < this->numCols; col++) {
printf("%s ", std::to_string(this->data[this->index(row, col)]).c_str());
// cout << this->data[this->index(row, col)] << " ";
}
printf("]\n");
}
printf("]\n");
} else {
printf("[");
for (int row = 0; row < this->numRows; row++) {
// cout << this->data[this->index(row, 0)] << " ";
printf("%s ", std::to_string(this->data[this->index(row, 0)]).c_str());
}
printf("]\n");
}
}
// template class Matrix<float>;
// template class Matrix<bool>;
// template class Matrix<int>;
|
87a8eea5d8ce08506068c8b3f9eebe75e04d467f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define ROUND_OFF 50000
#define CUDA_NUM_THREADS 1024
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define GET_BLOCKS(n, t) (n+t-1) / t
// == Dimension rearrangement Kernel
__global__ void CorrelateData_1d(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const float *bottom0, const float *bottom1, float *top)
{
extern __shared__ char patch_data_char[];
float *patch_data = (float *)patch_data_char;
// First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1
int x1 = blockIdx.x*stride1 + max_displacement;
int y1 = blockIdx.y*stride1;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared memory
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ float sum[WARPS_PER_BLOCK*THREADS_PER_WARP];
// Compute correlation
for(int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2;
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int x2 = x1 + s2o;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch;
//int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
//printf("x1 %d x2 %d bh %d bw %d bc %d i %d ch %d y1 %d idx2 %d\n", x1, x2, bottomheight, bottomwidth, bottomchannels, item, ch, y1, idx2);
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if(ch_off == 0) {
float total_sum = 0;
for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) {
total_sum += sum[idx];
}
//printf("ch_off %d sum %f\n", ch_off, total_sum);
const int sumelems = kernel_size*kernel_size*bottomchannels;
const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x;
top[index + item*topcount] = total_sum / (float)sumelems;
}
}
// Aggregate
}
|
87a8eea5d8ce08506068c8b3f9eebe75e04d467f.cu
|
#include "includes.h"
#define ROUND_OFF 50000
#define CUDA_NUM_THREADS 1024
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define GET_BLOCKS(n, t) (n+t-1) / t
// == Dimension rearrangement Kernel
__global__ void CorrelateData_1d(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const float *bottom0, const float *bottom1, float *top)
{
extern __shared__ char patch_data_char[];
float *patch_data = (float *)patch_data_char;
// First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1
int x1 = blockIdx.x*stride1 + max_displacement;
int y1 = blockIdx.y*stride1;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared memory
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ float sum[WARPS_PER_BLOCK*THREADS_PER_WARP];
// Compute correlation
for(int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2;
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int x2 = x1 + s2o;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch;
//int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
//printf("x1 %d x2 %d bh %d bw %d bc %d i %d ch %d y1 %d idx2 %d\n", x1, x2, bottomheight, bottomwidth, bottomchannels, item, ch, y1, idx2);
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if(ch_off == 0) {
float total_sum = 0;
for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) {
total_sum += sum[idx];
}
//printf("ch_off %d sum %f\n", ch_off, total_sum);
const int sumelems = kernel_size*kernel_size*bottomchannels;
const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x;
top[index + item*topcount] = total_sum / (float)sumelems;
}
}
// Aggregate
}
|
2c7b66622386d9360b4e4029a2fcb9bf288278bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_4_top;
int xdim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_4_top;
int ydim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_4_top;
int xdim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_4_top;
int ydim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim0_update_halo_kernel2_yvel_minus_4_top * \
ydim0_update_halo_kernel2_yvel_minus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim1_update_halo_kernel2_yvel_minus_4_top * \
ydim1_update_halo_kernel2_yvel_minus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_4_top_gpu(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top *
ydim0_update_halo_kernel2_yvel_minus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top *
ydim1_update_halo_kernel2_yvel_minus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_4_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 82))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(82, "update_halo_kernel2_yvel_minus_4_top");
OPS_kernels[82].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_4_top_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_4_top_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_4_top_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_4_top_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[82].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_4_top), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[82].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[82].mpi_time += t2 - t1;
OPS_kernels[82].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[82].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
2c7b66622386d9360b4e4029a2fcb9bf288278bf.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_4_top;
int xdim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_4_top;
int ydim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_4_top;
int xdim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_4_top;
int ydim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim0_update_halo_kernel2_yvel_minus_4_top * \
ydim0_update_halo_kernel2_yvel_minus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim1_update_halo_kernel2_yvel_minus_4_top * \
ydim1_update_halo_kernel2_yvel_minus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_4_top_gpu(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top *
ydim0_update_halo_kernel2_yvel_minus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top *
ydim1_update_halo_kernel2_yvel_minus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_4_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 82))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(82, "update_halo_kernel2_yvel_minus_4_top");
OPS_kernels[82].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_4_top_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_4_top_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_4_top_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_4_top_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[82].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_yvel_minus_4_top<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[82].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[82].mpi_time += t2 - t1;
OPS_kernels[82].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[82].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
deab5da0579f9ab6386cde4ec74759e4b90e35c3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rgb_to_yuv_convert_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "../rgb_to_yuv_convert_layer.h"
#include "../neural_network_exception.h"
#include "../nn_types.h"
#define w_r 0.299F
#define w_b 0.114F
#define w_g (1.0F - w_r - w_b)
#define u_max 0.436F
#define v_max 0.615F
#define u_mult (u_max / (1.0F - w_b))
#define v_mult (v_max / (1.0F - w_r))
#define reverse_r_v_mult ((1.0F - w_r) / v_max)
#define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g))
#define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g))
#define reverse_b_u_mult ((1.0F - w_b) / u_max)
namespace nnforge
{
namespace cuda
{
__global__ void rgb_to_yuv_convert_upd_kernel(
const float * __restrict input,
float * __restrict output,
const int * __restrict color_feature_map_config_list,
int feature_map_count,
int elem_count_per_feature_map,
int color_feature_map_config_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
{
int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
float red = input[red_and_y_offset];
float green = input[green_and_u_offset];
float blue = input[blue_and_v_offset];
float y = w_r * red + w_g * green + w_b * blue;
float u = u_mult * (blue - y);
float v = v_mult * (red - y);
output[red_and_y_offset] = y;
output[green_and_u_offset] = u;
output[blue_and_v_offset] = v;
}
}
__global__ void rgb_to_yuv_convert_deriviative_upd_kernel(
float * __restrict errors,
const int * __restrict color_feature_map_config_list,
int feature_map_count,
int elem_count_per_feature_map,
int color_feature_map_config_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
{
int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
float y = errors[red_and_y_offset];
float u = errors[green_and_u_offset];
float v = errors[blue_and_v_offset];
float red = y + reverse_r_v_mult * v;
float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v;
float blue = y + reverse_b_u_mult * u;
errors[red_and_y_offset] = red;
errors[green_and_u_offset] = green;
errors[blue_and_v_offset] = blue;
}
}
rgb_to_yuv_convert_layer_updater_cuda::rgb_to_yuv_convert_layer_updater_cuda()
{
}
rgb_to_yuv_convert_layer_updater_cuda::~rgb_to_yuv_convert_layer_updater_cuda()
{
}
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
hipLaunchKernelGGL(( rgb_to_yuv_convert_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_neurons_buffer,
*output_neurons_buffer,
*schema_data[0],
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
}
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
hipLaunchKernelGGL(( rgb_to_yuv_convert_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_errors_buffer,
*schema_data[0],
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
}
bool rgb_to_yuv_convert_layer_updater_cuda::is_in_place_backprop() const
{
return true;
}
void rgb_to_yuv_convert_layer_updater_cuda::updater_configured()
{
if (!different_input)
throw neural_network_exception("rgb_to_yuv_convert_layer_updater_cuda is not able to run using the same input");
nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema);
color_feature_map_config_count = layer_derived->color_feature_map_config_list.size();
}
}
}
|
deab5da0579f9ab6386cde4ec74759e4b90e35c3.cu
|
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rgb_to_yuv_convert_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "../rgb_to_yuv_convert_layer.h"
#include "../neural_network_exception.h"
#include "../nn_types.h"
#define w_r 0.299F
#define w_b 0.114F
#define w_g (1.0F - w_r - w_b)
#define u_max 0.436F
#define v_max 0.615F
#define u_mult (u_max / (1.0F - w_b))
#define v_mult (v_max / (1.0F - w_r))
#define reverse_r_v_mult ((1.0F - w_r) / v_max)
#define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g))
#define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g))
#define reverse_b_u_mult ((1.0F - w_b) / u_max)
namespace nnforge
{
namespace cuda
{
__global__ void rgb_to_yuv_convert_upd_kernel(
const float * __restrict input,
float * __restrict output,
const int * __restrict color_feature_map_config_list,
int feature_map_count,
int elem_count_per_feature_map,
int color_feature_map_config_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
{
int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
float red = input[red_and_y_offset];
float green = input[green_and_u_offset];
float blue = input[blue_and_v_offset];
float y = w_r * red + w_g * green + w_b * blue;
float u = u_mult * (blue - y);
float v = v_mult * (red - y);
output[red_and_y_offset] = y;
output[green_and_u_offset] = u;
output[blue_and_v_offset] = v;
}
}
__global__ void rgb_to_yuv_convert_deriviative_upd_kernel(
float * __restrict errors,
const int * __restrict color_feature_map_config_list,
int feature_map_count,
int elem_count_per_feature_map,
int color_feature_map_config_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
{
int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
float y = errors[red_and_y_offset];
float u = errors[green_and_u_offset];
float v = errors[blue_and_v_offset];
float red = y + reverse_r_v_mult * v;
float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v;
float blue = y + reverse_b_u_mult * u;
errors[red_and_y_offset] = red;
errors[green_and_u_offset] = green;
errors[blue_and_v_offset] = blue;
}
}
rgb_to_yuv_convert_layer_updater_cuda::rgb_to_yuv_convert_layer_updater_cuda()
{
}
rgb_to_yuv_convert_layer_updater_cuda::~rgb_to_yuv_convert_layer_updater_cuda()
{
}
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
rgb_to_yuv_convert_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*output_neurons_buffer,
*schema_data[0],
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
}
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
rgb_to_yuv_convert_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_errors_buffer,
*schema_data[0],
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
color_feature_map_config_count,
entry_count);
}
bool rgb_to_yuv_convert_layer_updater_cuda::is_in_place_backprop() const
{
return true;
}
void rgb_to_yuv_convert_layer_updater_cuda::updater_configured()
{
if (!different_input)
throw neural_network_exception("rgb_to_yuv_convert_layer_updater_cuda is not able to run using the same input");
nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema);
color_feature_map_config_count = layer_derived->color_feature_map_config_list.size();
}
}
}
|