hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---
679dd3bb75c92e8652b1b54a4f4c98b01cfb4f30.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/lists/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <cub/device/device_segmented_radix_sort.cuh>
namespace cudf {
namespace lists {
namespace detail {
struct SegmentedSortColumn {
template <typename KeyT, typename ValueT, typename OffsetIteratorT>
void SortPairsAscending(KeyT const* keys_in,
KeyT* keys_out,
ValueT const* values_in,
ValueT* values_out,
int num_items,
int num_segments,
OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets,
rmm::cuda_stream_view stream)
{
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
hipcub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
hipcub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
}
template <typename KeyT, typename ValueT, typename OffsetIteratorT>
void SortPairsDescending(KeyT const* keys_in,
KeyT* keys_out,
ValueT const* values_in,
ValueT* values_out,
int num_items,
int num_segments,
OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets,
rmm::cuda_stream_view stream)
{
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
}
template <typename T>
std::enable_if_t<not is_numeric<T>(), std::unique_ptr<column>> operator()(
column_view const& child,
column_view const& segment_offsets,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto child_table = segmented_sort_by_key(table_view{{child}},
table_view{{child}},
segment_offsets,
{column_order},
{null_precedence},
stream,
mr);
return std::move(child_table->release().front());
}
template <typename T>
std::enable_if_t<is_numeric<T>(), std::unique_ptr<column>> operator()(
column_view const& child,
column_view const& offsets,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// the average list size at which to prefer radixsort:
constexpr cudf::size_type MIN_AVG_LIST_SIZE_FOR_RADIXSORT{100};
if ((child.size() / offsets.size()) < MIN_AVG_LIST_SIZE_FOR_RADIXSORT) {
auto child_table = segmented_sort_by_key(table_view{{child}},
table_view{{child}},
offsets,
{column_order},
{null_precedence},
stream,
mr);
return std::move(child_table->release().front());
}
auto output =
cudf::detail::allocate_like(child, child.size(), mask_allocation_policy::NEVER, stream, mr);
mutable_column_view mutable_output_view = output->mutable_view();
auto keys = [&]() {
if (child.nullable()) {
rmm::device_uvector<T> keys(child.size(), stream);
auto const null_replace_T = null_precedence == null_order::AFTER
? std::numeric_limits<T>::max()
: std::numeric_limits<T>::min();
auto device_child = column_device_view::create(child, stream);
auto keys_in =
cudf::detail::make_null_replacement_iterator<T>(*device_child, null_replace_T);
thrust::copy_n(rmm::exec_policy(stream), keys_in, child.size(), keys.begin());
return keys;
}
return rmm::device_uvector<T>{0, stream};
}();
std::unique_ptr<column> sorted_indices = cudf::make_numeric_column(
data_type(type_to_id<size_type>()), child.size(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view mutable_indices_view = sorted_indices->mutable_view();
thrust::sequence(rmm::exec_policy(stream),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.end<size_type>(),
0);
if (column_order == order::ASCENDING)
SortPairsAscending(child.nullable() ? keys.data() : child.begin<T>(),
mutable_output_view.begin<T>(),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.begin<size_type>(),
child.size(),
offsets.size() - 1,
offsets.begin<size_type>(),
offsets.begin<size_type>() + 1,
stream);
else
SortPairsDescending(child.nullable() ? keys.data() : child.begin<T>(),
mutable_output_view.begin<T>(),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.begin<size_type>(),
child.size(),
offsets.size() - 1,
offsets.begin<size_type>(),
offsets.begin<size_type>() + 1,
stream);
std::vector<std::unique_ptr<column>> output_cols;
output_cols.push_back(std::move(output));
// rearrange the null_mask.
cudf::detail::gather_bitmask(cudf::table_view{{child}},
mutable_indices_view.begin<size_type>(),
output_cols,
cudf::detail::gather_bitmask_op::DONT_CHECK,
stream,
mr);
return std::move(output_cols.front());
}
};
std::unique_ptr<column> sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input.parent());
auto output_offset = make_numeric_column(
input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
input.offsets_begin(),
input.offsets_end(),
output_offset->mutable_view().begin<size_type>(),
[first = input.offsets_begin()] __device__(auto offset_index) {
return offset_index - *first;
});
// for numeric columns, calls the faster segmented radix sort path
// for non-numeric columns, calls segmented_sort_by_key.
auto output_child = type_dispatcher(input.child().type(),
SegmentedSortColumn{},
input.get_sliced_child(stream),
output_offset->view(),
column_order,
null_precedence,
stream,
mr);
auto null_mask = cudf::detail::copy_bitmask(input.parent(), stream, mr);
// Assemble list column & return
return make_lists_column(input.size(),
std::move(output_offset),
std::move(output_child),
input.null_count(),
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return empty_like(input.parent()); }
auto output_offset = make_numeric_column(
input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
input.offsets_begin(),
input.offsets_end(),
output_offset->mutable_view().template begin<size_type>(),
[first = input.offsets_begin()] __device__(auto offset_index) {
return offset_index - *first;
});
auto const child = input.get_sliced_child(stream);
auto const sorted_child_table = stable_segmented_sort_by_key(table_view{{child}},
table_view{{child}},
output_offset->view(),
{column_order},
{null_precedence},
stream,
mr);
return make_lists_column(input.size(),
std::move(output_offset),
std::move(sorted_child_table->release().front()),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sort_lists(input, column_order, null_precedence, cudf::default_stream_value, mr);
}
std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_sort_lists(
input, column_order, null_precedence, cudf::default_stream_value, mr);
}
} // namespace lists
} // namespace cudf
| 679dd3bb75c92e8652b1b54a4f4c98b01cfb4f30.cu | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/lists/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <cub/device/device_segmented_radix_sort.cuh>
namespace cudf {
namespace lists {
namespace detail {
struct SegmentedSortColumn {
template <typename KeyT, typename ValueT, typename OffsetIteratorT>
void SortPairsAscending(KeyT const* keys_in,
KeyT* keys_out,
ValueT const* values_in,
ValueT* values_out,
int num_items,
int num_segments,
OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets,
rmm::cuda_stream_view stream)
{
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
}
template <typename KeyT, typename ValueT, typename OffsetIteratorT>
void SortPairsDescending(KeyT const* keys_in,
KeyT* keys_out,
ValueT const* values_in,
ValueT* values_out,
int num_items,
int num_segments,
OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets,
rmm::cuda_stream_view stream)
{
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage.data(),
temp_storage_bytes,
keys_in,
keys_out,
values_in,
values_out,
num_items,
num_segments,
begin_offsets,
end_offsets,
0,
sizeof(KeyT) * 8,
stream.value());
}
template <typename T>
std::enable_if_t<not is_numeric<T>(), std::unique_ptr<column>> operator()(
column_view const& child,
column_view const& segment_offsets,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto child_table = segmented_sort_by_key(table_view{{child}},
table_view{{child}},
segment_offsets,
{column_order},
{null_precedence},
stream,
mr);
return std::move(child_table->release().front());
}
template <typename T>
std::enable_if_t<is_numeric<T>(), std::unique_ptr<column>> operator()(
column_view const& child,
column_view const& offsets,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// the average list size at which to prefer radixsort:
constexpr cudf::size_type MIN_AVG_LIST_SIZE_FOR_RADIXSORT{100};
if ((child.size() / offsets.size()) < MIN_AVG_LIST_SIZE_FOR_RADIXSORT) {
auto child_table = segmented_sort_by_key(table_view{{child}},
table_view{{child}},
offsets,
{column_order},
{null_precedence},
stream,
mr);
return std::move(child_table->release().front());
}
auto output =
cudf::detail::allocate_like(child, child.size(), mask_allocation_policy::NEVER, stream, mr);
mutable_column_view mutable_output_view = output->mutable_view();
auto keys = [&]() {
if (child.nullable()) {
rmm::device_uvector<T> keys(child.size(), stream);
auto const null_replace_T = null_precedence == null_order::AFTER
? std::numeric_limits<T>::max()
: std::numeric_limits<T>::min();
auto device_child = column_device_view::create(child, stream);
auto keys_in =
cudf::detail::make_null_replacement_iterator<T>(*device_child, null_replace_T);
thrust::copy_n(rmm::exec_policy(stream), keys_in, child.size(), keys.begin());
return keys;
}
return rmm::device_uvector<T>{0, stream};
}();
std::unique_ptr<column> sorted_indices = cudf::make_numeric_column(
data_type(type_to_id<size_type>()), child.size(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view mutable_indices_view = sorted_indices->mutable_view();
thrust::sequence(rmm::exec_policy(stream),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.end<size_type>(),
0);
if (column_order == order::ASCENDING)
SortPairsAscending(child.nullable() ? keys.data() : child.begin<T>(),
mutable_output_view.begin<T>(),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.begin<size_type>(),
child.size(),
offsets.size() - 1,
offsets.begin<size_type>(),
offsets.begin<size_type>() + 1,
stream);
else
SortPairsDescending(child.nullable() ? keys.data() : child.begin<T>(),
mutable_output_view.begin<T>(),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.begin<size_type>(),
child.size(),
offsets.size() - 1,
offsets.begin<size_type>(),
offsets.begin<size_type>() + 1,
stream);
std::vector<std::unique_ptr<column>> output_cols;
output_cols.push_back(std::move(output));
// rearrange the null_mask.
cudf::detail::gather_bitmask(cudf::table_view{{child}},
mutable_indices_view.begin<size_type>(),
output_cols,
cudf::detail::gather_bitmask_op::DONT_CHECK,
stream,
mr);
return std::move(output_cols.front());
}
};
std::unique_ptr<column> sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input.parent());
auto output_offset = make_numeric_column(
input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
input.offsets_begin(),
input.offsets_end(),
output_offset->mutable_view().begin<size_type>(),
[first = input.offsets_begin()] __device__(auto offset_index) {
return offset_index - *first;
});
// for numeric columns, calls the faster segmented radix sort path
// for non-numeric columns, calls segmented_sort_by_key.
auto output_child = type_dispatcher(input.child().type(),
SegmentedSortColumn{},
input.get_sliced_child(stream),
output_offset->view(),
column_order,
null_precedence,
stream,
mr);
auto null_mask = cudf::detail::copy_bitmask(input.parent(), stream, mr);
// Assemble list column & return
return make_lists_column(input.size(),
std::move(output_offset),
std::move(output_child),
input.null_count(),
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return empty_like(input.parent()); }
auto output_offset = make_numeric_column(
input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
input.offsets_begin(),
input.offsets_end(),
output_offset->mutable_view().template begin<size_type>(),
[first = input.offsets_begin()] __device__(auto offset_index) {
return offset_index - *first;
});
auto const child = input.get_sliced_child(stream);
auto const sorted_child_table = stable_segmented_sort_by_key(table_view{{child}},
table_view{{child}},
output_offset->view(),
{column_order},
{null_precedence},
stream,
mr);
return make_lists_column(input.size(),
std::move(output_offset),
std::move(sorted_child_table->release().front()),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sort_lists(input, column_order, null_precedence, cudf::default_stream_value, mr);
}
std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_sort_lists(
input, column_order, null_precedence, cudf::default_stream_value, mr);
}
} // namespace lists
} // namespace cudf
|
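The SortPairsAscending and SortPairsDescending helpers in the pair above invoke DeviceSegmentedRadixSort twice with identical arguments; that is the standard CUB/hipCUB two-phase temp-storage idiom rather than an accidental duplicate: the first call only reports how much scratch memory is needed, the second does the work. Below is a minimal standalone sketch of the same idiom, using the simpler DeviceRadixSort::SortKeys; the sort_keys_example wrapper and int key type are illustrative assumptions, not part of the dataset.
#include <cub/device/device_radix_sort.cuh>
#include <cuda_runtime.h>
// Query-then-run: the first call sees a null temp-storage pointer, so CUB only
// writes the required byte count; the second call, with storage allocated,
// actually performs the sort on the given stream.
void sort_keys_example(const int* d_keys_in, int* d_keys_out, int num_items,
                       cudaStream_t stream)
{
    void*  d_temp_storage     = nullptr;
    size_t temp_storage_bytes = 0;
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
                                   d_keys_in, d_keys_out, num_items,
                                   0, sizeof(int) * 8, stream);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
                                   d_keys_in, d_keys_out, num_items,
                                   0, sizeof(int) * 8, stream);
    cudaFree(d_temp_storage);
}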
c212de5c1349008966e72d079b2833207ec0eee6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace row_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float kernel[], int ksize)
{
cudaSafeCall( hipMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float)) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const PtrStepSz<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#else
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 4;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.rows)
return;
const T* src_row = src.ptr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
if (blockIdx.x > 0)
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart - (HALO_SIZE - j) * BLOCK_DIM_X]);
}
else
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_low(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, src_row));
}
if (blockIdx.x + 2 < gridDim.x)
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + j * BLOCK_DIM_X]);
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X]);
}
else
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + j * BLOCK_DIM_X, src_row));
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row));
}
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.cols)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearRowFilter_caller(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, hipStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 4;
PATCH_PER_BLOCK = 4;
}
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X * PATCH_PER_BLOCK), divUp(src.rows, BLOCK_DIM_Y));
B<T> brd(src.cols);
hipLaunchKernelGGL(( linearRowFilter<KSIZE, T, D>), dim3(grid), dim3(block), 0, stream, src, dst, anchor, brd);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename D>
void linearRowFilter_gpu(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, hipStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect101>,
linearRowFilter_caller< 2, T, D, BrdRowReflect101>,
linearRowFilter_caller< 3, T, D, BrdRowReflect101>,
linearRowFilter_caller< 4, T, D, BrdRowReflect101>,
linearRowFilter_caller< 5, T, D, BrdRowReflect101>,
linearRowFilter_caller< 6, T, D, BrdRowReflect101>,
linearRowFilter_caller< 7, T, D, BrdRowReflect101>,
linearRowFilter_caller< 8, T, D, BrdRowReflect101>,
linearRowFilter_caller< 9, T, D, BrdRowReflect101>,
linearRowFilter_caller<10, T, D, BrdRowReflect101>,
linearRowFilter_caller<11, T, D, BrdRowReflect101>,
linearRowFilter_caller<12, T, D, BrdRowReflect101>,
linearRowFilter_caller<13, T, D, BrdRowReflect101>,
linearRowFilter_caller<14, T, D, BrdRowReflect101>,
linearRowFilter_caller<15, T, D, BrdRowReflect101>,
linearRowFilter_caller<16, T, D, BrdRowReflect101>,
linearRowFilter_caller<17, T, D, BrdRowReflect101>,
linearRowFilter_caller<18, T, D, BrdRowReflect101>,
linearRowFilter_caller<19, T, D, BrdRowReflect101>,
linearRowFilter_caller<20, T, D, BrdRowReflect101>,
linearRowFilter_caller<21, T, D, BrdRowReflect101>,
linearRowFilter_caller<22, T, D, BrdRowReflect101>,
linearRowFilter_caller<23, T, D, BrdRowReflect101>,
linearRowFilter_caller<24, T, D, BrdRowReflect101>,
linearRowFilter_caller<25, T, D, BrdRowReflect101>,
linearRowFilter_caller<26, T, D, BrdRowReflect101>,
linearRowFilter_caller<27, T, D, BrdRowReflect101>,
linearRowFilter_caller<28, T, D, BrdRowReflect101>,
linearRowFilter_caller<29, T, D, BrdRowReflect101>,
linearRowFilter_caller<30, T, D, BrdRowReflect101>,
linearRowFilter_caller<31, T, D, BrdRowReflect101>,
linearRowFilter_caller<32, T, D, BrdRowReflect101>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReplicate>,
linearRowFilter_caller< 2, T, D, BrdRowReplicate>,
linearRowFilter_caller< 3, T, D, BrdRowReplicate>,
linearRowFilter_caller< 4, T, D, BrdRowReplicate>,
linearRowFilter_caller< 5, T, D, BrdRowReplicate>,
linearRowFilter_caller< 6, T, D, BrdRowReplicate>,
linearRowFilter_caller< 7, T, D, BrdRowReplicate>,
linearRowFilter_caller< 8, T, D, BrdRowReplicate>,
linearRowFilter_caller< 9, T, D, BrdRowReplicate>,
linearRowFilter_caller<10, T, D, BrdRowReplicate>,
linearRowFilter_caller<11, T, D, BrdRowReplicate>,
linearRowFilter_caller<12, T, D, BrdRowReplicate>,
linearRowFilter_caller<13, T, D, BrdRowReplicate>,
linearRowFilter_caller<14, T, D, BrdRowReplicate>,
linearRowFilter_caller<15, T, D, BrdRowReplicate>,
linearRowFilter_caller<16, T, D, BrdRowReplicate>,
linearRowFilter_caller<17, T, D, BrdRowReplicate>,
linearRowFilter_caller<18, T, D, BrdRowReplicate>,
linearRowFilter_caller<19, T, D, BrdRowReplicate>,
linearRowFilter_caller<20, T, D, BrdRowReplicate>,
linearRowFilter_caller<21, T, D, BrdRowReplicate>,
linearRowFilter_caller<22, T, D, BrdRowReplicate>,
linearRowFilter_caller<23, T, D, BrdRowReplicate>,
linearRowFilter_caller<24, T, D, BrdRowReplicate>,
linearRowFilter_caller<25, T, D, BrdRowReplicate>,
linearRowFilter_caller<26, T, D, BrdRowReplicate>,
linearRowFilter_caller<27, T, D, BrdRowReplicate>,
linearRowFilter_caller<28, T, D, BrdRowReplicate>,
linearRowFilter_caller<29, T, D, BrdRowReplicate>,
linearRowFilter_caller<30, T, D, BrdRowReplicate>,
linearRowFilter_caller<31, T, D, BrdRowReplicate>,
linearRowFilter_caller<32, T, D, BrdRowReplicate>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowConstant>,
linearRowFilter_caller< 2, T, D, BrdRowConstant>,
linearRowFilter_caller< 3, T, D, BrdRowConstant>,
linearRowFilter_caller< 4, T, D, BrdRowConstant>,
linearRowFilter_caller< 5, T, D, BrdRowConstant>,
linearRowFilter_caller< 6, T, D, BrdRowConstant>,
linearRowFilter_caller< 7, T, D, BrdRowConstant>,
linearRowFilter_caller< 8, T, D, BrdRowConstant>,
linearRowFilter_caller< 9, T, D, BrdRowConstant>,
linearRowFilter_caller<10, T, D, BrdRowConstant>,
linearRowFilter_caller<11, T, D, BrdRowConstant>,
linearRowFilter_caller<12, T, D, BrdRowConstant>,
linearRowFilter_caller<13, T, D, BrdRowConstant>,
linearRowFilter_caller<14, T, D, BrdRowConstant>,
linearRowFilter_caller<15, T, D, BrdRowConstant>,
linearRowFilter_caller<16, T, D, BrdRowConstant>,
linearRowFilter_caller<17, T, D, BrdRowConstant>,
linearRowFilter_caller<18, T, D, BrdRowConstant>,
linearRowFilter_caller<19, T, D, BrdRowConstant>,
linearRowFilter_caller<20, T, D, BrdRowConstant>,
linearRowFilter_caller<21, T, D, BrdRowConstant>,
linearRowFilter_caller<22, T, D, BrdRowConstant>,
linearRowFilter_caller<23, T, D, BrdRowConstant>,
linearRowFilter_caller<24, T, D, BrdRowConstant>,
linearRowFilter_caller<25, T, D, BrdRowConstant>,
linearRowFilter_caller<26, T, D, BrdRowConstant>,
linearRowFilter_caller<27, T, D, BrdRowConstant>,
linearRowFilter_caller<28, T, D, BrdRowConstant>,
linearRowFilter_caller<29, T, D, BrdRowConstant>,
linearRowFilter_caller<30, T, D, BrdRowConstant>,
linearRowFilter_caller<31, T, D, BrdRowConstant>,
linearRowFilter_caller<32, T, D, BrdRowConstant>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect>,
linearRowFilter_caller< 2, T, D, BrdRowReflect>,
linearRowFilter_caller< 3, T, D, BrdRowReflect>,
linearRowFilter_caller< 4, T, D, BrdRowReflect>,
linearRowFilter_caller< 5, T, D, BrdRowReflect>,
linearRowFilter_caller< 6, T, D, BrdRowReflect>,
linearRowFilter_caller< 7, T, D, BrdRowReflect>,
linearRowFilter_caller< 8, T, D, BrdRowReflect>,
linearRowFilter_caller< 9, T, D, BrdRowReflect>,
linearRowFilter_caller<10, T, D, BrdRowReflect>,
linearRowFilter_caller<11, T, D, BrdRowReflect>,
linearRowFilter_caller<12, T, D, BrdRowReflect>,
linearRowFilter_caller<13, T, D, BrdRowReflect>,
linearRowFilter_caller<14, T, D, BrdRowReflect>,
linearRowFilter_caller<15, T, D, BrdRowReflect>,
linearRowFilter_caller<16, T, D, BrdRowReflect>,
linearRowFilter_caller<17, T, D, BrdRowReflect>,
linearRowFilter_caller<18, T, D, BrdRowReflect>,
linearRowFilter_caller<19, T, D, BrdRowReflect>,
linearRowFilter_caller<20, T, D, BrdRowReflect>,
linearRowFilter_caller<21, T, D, BrdRowReflect>,
linearRowFilter_caller<22, T, D, BrdRowReflect>,
linearRowFilter_caller<23, T, D, BrdRowReflect>,
linearRowFilter_caller<24, T, D, BrdRowReflect>,
linearRowFilter_caller<25, T, D, BrdRowReflect>,
linearRowFilter_caller<26, T, D, BrdRowReflect>,
linearRowFilter_caller<27, T, D, BrdRowReflect>,
linearRowFilter_caller<28, T, D, BrdRowReflect>,
linearRowFilter_caller<29, T, D, BrdRowReflect>,
linearRowFilter_caller<30, T, D, BrdRowReflect>,
linearRowFilter_caller<31, T, D, BrdRowReflect>,
linearRowFilter_caller<32, T, D, BrdRowReflect>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowWrap>,
linearRowFilter_caller< 2, T, D, BrdRowWrap>,
linearRowFilter_caller< 3, T, D, BrdRowWrap>,
linearRowFilter_caller< 4, T, D, BrdRowWrap>,
linearRowFilter_caller< 5, T, D, BrdRowWrap>,
linearRowFilter_caller< 6, T, D, BrdRowWrap>,
linearRowFilter_caller< 7, T, D, BrdRowWrap>,
linearRowFilter_caller< 8, T, D, BrdRowWrap>,
linearRowFilter_caller< 9, T, D, BrdRowWrap>,
linearRowFilter_caller<10, T, D, BrdRowWrap>,
linearRowFilter_caller<11, T, D, BrdRowWrap>,
linearRowFilter_caller<12, T, D, BrdRowWrap>,
linearRowFilter_caller<13, T, D, BrdRowWrap>,
linearRowFilter_caller<14, T, D, BrdRowWrap>,
linearRowFilter_caller<15, T, D, BrdRowWrap>,
linearRowFilter_caller<16, T, D, BrdRowWrap>,
linearRowFilter_caller<17, T, D, BrdRowWrap>,
linearRowFilter_caller<18, T, D, BrdRowWrap>,
linearRowFilter_caller<19, T, D, BrdRowWrap>,
linearRowFilter_caller<20, T, D, BrdRowWrap>,
linearRowFilter_caller<21, T, D, BrdRowWrap>,
linearRowFilter_caller<22, T, D, BrdRowWrap>,
linearRowFilter_caller<23, T, D, BrdRowWrap>,
linearRowFilter_caller<24, T, D, BrdRowWrap>,
linearRowFilter_caller<25, T, D, BrdRowWrap>,
linearRowFilter_caller<26, T, D, BrdRowWrap>,
linearRowFilter_caller<27, T, D, BrdRowWrap>,
linearRowFilter_caller<28, T, D, BrdRowWrap>,
linearRowFilter_caller<29, T, D, BrdRowWrap>,
linearRowFilter_caller<30, T, D, BrdRowWrap>,
linearRowFilter_caller<31, T, D, BrdRowWrap>,
linearRowFilter_caller<32, T, D, BrdRowWrap>
}
};
loadKernel(kernel, ksize);
callers[brd_type][ksize]((PtrStepSz<T>)src, (PtrStepSz<D>)dst, anchor, cc, stream);
}
template void linearRowFilter_gpu<uchar , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<short3, float3>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<int , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<float , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
} // namespace row_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | c212de5c1349008966e72d079b2833207ec0eee6.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace row_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float kernel[], int ksize)
{
cudaSafeCall( cudaMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float)) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const PtrStepSz<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#else
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 4;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.rows)
return;
const T* src_row = src.ptr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
if (blockIdx.x > 0)
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart - (HALO_SIZE - j) * BLOCK_DIM_X]);
}
else
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_low(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, src_row));
}
if (blockIdx.x + 2 < gridDim.x)
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + j * BLOCK_DIM_X]);
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X]);
}
else
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + j * BLOCK_DIM_X, src_row));
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row));
}
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.cols)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearRowFilter_caller(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, cudaStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 4;
PATCH_PER_BLOCK = 4;
}
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X * PATCH_PER_BLOCK), divUp(src.rows, BLOCK_DIM_Y));
B<T> brd(src.cols);
linearRowFilter<KSIZE, T, D><<<grid, block, 0, stream>>>(src, dst, anchor, brd);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename D>
void linearRowFilter_gpu(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, cudaStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect101>,
linearRowFilter_caller< 2, T, D, BrdRowReflect101>,
linearRowFilter_caller< 3, T, D, BrdRowReflect101>,
linearRowFilter_caller< 4, T, D, BrdRowReflect101>,
linearRowFilter_caller< 5, T, D, BrdRowReflect101>,
linearRowFilter_caller< 6, T, D, BrdRowReflect101>,
linearRowFilter_caller< 7, T, D, BrdRowReflect101>,
linearRowFilter_caller< 8, T, D, BrdRowReflect101>,
linearRowFilter_caller< 9, T, D, BrdRowReflect101>,
linearRowFilter_caller<10, T, D, BrdRowReflect101>,
linearRowFilter_caller<11, T, D, BrdRowReflect101>,
linearRowFilter_caller<12, T, D, BrdRowReflect101>,
linearRowFilter_caller<13, T, D, BrdRowReflect101>,
linearRowFilter_caller<14, T, D, BrdRowReflect101>,
linearRowFilter_caller<15, T, D, BrdRowReflect101>,
linearRowFilter_caller<16, T, D, BrdRowReflect101>,
linearRowFilter_caller<17, T, D, BrdRowReflect101>,
linearRowFilter_caller<18, T, D, BrdRowReflect101>,
linearRowFilter_caller<19, T, D, BrdRowReflect101>,
linearRowFilter_caller<20, T, D, BrdRowReflect101>,
linearRowFilter_caller<21, T, D, BrdRowReflect101>,
linearRowFilter_caller<22, T, D, BrdRowReflect101>,
linearRowFilter_caller<23, T, D, BrdRowReflect101>,
linearRowFilter_caller<24, T, D, BrdRowReflect101>,
linearRowFilter_caller<25, T, D, BrdRowReflect101>,
linearRowFilter_caller<26, T, D, BrdRowReflect101>,
linearRowFilter_caller<27, T, D, BrdRowReflect101>,
linearRowFilter_caller<28, T, D, BrdRowReflect101>,
linearRowFilter_caller<29, T, D, BrdRowReflect101>,
linearRowFilter_caller<30, T, D, BrdRowReflect101>,
linearRowFilter_caller<31, T, D, BrdRowReflect101>,
linearRowFilter_caller<32, T, D, BrdRowReflect101>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReplicate>,
linearRowFilter_caller< 2, T, D, BrdRowReplicate>,
linearRowFilter_caller< 3, T, D, BrdRowReplicate>,
linearRowFilter_caller< 4, T, D, BrdRowReplicate>,
linearRowFilter_caller< 5, T, D, BrdRowReplicate>,
linearRowFilter_caller< 6, T, D, BrdRowReplicate>,
linearRowFilter_caller< 7, T, D, BrdRowReplicate>,
linearRowFilter_caller< 8, T, D, BrdRowReplicate>,
linearRowFilter_caller< 9, T, D, BrdRowReplicate>,
linearRowFilter_caller<10, T, D, BrdRowReplicate>,
linearRowFilter_caller<11, T, D, BrdRowReplicate>,
linearRowFilter_caller<12, T, D, BrdRowReplicate>,
linearRowFilter_caller<13, T, D, BrdRowReplicate>,
linearRowFilter_caller<14, T, D, BrdRowReplicate>,
linearRowFilter_caller<15, T, D, BrdRowReplicate>,
linearRowFilter_caller<16, T, D, BrdRowReplicate>,
linearRowFilter_caller<17, T, D, BrdRowReplicate>,
linearRowFilter_caller<18, T, D, BrdRowReplicate>,
linearRowFilter_caller<19, T, D, BrdRowReplicate>,
linearRowFilter_caller<20, T, D, BrdRowReplicate>,
linearRowFilter_caller<21, T, D, BrdRowReplicate>,
linearRowFilter_caller<22, T, D, BrdRowReplicate>,
linearRowFilter_caller<23, T, D, BrdRowReplicate>,
linearRowFilter_caller<24, T, D, BrdRowReplicate>,
linearRowFilter_caller<25, T, D, BrdRowReplicate>,
linearRowFilter_caller<26, T, D, BrdRowReplicate>,
linearRowFilter_caller<27, T, D, BrdRowReplicate>,
linearRowFilter_caller<28, T, D, BrdRowReplicate>,
linearRowFilter_caller<29, T, D, BrdRowReplicate>,
linearRowFilter_caller<30, T, D, BrdRowReplicate>,
linearRowFilter_caller<31, T, D, BrdRowReplicate>,
linearRowFilter_caller<32, T, D, BrdRowReplicate>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowConstant>,
linearRowFilter_caller< 2, T, D, BrdRowConstant>,
linearRowFilter_caller< 3, T, D, BrdRowConstant>,
linearRowFilter_caller< 4, T, D, BrdRowConstant>,
linearRowFilter_caller< 5, T, D, BrdRowConstant>,
linearRowFilter_caller< 6, T, D, BrdRowConstant>,
linearRowFilter_caller< 7, T, D, BrdRowConstant>,
linearRowFilter_caller< 8, T, D, BrdRowConstant>,
linearRowFilter_caller< 9, T, D, BrdRowConstant>,
linearRowFilter_caller<10, T, D, BrdRowConstant>,
linearRowFilter_caller<11, T, D, BrdRowConstant>,
linearRowFilter_caller<12, T, D, BrdRowConstant>,
linearRowFilter_caller<13, T, D, BrdRowConstant>,
linearRowFilter_caller<14, T, D, BrdRowConstant>,
linearRowFilter_caller<15, T, D, BrdRowConstant>,
linearRowFilter_caller<16, T, D, BrdRowConstant>,
linearRowFilter_caller<17, T, D, BrdRowConstant>,
linearRowFilter_caller<18, T, D, BrdRowConstant>,
linearRowFilter_caller<19, T, D, BrdRowConstant>,
linearRowFilter_caller<20, T, D, BrdRowConstant>,
linearRowFilter_caller<21, T, D, BrdRowConstant>,
linearRowFilter_caller<22, T, D, BrdRowConstant>,
linearRowFilter_caller<23, T, D, BrdRowConstant>,
linearRowFilter_caller<24, T, D, BrdRowConstant>,
linearRowFilter_caller<25, T, D, BrdRowConstant>,
linearRowFilter_caller<26, T, D, BrdRowConstant>,
linearRowFilter_caller<27, T, D, BrdRowConstant>,
linearRowFilter_caller<28, T, D, BrdRowConstant>,
linearRowFilter_caller<29, T, D, BrdRowConstant>,
linearRowFilter_caller<30, T, D, BrdRowConstant>,
linearRowFilter_caller<31, T, D, BrdRowConstant>,
linearRowFilter_caller<32, T, D, BrdRowConstant>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect>,
linearRowFilter_caller< 2, T, D, BrdRowReflect>,
linearRowFilter_caller< 3, T, D, BrdRowReflect>,
linearRowFilter_caller< 4, T, D, BrdRowReflect>,
linearRowFilter_caller< 5, T, D, BrdRowReflect>,
linearRowFilter_caller< 6, T, D, BrdRowReflect>,
linearRowFilter_caller< 7, T, D, BrdRowReflect>,
linearRowFilter_caller< 8, T, D, BrdRowReflect>,
linearRowFilter_caller< 9, T, D, BrdRowReflect>,
linearRowFilter_caller<10, T, D, BrdRowReflect>,
linearRowFilter_caller<11, T, D, BrdRowReflect>,
linearRowFilter_caller<12, T, D, BrdRowReflect>,
linearRowFilter_caller<13, T, D, BrdRowReflect>,
linearRowFilter_caller<14, T, D, BrdRowReflect>,
linearRowFilter_caller<15, T, D, BrdRowReflect>,
linearRowFilter_caller<16, T, D, BrdRowReflect>,
linearRowFilter_caller<17, T, D, BrdRowReflect>,
linearRowFilter_caller<18, T, D, BrdRowReflect>,
linearRowFilter_caller<19, T, D, BrdRowReflect>,
linearRowFilter_caller<20, T, D, BrdRowReflect>,
linearRowFilter_caller<21, T, D, BrdRowReflect>,
linearRowFilter_caller<22, T, D, BrdRowReflect>,
linearRowFilter_caller<23, T, D, BrdRowReflect>,
linearRowFilter_caller<24, T, D, BrdRowReflect>,
linearRowFilter_caller<25, T, D, BrdRowReflect>,
linearRowFilter_caller<26, T, D, BrdRowReflect>,
linearRowFilter_caller<27, T, D, BrdRowReflect>,
linearRowFilter_caller<28, T, D, BrdRowReflect>,
linearRowFilter_caller<29, T, D, BrdRowReflect>,
linearRowFilter_caller<30, T, D, BrdRowReflect>,
linearRowFilter_caller<31, T, D, BrdRowReflect>,
linearRowFilter_caller<32, T, D, BrdRowReflect>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowWrap>,
linearRowFilter_caller< 2, T, D, BrdRowWrap>,
linearRowFilter_caller< 3, T, D, BrdRowWrap>,
linearRowFilter_caller< 4, T, D, BrdRowWrap>,
linearRowFilter_caller< 5, T, D, BrdRowWrap>,
linearRowFilter_caller< 6, T, D, BrdRowWrap>,
linearRowFilter_caller< 7, T, D, BrdRowWrap>,
linearRowFilter_caller< 8, T, D, BrdRowWrap>,
linearRowFilter_caller< 9, T, D, BrdRowWrap>,
linearRowFilter_caller<10, T, D, BrdRowWrap>,
linearRowFilter_caller<11, T, D, BrdRowWrap>,
linearRowFilter_caller<12, T, D, BrdRowWrap>,
linearRowFilter_caller<13, T, D, BrdRowWrap>,
linearRowFilter_caller<14, T, D, BrdRowWrap>,
linearRowFilter_caller<15, T, D, BrdRowWrap>,
linearRowFilter_caller<16, T, D, BrdRowWrap>,
linearRowFilter_caller<17, T, D, BrdRowWrap>,
linearRowFilter_caller<18, T, D, BrdRowWrap>,
linearRowFilter_caller<19, T, D, BrdRowWrap>,
linearRowFilter_caller<20, T, D, BrdRowWrap>,
linearRowFilter_caller<21, T, D, BrdRowWrap>,
linearRowFilter_caller<22, T, D, BrdRowWrap>,
linearRowFilter_caller<23, T, D, BrdRowWrap>,
linearRowFilter_caller<24, T, D, BrdRowWrap>,
linearRowFilter_caller<25, T, D, BrdRowWrap>,
linearRowFilter_caller<26, T, D, BrdRowWrap>,
linearRowFilter_caller<27, T, D, BrdRowWrap>,
linearRowFilter_caller<28, T, D, BrdRowWrap>,
linearRowFilter_caller<29, T, D, BrdRowWrap>,
linearRowFilter_caller<30, T, D, BrdRowWrap>,
linearRowFilter_caller<31, T, D, BrdRowWrap>,
linearRowFilter_caller<32, T, D, BrdRowWrap>
}
};
loadKernel(kernel, ksize);
callers[brd_type][ksize]((PtrStepSz<T>)src, (PtrStepSz<D>)dst, anchor, cc, stream);
}
template void linearRowFilter_gpu<uchar , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<short3, float3>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<int , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<float , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
} // namespace row_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |
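Most of the row-filter pair above is a 5x33 static table of linearRowFilter_caller instantiations: the kernel needs the filter size and border mode as compile-time template parameters, so every supported combination is compiled ahead of time and the runtime ksize and brd_type simply index into the table. The sketch below shows that dispatch-table pattern in a minimal host-only form; the filter_impl/run_filter names and the box-filter body are illustrative assumptions, not OpenCV code.
// Each instantiation bakes KSIZE in as a constant, so the inner loop can be
// fully unrolled by the compiler -- the same reason the GPU kernel above is
// templated on its kernel size.
template <int KSIZE>
void filter_impl(const float* in, float* out, int n)
{
    for (int i = 0; i + KSIZE <= n; ++i) {
        float sum = 0.0f;
        for (int k = 0; k < KSIZE; ++k) sum += in[i + k];
        out[i] = sum / KSIZE;
    }
}
using caller_t = void (*)(const float*, float*, int);
// Index 0 is a placeholder so that callers[ksize] lines up with ksize itself,
// mirroring the leading 0 entries in the tables above.
void run_filter(const float* in, float* out, int n, int ksize)
{
    static const caller_t callers[] = {
        nullptr, filter_impl<1>, filter_impl<2>, filter_impl<3>, filter_impl<4>};
    callers[ksize](in, out, n);  // runtime ksize selects a statically compiled instantiation
}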
f6541f0e09847e36717bbf5f3212b7096ba905f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "BKKCrypt.h"
#include <string.h>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
//Here is the magic, the core of the algorithm
__global__ void BKKCryptKernel(const char *d_in, char *d_out, size_t length)
{
//every thread processes one element; the index of this element:
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < length) {
d_out[index] = d_in[index];
}
}
bool BKKCrypt(const char *in, char *out)
{
char *d_in = NULL; //pointers to the device memory
char *d_out = NULL;
size_t length = strlen(in) + 1; //size of the input and (hopefully) the output array
bool returnedValue = true;
//allocating device memory
if (hipMalloc((void**)&d_in, length) != hipSuccess) {
returnedValue = false;
goto Error;
}
if (hipMalloc((void**)&d_out, length) != hipSuccess) {
returnedValue = false;
goto Error;
}
//copy the input array to the device
if (hipMemcpy(d_in, in, length, hipMemcpyHostToDevice) != hipSuccess) {
returnedValue = false;
goto Error;
}
//computing the grid and block size.
//we choose the block size to be equal to the number of threads in a warp;
//the grid size is calculated so that we can process every input element
hipDeviceProp_t device;
hipGetDeviceProperties(&device, 0);
int threadsPerBlock = device.warpSize;
int blocks = (length + threadsPerBlock - 1) / threadsPerBlock;
//The magic
hipLaunchKernelGGL(( BKKCryptKernel) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, d_in, d_out, length);
hipDeviceSynchronize();
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
returnedValue = false;
goto Error;
}
//copy the output string to the host
if (hipMemcpy(out, d_out, length, hipMemcpyDeviceToHost) != hipSuccess) {
returnedValue = false;
goto Error;
}
Error:
//We have to clean up. Very important!!!
hipFree(d_in);
hipFree(d_out);
return returnedValue;
}
| f6541f0e09847e36717bbf5f3212b7096ba905f3.cu | #include "BKKCrypt.h"
#include <string.h>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
//Here is the magic, the core of the algorithm
__global__ void BKKCryptKernel(const char *d_in, char *d_out, size_t length)
{
//every thread processes one element; the index of this element:
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < length) {
d_out[index] = d_in[index];
}
}
bool BKKCrypt(const char *in, char *out)
{
char *d_in = NULL; //pointers to the device memory
char *d_out = NULL;
size_t length = strlen(in) + 1; //size of the input and (hopefully) the output array
bool returnedValue = true;
//allocating device memory
if (cudaMalloc((void**)&d_in, length) != cudaSuccess) {
returnedValue = false;
goto Error;
}
if (cudaMalloc((void**)&d_out, length) != cudaSuccess) {
returnedValue = false;
goto Error;
}
//copy the input array to the device
if (cudaMemcpy(d_in, in, length, cudaMemcpyHostToDevice) != cudaSuccess) {
returnedValue = false;
goto Error;
}
//computing the grid and block size.
//we choose the block size to be equal to the number of threads in a warp;
//the grid size is calculated so that we can process every input element
cudaDeviceProp device;
cudaGetDeviceProperties(&device, 0);
int threadsPerBlock = device.warpSize;
int blocks = (length + threadsPerBlock - 1) / threadsPerBlock;
//The magic
BKKCryptKernel <<<blocks, threadsPerBlock >>> (d_in, d_out, length);
cudaDeviceSynchronize();
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
returnedValue = false;
goto Error;
}
//copy the output string to the host
if (cudaMemcpy(out, d_out, length, cudaMemcpyDeviceToHost) != cudaSuccess) {
returnedValue = false;
goto Error;
}
Error:
//We have to clean up. Very important!!!
cudaFree(d_in);
cudaFree(d_out);
return returnedValue;
}
|
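The BKKCrypt pair above sizes its launch with the usual ceiling division, one warp per block, and relies on the in-kernel bounds check to mask the overshoot. A short worked example of that arithmetic follows; the input length of 100 is an arbitrary value chosen for illustration and is not taken from the dataset.
#include <cstdio>
int main()
{
    size_t length          = 100; // hypothetical input size, terminator included
    int    threadsPerBlock = 32;  // one warp, as BKKCrypt chooses
    // Ceiling division: (100 + 31) / 32 = 4 blocks, i.e. 128 threads launched.
    int    blocks = (int)((length + threadsPerBlock - 1) / threadsPerBlock);
    // The kernel's `if (index < length)` guard leaves the 28 surplus threads idle.
    printf("blocks = %d, threads = %d\n", blocks, blocks * threadsPerBlock);
    return 0;
}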
685b587c896568af8d42e0c26d59d8ab81719719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#define BLOCK_SIZE 16
#define STR_SIZE 256
#ifndef SIZE
#define SIZE 1024
#endif
#define ITER 5000
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD 3000000.0f
/* required precision in degrees */
#define PRECISION 0.001f
#define SPEC_HEAT_SI 1750000.0f
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5f
/* chip parameters */
float t_chip = 0.0005f;
float chip_height = 0.016f;
float chip_width = 0.016f;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0f;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file)
{
/*
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 ) {
printf( "The file was not opened\n" );
}
for (i=0; i < grid_rows; i++) {
for (j=0; j < grid_cols; j++) {
sprintf(str, "%g\n", vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
}
fclose(fp);
*/
#ifdef OUTPUT
int i,j;
for (i=0; i < grid_rows; i++) {
for (j=0; j < grid_cols; j++) {
printf("%f\n", vect[i*grid_cols+j]);
}
}
#else
printf("%f\n", vect[0]);
#endif
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file)
{
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++) {
for (j=0; j <= grid_cols-1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
//float Cap,
float Rx_1,
float Ry_1,
float Rz_1,
float step_div_Cap)
{
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0f;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
/*
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
*/
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2; //EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2; //EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordinates
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_rows*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
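// clamp the stencil neighbour indices (N/S/W/E) so they never step outside the valid region of the shared-memory tile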
bool computed;
for (int i=0; i<iteration ; i++) {
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) &&
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) &&
IN_RANGE(tx, validXmin, validXmax) &&
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0f*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0f*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1) {
break;
}
if(computed) { //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
}
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row,
int total_iterations, int num_iterations, int blockCols,
int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0f * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0f * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float Rx_1 = 1.0f/Rx;
float Ry_1 = 1.0f/Ry;
float Rz_1 = 1.0f/Rz;
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float step_div_Cap = step/Cap;
int src = 1, dst = 0;
int count = 0;
struct timeval tv1, tv2;
gettimeofday( &tv1, NULL);
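// ping-pong between MatrixTemp[0] and MatrixTemp[1]: each kernel launch advances the simulation by up to num_iterations time steps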
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t),
MatrixPower,MatrixTemp[src],MatrixTemp[dst],
col,row,borderCols, borderRows, Rx_1,Ry_1,Rz_1,
step_div_Cap);
//hipDeviceSynchronize();
count++;
}
gettimeofday( &tv2, NULL);
double runtime = ((tv2.tv_sec+ tv2.tv_usec/1000000.0)-(tv1.tv_sec+ tv1.tv_usec/1000000.0));
printf("Runtime(seconds): %f\n", runtime);
printf("kernel has been executed for %d times\n", count);
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
run(argc,argv);
return( 0);
}
void run(int argc, char** argv)
{
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations;
int pyramid_height; // number of iterations
/*
if (argc != 7) {
usage(argc, argv);
}
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0) {
usage(argc, argv);
}
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
*/
grid_rows = SIZE;
grid_cols = SIZE;
total_iterations = ITER;
pyramid_height = 2;
tfile=argv[1];
pfile=argv[2];
ofile=argv[3];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2 // add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0) ? 0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0) ? 0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut) {
fatal("unable to allocate memory");
}
#ifdef VERBOSE
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
#endif
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);
hipMalloc((void**)&MatrixPower, sizeof(float)*size);
hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
#ifdef VERBOSE
printf("Start computing the transient temperature\n");
#endif
/* Main computation */
int ret = compute_tran_temp( MatrixPower,MatrixTemp,grid_cols,grid_rows,
total_iterations,pyramid_height, blockCols,
blockRows, borderCols, borderRows);
#ifdef VERBOSE
printf("Ending simulation\n");
#endif
hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
writeoutput( MatrixOut,grid_rows, grid_cols, ofile);
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
}
| 685b587c896568af8d42e0c26d59d8ab81719719.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#define BLOCK_SIZE 16
#define STR_SIZE 256
#ifndef SIZE
#define SIZE 1024
#endif
#define ITER 5000
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD 3000000.0f
/* required precision in degrees */
#define PRECISION 0.001f
#define SPEC_HEAT_SI 1750000.0f
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5f
/* chip parameters */
float t_chip = 0.0005f;
float chip_height = 0.016f;
float chip_width = 0.016f;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0f;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file)
{
/*
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 ) {
printf( "The file was not opened\n" );
}
for (i=0; i < grid_rows; i++) {
for (j=0; j < grid_cols; j++) {
sprintf(str, "%g\n", vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
}
fclose(fp);
*/
#ifdef OUTPUT
int i,j;
for (i=0; i < grid_rows; i++) {
for (j=0; j < grid_cols; j++) {
printf("%f\n", vect[i*grid_cols+j]);
}
}
#else
printf("%f\n", vect[0]);
#endif
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file)
{
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++) {
for (j=0; j <= grid_cols-1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
//float Cap,
float Rx_1,
float Ry_1,
float Rz_1,
float step_div_Cap)
{
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0f;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
/*
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
*/
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2; //EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2; //EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordinates
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_rows*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
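// clamp the stencil neighbour indices (N/S/W/E) so they never step outside the valid region of the shared-memory tile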
bool computed;
for (int i=0; i<iteration ; i++) {
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) &&
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) &&
IN_RANGE(tx, validXmin, validXmax) &&
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0f*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0f*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1) {
break;
}
if(computed) { //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
}
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row,
int total_iterations, int num_iterations, int blockCols,
int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0f * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0f * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float Rx_1 = 1.0f/Rx;
float Ry_1 = 1.0f/Ry;
float Rz_1 = 1.0f/Rz;
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float step_div_Cap = step/Cap;
int src = 1, dst = 0;
int count = 0;
struct timeval tv1, tv2;
gettimeofday( &tv1, NULL);
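// ping-pong between MatrixTemp[0] and MatrixTemp[1]: each kernel launch advances the simulation by up to num_iterations time steps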
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>( MIN(num_iterations, total_iterations-t),
MatrixPower,MatrixTemp[src],MatrixTemp[dst],
col,row,borderCols, borderRows, Rx_1,Ry_1,Rz_1,
step_div_Cap);
//cudaThreadSynchronize();
count++;
}
gettimeofday( &tv2, NULL);
double runtime = ((tv2.tv_sec+ tv2.tv_usec/1000000.0)-(tv1.tv_sec+ tv1.tv_usec/1000000.0));
printf("Runtime(seconds): %f\n", runtime);
printf("kernel has been executed for %d times\n", count);
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
run(argc,argv);
return( 0);
}
void run(int argc, char** argv)
{
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations;
int pyramid_height; // number of iterations
/*
if (argc != 7) {
usage(argc, argv);
}
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0) {
usage(argc, argv);
}
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
*/
grid_rows = SIZE;
grid_cols = SIZE;
total_iterations = ITER;
pyramid_height = 2;
tfile=argv[1];
pfile=argv[2];
ofile=argv[3];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2 // add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0) ? 0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0) ? 0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut) {
fatal("unable to allocate memory");
}
#ifdef VERBOSE
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
#endif
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
#ifdef VERBOSE
printf("Start computing the transient temperature\n");
#endif
/* Main computation */
int ret = compute_tran_temp( MatrixPower,MatrixTemp,grid_cols,grid_rows,
total_iterations,pyramid_height, blockCols,
blockRows, borderCols, borderRows);
#ifdef VERBOSE
printf("Ending simulation\n");
#endif
cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
writeoutput( MatrixOut,grid_rows, grid_cols, ofile);
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
}
|
40182e4d596bcb9929b9baf658c15c74d4190823.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
__constant__ float M[10];
int main(){
float h_M[]={1,2,3,4,5,6,7,8,9,0};
hipMemcpyToSymbol(M,h_M,10*sizeof(float));
std::cout<< "yo"<<std::endl;
}
| 40182e4d596bcb9929b9baf658c15c74d4190823.cu | #include<iostream>
__constant__ float M[10];
int main(){
float h_M[]={1,2,3,4,5,6,7,8,9,0};
cudaMemcpyToSymbol(M,h_M,10*sizeof(float));
std::cout<< "yo"<<std::endl;
}
|
b63e306593be6af2ebd6e2abdc4274b0b110eef1.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
Model::Model()
: isTraining(false), print_cost(false)
{
//int* a = (int*) malloc(sizeof(int) * 8);
checkCUDA(hipSetDevice(0));
checkCUDNN(cudnnCreate(&dnn));
checkCUDA(hipblasCreate(&blas));
workSpaceSize = WORK_SPACE_SIZE;
global_unique_id = 100;
checkCUDA(hipMalloc(&workSpace, workSpaceSize));
// printf("handle.workSpace = 0x%x\n", workSpace);
// create all descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&scaleTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc));
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
// allocate tensors for measuring performance
checkCUDA(hipMalloc(&inputPtr, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&biasPtr, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&outputPtr, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&filterPtr, MAX_TENSOR_SIZE));
// create tensors for batch norm
checkCUDA(hipMalloc(&scalePtr, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&runningMean, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&runningVar, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&saveMean, MAX_TENSOR_SIZE));
checkCUDA(hipMalloc(&saveVar, MAX_TENSOR_SIZE));
// create cuda events
checkCUDA(hipEventCreate(&startEvent));
checkCUDA(hipEventCreate(&endEvent));
}
float Model::measure_oplist_runtime(const std::vector<OpBase*>& opBaseList)
{
const int num_runs = 100;
// warmup
for (int times = 0; times < num_runs; times++)
for (int i = 0; i < opBaseList.size(); i++)
opBaseList[i]->forward();
// measure runtime
// checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int times = 0; times < num_runs; times++) {
for (int i = 0; i < opBaseList.size(); i++)
opBaseList[i]->forward();
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
return milliseconds / num_runs;
}
void* Model::allocate_memory(size_t size, const DATATYPE* data_initial)
{
void* ptr;
checkCUDA(hipMalloc(&ptr, size));
if (data_initial != NULL) {
checkCUDA(hipMemcpy(ptr, data_initial, size, hipMemcpyDefault));
}
return ptr;
}
bool Model::copy_memory(DATATYPE* dst, const DATATYPE* src, size_t size)
{
checkCUDA(hipMemcpy(dst, src, size, hipMemcpyDefault));
return true;
}
| b63e306593be6af2ebd6e2abdc4274b0b110eef1.cu | /* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
Model::Model()
: isTraining(false), print_cost(false)
{
//int* a = (int*) malloc(sizeof(int) * 8);
checkCUDA(cudaSetDevice(0));
checkCUDNN(cudnnCreate(&dnn));
checkCUDA(cublasCreate(&blas));
workSpaceSize = WORK_SPACE_SIZE;
global_unique_id = 100;
checkCUDA(cudaMalloc(&workSpace, workSpaceSize));
// printf("handle.workSpace = 0x%x\n", workSpace);
// create all descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&scaleTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc));
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
// allocate tensors for measuring performance
checkCUDA(cudaMalloc(&inputPtr, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&biasPtr, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&outputPtr, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&filterPtr, MAX_TENSOR_SIZE));
// create tensors for batch norm
checkCUDA(cudaMalloc(&scalePtr, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&runningMean, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&runningVar, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&saveMean, MAX_TENSOR_SIZE));
checkCUDA(cudaMalloc(&saveVar, MAX_TENSOR_SIZE));
// create cuda events
checkCUDA(cudaEventCreate(&startEvent));
checkCUDA(cudaEventCreate(&endEvent));
}
float Model::measure_oplist_runtime(const std::vector<OpBase*>& opBaseList)
{
const int num_runs = 100;
// warmup
for (int times = 0; times < num_runs; times++)
for (int i = 0; i < opBaseList.size(); i++)
opBaseList[i]->forward();
// measure runtime
// checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int times = 0; times < num_runs; times++) {
for (int i = 0; i < opBaseList.size(); i++)
opBaseList[i]->forward();
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
return milliseconds / num_runs;
}
void* Model::allocate_memory(size_t size, const DATATYPE* data_initial)
{
void* ptr;
checkCUDA(cudaMalloc(&ptr, size));
if (data_initial != NULL) {
checkCUDA(cudaMemcpy(ptr, data_initial, size, cudaMemcpyDefault));
}
return ptr;
}
bool Model::copy_memory(DATATYPE* dst, const DATATYPE* src, size_t size)
{
checkCUDA(cudaMemcpy(dst, src, size, cudaMemcpyDefault));
return true;
}
|
63c2fed75918e202abbfb18f600402c6a12fd34e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* An example program utilizing most/all calls from the CUDA
* Runtime API module:
*
* Stream Management
*
*/
#include <cuda/api_wrappers.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <thread>
using element_t = float;
using clock_value_t = long long;
__device__ void gpu_sleep(clock_value_t sleep_cycles)
{
clock_value_t start = clock64();
clock_value_t cycles_elapsed;
do { cycles_elapsed = clock64() - start; }
while (cycles_elapsed < sleep_cycles);
}
template <typename T>
__global__ void add(
const T* __restrict__ lhs,
const T* __restrict__ rhs,
T* __restrict__ result,
size_t length)
{
auto global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index < length) {
result[global_index] = lhs[global_index] + rhs[global_index];
gpu_sleep(200000);
}
}
/*
* Produce a launch configuration with one thread covering each element
*/
cuda::launch_configuration_t make_linear_launch_config(
const cuda::device_t device,
size_t length)
{
auto threads_per_block = device.properties().max_threads_per_block();
cuda::grid::dimension_t num_blocks =
(length / threads_per_block) +
(length % threads_per_block == 0 ? 0 : 1);
return cuda::make_launch_config(num_blocks, threads_per_block, cuda::no_shared_memory);
}
struct buffer_set_t {
cuda::memory::host::unique_ptr<element_t[]> host_lhs;
cuda::memory::host::unique_ptr<element_t[]> host_rhs;
cuda::memory::host::unique_ptr<element_t[]> host_result;
cuda::memory::device::unique_ptr<element_t[]> device_lhs;
cuda::memory::device::unique_ptr<element_t[]> device_rhs;
cuda::memory::device::unique_ptr<element_t[]> device_result;
};
std::vector<buffer_set_t> generate_buffers(
const cuda::device_t device,
size_t num_kernels,
size_t num_elements)
{
// TODO: This should be an std::array, but generating
// it is a bit tricky and I don't want to burden the example
// with template wizardry
std::vector<buffer_set_t> buffers;
std::generate_n(std::back_inserter(buffers), num_kernels,
[&]() {
return buffer_set_t {
// Sticking to C++11 here...
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements)
};
}
);
// TODO: Consider actually filling the buffers
return buffers;
}
int main(int argc, char **argv)
{
constexpr size_t num_kernels = 5;
constexpr size_t num_elements = 1e7;
auto device = cuda::device::current::get();
std::cout << "Using CUDA device " << device.name() << " (having ID " << device.id() << ")\n";
std::cout << "Generating host buffers... " << std::flush;
auto buffers = generate_buffers(device, num_kernels, num_elements);
std::cout << "done.\n" << std::flush;
std::vector<cuda::stream_t> streams;
streams.reserve(num_kernels);
std::generate_n(std::back_inserter(streams), num_kernels,
[&]() { return device.create_stream(cuda::stream::async); });
auto common_launch_config = make_linear_launch_config(device, num_elements);
auto buffer_size = num_elements * sizeof(element_t);
std::cout
<< "Running " << num_kernels << " sequences of HtoD-kernel-DtoH, in parallel" << std::endl;
// Unfortunately, we need to use indices here - unless we
// had access to a zip iterator (e.g. boost::zip_iterator)
for(size_t k = 0; k < num_kernels; k++) {
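// each stream gets its own copy-in / kernel / copy-out sequence, so the per-stream pipelines can overlap on the device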
auto& stream = streams[k];
auto& buffer_set = buffers[k];
stream.enqueue.copy(buffer_set.device_lhs.get(), buffer_set.host_lhs.get(), buffer_size);
stream.enqueue.copy(buffer_set.device_rhs.get(), buffer_set.host_rhs.get(), buffer_size);
stream.enqueue.kernel_launch(
add<element_t>,
common_launch_config,
buffer_set.device_lhs.get(),
buffer_set.device_rhs.get(),
buffer_set.device_result.get(),
num_elements);
stream.enqueue.copy(buffer_set.host_result.get(), buffer_set.device_result.get(), buffer_size);
stream.enqueue.callback(
[k](cuda::stream_t, cuda::status_t status) {
std::cout
<< "Stream " << k+1 << " of " << num_kernels << " has concluded all work. " << std::endl;
}
);
}
std::this_thread::sleep_for(std::chrono::microseconds(50000));
for(auto& stream : streams) { stream.synchronize(); }
cuda::outstanding_error::ensure_none();
// TODO: Consider checking for correctness here
std::cout << "\nSUCCESS" << std::endl;
}
| 63c2fed75918e202abbfb18f600402c6a12fd34e.cu | /**
* An example program utilizing most/all calls from the CUDA
* Runtime API module:
*
* Stream Management
*
*/
#include <cuda/api_wrappers.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <thread>
using element_t = float;
using clock_value_t = long long;
__device__ void gpu_sleep(clock_value_t sleep_cycles)
{
clock_value_t start = clock64();
clock_value_t cycles_elapsed;
do { cycles_elapsed = clock64() - start; }
while (cycles_elapsed < sleep_cycles);
}
template <typename T>
__global__ void add(
const T* __restrict__ lhs,
const T* __restrict__ rhs,
T* __restrict__ result,
size_t length)
{
auto global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index < length) {
result[global_index] = lhs[global_index] + rhs[global_index];
gpu_sleep(200000);
}
}
/*
* Produce a launch configuration with one thread covering each element
*/
cuda::launch_configuration_t make_linear_launch_config(
const cuda::device_t device,
size_t length)
{
auto threads_per_block = device.properties().max_threads_per_block();
cuda::grid::dimension_t num_blocks =
(length / threads_per_block) +
(length % threads_per_block == 0 ? 0 : 1);
return cuda::make_launch_config(num_blocks, threads_per_block, cuda::no_shared_memory);
}
struct buffer_set_t {
cuda::memory::host::unique_ptr<element_t[]> host_lhs;
cuda::memory::host::unique_ptr<element_t[]> host_rhs;
cuda::memory::host::unique_ptr<element_t[]> host_result;
cuda::memory::device::unique_ptr<element_t[]> device_lhs;
cuda::memory::device::unique_ptr<element_t[]> device_rhs;
cuda::memory::device::unique_ptr<element_t[]> device_result;
};
std::vector<buffer_set_t> generate_buffers(
const cuda::device_t device,
size_t num_kernels,
size_t num_elements)
{
// TODO: This should be an std::array, but generating
// it is a bit tricky and I don't want to burden the example
// with template wizardry
std::vector<buffer_set_t> buffers;
std::generate_n(std::back_inserter(buffers), num_kernels,
[&]() {
return buffer_set_t {
// Sticking to C++11 here...
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::host::make_unique<element_t[]>(num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements),
cuda::memory::device::make_unique<element_t[]>(device, num_elements)
};
}
);
// TODO: Consider actually filling the buffers
return buffers;
}
int main(int argc, char **argv)
{
constexpr size_t num_kernels = 5;
constexpr size_t num_elements = 1e7;
auto device = cuda::device::current::get();
std::cout << "Using CUDA device " << device.name() << " (having ID " << device.id() << ")\n";
std::cout << "Generating host buffers... " << std::flush;
auto buffers = generate_buffers(device, num_kernels, num_elements);
std::cout << "done.\n" << std::flush;
std::vector<cuda::stream_t> streams;
streams.reserve(num_kernels);
std::generate_n(std::back_inserter(streams), num_kernels,
[&]() { return device.create_stream(cuda::stream::async); });
auto common_launch_config = make_linear_launch_config(device, num_elements);
auto buffer_size = num_elements * sizeof(element_t);
std::cout
<< "Running " << num_kernels << " sequences of HtoD-kernel-DtoH, in parallel" << std::endl;
// Unfortunately, we need to use indices here - unless we
// had access to a zip iterator (e.g. boost::zip_iterator)
for(size_t k = 0; k < num_kernels; k++) {
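// each stream gets its own copy-in / kernel / copy-out sequence, so the per-stream pipelines can overlap on the device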
auto& stream = streams[k];
auto& buffer_set = buffers[k];
stream.enqueue.copy(buffer_set.device_lhs.get(), buffer_set.host_lhs.get(), buffer_size);
stream.enqueue.copy(buffer_set.device_rhs.get(), buffer_set.host_rhs.get(), buffer_size);
stream.enqueue.kernel_launch(
add<element_t>,
common_launch_config,
buffer_set.device_lhs.get(),
buffer_set.device_rhs.get(),
buffer_set.device_result.get(),
num_elements);
stream.enqueue.copy(buffer_set.host_result.get(), buffer_set.device_result.get(), buffer_size);
stream.enqueue.callback(
[k](cuda::stream_t, cuda::status_t status) {
std::cout
<< "Stream " << k+1 << " of " << num_kernels << " has concluded all work. " << std::endl;
}
);
}
std::this_thread::sleep_for(std::chrono::microseconds(50000));
for(auto& stream : streams) { stream.synchronize(); }
cuda::outstanding_error::ensure_none();
// TODO: Consider checking for correctness here
std::cout << "\nSUCCESS" << std::endl;
}
|
98521e8f482c46f85c5e6dfebbdadd9b2614c850.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <errno.h>
#include <unistd.h>
#include <cutil_inline.h>
// Variables
int* h_A = NULL;
int* h_B = NULL;
int* h_C = NULL;
int* d_A = NULL;
int* d_B = NULL;
int* d_C = NULL;
int N = 10;
unsigned int seed = 0x1234567;
timespec start_time;
timespec end_time;
void array_mul(int* A, int* B, int* C, int N)
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
{
for(int k = 0; k < N; k++)
{
C[i*N+j] = C[i*N+j] + A[i*N+k] * B[k*N+j];
}
}
}
};
__global__ void MatrixMulKernel(int* d_A, int* d_B, int* d_C, int width){
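// Note: the launch below uses a single block (dimGrid(1,1), dimBlock(N,N)), so blockIdx.x is always 0 and only row 0 of d_C is written, which is the row sig_check() reads; an N x N block also requires N <= 32.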
int Cvalue=0;
for(int k=0; k<width; ++k){
int Aelement = d_A[blockIdx.x*width+k];
int Belement = d_B[k*width+threadIdx.y];
Cvalue += Aelement * Belement;
}
d_C[blockIdx.x*width+threadIdx.y] = Cvalue;
}
unsigned int myrand(unsigned int *seed, unsigned int input)
{
*seed = (*seed << 13) ^ (*seed >> 15) + input + 0xa174de3;
return *seed;
};
void sig_check()
{
unsigned int sig = 0x1234567;
for(int i = 0; i < N; i++)
{
myrand(&sig, h_C[i]);
}
printf("Computed check sum signature:0x%08x\n", sig);
if(sig == 0x9f3afc72)
printf("Result check by signature successful!!\n");
else
printf("Result check by signature failed!!\n");
}
void show_array(int* array)
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
printf("%13d, ", array[i*N+j]);
printf("\n");
}
printf("\n");
}
int main (int argc, char *argv[])
{
// get the dimension of array
assert(argc == 2);
N = atoi(argv[1]);
int size = N*N*sizeof(int);
printf("N:%d, size:%d\n", N, size);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
assert(h_A);
assert(h_B);
assert(h_C);
// initial array A & B
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
{
h_A[i*N+j] = myrand(&seed, i*j) & 0xff;
h_B[i*N+j] = myrand(&seed, i*j) & 0xff;
h_C[i*N+j] = 0;
}
// Allocate and copy
hipMalloc(&d_A, size);
hipMalloc(&d_B, size);
hipMalloc(&d_C, size);
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
clock_gettime(CLOCK_REALTIME, &start_time);
// Invoke kernel
dim3 dimGrid(1, 1);
dim3 dimBlock(N, N);
// Launch the device computation
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//array_mul(h_A, h_B, h_C, N);
clock_gettime(CLOCK_REALTIME, &end_time);
printf("sizeof(start_time.tv_sec):%d, sizeof(start_time.tv_nsec):%d\n", sizeof(start_time.tv_sec), sizeof(start_time.tv_nsec));
printf("s_time.tv_sec:%d, s_time.tv_nsec:%d\n", start_time.tv_sec, start_time.tv_nsec);
printf("e_time.tv_sec:%d, e_time.tv_nsec:%d\n", end_time.tv_sec, end_time.tv_nsec);
double execution_time = (double)end_time.tv_sec + (double)end_time.tv_nsec/1000000000.0
- (double)start_time.tv_sec - (double)start_time.tv_nsec/1000000000.0;
printf("diff_time:%.4f(s)\n", execution_time);
//show_array(h_A);
//show_array(h_B);
//show_array(h_C);
sig_check();
return 0;
}
| 98521e8f482c46f85c5e6dfebbdadd9b2614c850.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <errno.h>
#include <unistd.h>
#include <cutil_inline.h>
// Variables
int* h_A = NULL;
int* h_B = NULL;
int* h_C = NULL;
int* d_A = NULL;
int* d_B = NULL;
int* d_C = NULL;
int N = 10;
unsigned int seed = 0x1234567;
timespec start_time;
timespec end_time;
void array_mul(int* A, int* B, int* C, int N)
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
{
for(int k = 0; k < N; k++)
{
C[i*N+j] = C[i*N+j] + A[i*N+k] * B[k*N+j];
}
}
}
};
__global__ void MatrixMulKernel(int* d_A, int* d_B, int* d_C, int width){
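// Note: the launch below uses a single block (dimGrid(1,1), dimBlock(N,N)), so blockIdx.x is always 0 and only row 0 of d_C is written, which is the row sig_check() reads; an N x N block also requires N <= 32.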
int Cvalue=0;
for(int k=0; k<width; ++k){
int Aelement = d_A[blockIdx.x*width+k];
int Belement = d_B[k*width+threadIdx.y];
Cvalue += Aelement * Belement;
}
d_C[blockIdx.x*width+threadIdx.y] = Cvalue;
}
unsigned int myrand(unsigned int *seed, unsigned int input)
{
*seed = (*seed << 13) ^ (*seed >> 15) + input + 0xa174de3;
return *seed;
};
void sig_check()
{
unsigned int sig = 0x1234567;
for(int i = 0; i < N; i++)
{
myrand(&sig, h_C[i]);
}
printf("Computed check sum signature:0x%08x\n", sig);
if(sig == 0x9f3afc72)
printf("Result check by signature successful!!\n");
else
printf("Result check by signature failed!!\n");
}
void show_array(int* array)
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
printf("%13d, ", array[i*N+j]);
printf("\n");
}
printf("\n");
}
int main (int argc, char *argv[])
{
// get the dimension of array
assert(argc == 2);
N = atoi(argv[1]);
int size = N*N*sizeof(int);
printf("N:%d, size:%d\n", N, size);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
assert(h_A);
assert(h_B);
assert(h_C);
// initial array A & B
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
{
h_A[i*N+j] = myrand(&seed, i*j) & 0xff;
h_B[i*N+j] = myrand(&seed, i*j) & 0xff;
h_C[i*N+j] = 0;
}
// Allocate and copy
cudaMalloc(&d_A, size);
cudaMalloc(&d_B, size);
cudaMalloc(&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
clock_gettime(CLOCK_REALTIME, &start_time);
// Invoke kernel
dim3 dimGrid(1, 1);
dim3 dimBlock(N, N);
// Launch the device computation
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//array_mul(h_A, h_B, h_C, N);
clock_gettime(CLOCK_REALTIME, &end_time);
printf("sizeof(start_time.tv_sec):%d, sizeof(start_time.tv_nsec):%d\n", sizeof(start_time.tv_sec), sizeof(start_time.tv_nsec));
printf("s_time.tv_sec:%d, s_time.tv_nsec:%d\n", start_time.tv_sec, start_time.tv_nsec);
printf("e_time.tv_sec:%d, e_time.tv_nsec:%d\n", end_time.tv_sec, end_time.tv_nsec);
double execution_time = (double)end_time.tv_sec + (double)end_time.tv_nsec/1000000000.0
- (double)start_time.tv_sec - (double)start_time.tv_nsec/1000000000.0;
printf("diff_time:%.4f(s)\n", execution_time);
//show_array(h_A);
//show_array(h_B);
//show_array(h_C);
sig_check();
return 0;
}
|
831b6ebb24a6d6e532a9f6371c5f11ae958768da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_2_right;
int xdim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_2_right;
int ydim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_2_right;
int xdim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_2_right;
int ydim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_plus_2_right * (y) + \
xdim0_update_halo_kernel2_zvel_plus_2_right * \
ydim0_update_halo_kernel2_zvel_plus_2_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_plus_2_right * (y) + \
xdim1_update_halo_kernel2_zvel_plus_2_right * \
ydim1_update_halo_kernel2_zvel_plus_2_right * (z))
// user function
__device__
inline void
update_halo_kernel2_zvel_plus_2_right_gpu(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_2_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_right *
ydim0_update_halo_kernel2_zvel_plus_2_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_right *
ydim1_update_halo_kernel2_zvel_plus_2_right;
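// arg0 and arg1 now point at this thread's element inside the 3-D sub-block; the bounds check below skips threads outside the iteration range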
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 54))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(54, "update_halo_kernel2_zvel_plus_2_right");
OPS_kernels[54].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_right_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_2_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_plus_2_right_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_2_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_plus_2_right_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_2_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_plus_2_right_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_2_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_plus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_right), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[54].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 54;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 54;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(54, "update_halo_kernel2_zvel_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 831b6ebb24a6d6e532a9f6371c5f11ae958768da.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_2_right;
int xdim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_2_right;
int ydim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_2_right;
int xdim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_2_right;
int ydim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_plus_2_right * (y) + \
xdim0_update_halo_kernel2_zvel_plus_2_right * \
ydim0_update_halo_kernel2_zvel_plus_2_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_plus_2_right * (y) + \
xdim1_update_halo_kernel2_zvel_plus_2_right * \
ydim1_update_halo_kernel2_zvel_plus_2_right * (z))
// user function
__device__
inline void
update_halo_kernel2_zvel_plus_2_right_gpu(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_2_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_right *
ydim0_update_halo_kernel2_zvel_plus_2_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_right *
ydim1_update_halo_kernel2_zvel_plus_2_right;
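// arg0 and arg1 now point at this thread's element inside the 3-D sub-block; the bounds check below skips threads outside the iteration range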
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 54))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(54, "update_halo_kernel2_zvel_plus_2_right");
OPS_kernels[54].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_right_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_2_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_plus_2_right_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_2_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_plus_2_right_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_2_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_plus_2_right_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_2_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_plus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_zvel_plus_2_right<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[54].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 54;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 54;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(54, "update_halo_kernel2_zvel_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
a567d4e829804d8873aab8491007f484a58e8330.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/take_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
namespace {
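// Row-gather kernel: work item k maps to output cell (i = k / col, j = k % col) and copies
// input cell (indicator[i], j), so output row i becomes input row indicator[i].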
template <typename T, typename I>
__global__ void TakeOpKernel(const T* in,
const I* indicator,
size_t row,
size_t col,
T* out) {
size_t id_num = row * col;
CUDA_KERNEL_LOOP(k, id_num) {
size_t i = k / col;
size_t j = k % col;
I rrow = indicator[i];
out[k] = in[rrow * col + j];
}
}
} // namespace
template <typename T, typename I>
class TakeGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override {
return Status::Ok();
}
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status TakeGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor feature, indicator, output;
XDL_CHECK_STATUS(ctx->GetInput(0, &feature));
XDL_CHECK_STATUS(ctx->GetInput(1, &indicator));
XDL_CHECK_COND(1 == indicator.Shape().Size(),
Status::ArgumentError("indicator must be rank 1 tensor"));
auto fea_dims = feature.Shape().Dims();
std::vector<size_t> dims(fea_dims.begin(), fea_dims.end());
dims[0] = indicator.Shape().NumElements();
TensorShape out_shape(dims);
XDL_CHECK_STATUS(ctx->AllocateOutput(0, out_shape, &output));
size_t row = dims[0];
size_t col = feature.Shape().NumElements() / feature.Shape()[0];
T* pin = feature.Raw<T>(), *pout = output.Raw<T>();
I* pind = indicator.Raw<I>();
hipStream_t st = stream->GetInternal();
CUDA_CHECK(hipMemsetAsync(pout, 0, sizeof(T) * out_shape.NumElements(), st));
hipLaunchKernelGGL(( TakeOpKernel<T, I>),
dim3(CUDA_GET_BLOCKS(row * col)),
dim3(CUDA_NUM_THREADS),
0,
st, pin, pind, row, col, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(TakeOp, TakeGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
| a567d4e829804d8873aab8491007f484a58e8330.cu | /*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/take_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
namespace {
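// Row-gather kernel: work item k maps to output cell (i = k / col, j = k % col) and copies
// input cell (indicator[i], j), so output row i becomes input row indicator[i].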
template <typename T, typename I>
__global__ void TakeOpKernel(const T* in,
const I* indicator,
size_t row,
size_t col,
T* out) {
size_t id_num = row * col;
CUDA_KERNEL_LOOP(k, id_num) {
size_t i = k / col;
size_t j = k % col;
I rrow = indicator[i];
out[k] = in[rrow * col + j];
}
}
} // namespace
template <typename T, typename I>
class TakeGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override {
return Status::Ok();
}
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status TakeGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor feature, indicator, output;
XDL_CHECK_STATUS(ctx->GetInput(0, &feature));
XDL_CHECK_STATUS(ctx->GetInput(1, &indicator));
XDL_CHECK_COND(1 == indicator.Shape().Size(),
Status::ArgumentError("indicator must be rank 1 tensor"));
auto fea_dims = feature.Shape().Dims();
std::vector<size_t> dims(fea_dims.begin(), fea_dims.end());
dims[0] = indicator.Shape().NumElements();
TensorShape out_shape(dims);
XDL_CHECK_STATUS(ctx->AllocateOutput(0, out_shape, &output));
size_t row = dims[0];
size_t col = feature.Shape().NumElements() / feature.Shape()[0];
T* pin = feature.Raw<T>(), *pout = output.Raw<T>();
I* pind = indicator.Raw<I>();
cudaStream_t st = stream->GetInternal();
CUDA_CHECK(cudaMemsetAsync(pout, 0, sizeof(T) * out_shape.NumElements(), st));
TakeOpKernel<T, I><<<
CUDA_GET_BLOCKS(row * col),
CUDA_NUM_THREADS,
0,
st>>>(pin, pind, row, col, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(TakeOp, TakeGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
|
cd19a7f0a97fc60fc89fd8c24fb6e9e58d9ef0a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <time.h>
#include <chrono>
#include <iomanip>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#define _USE_MATH_DEFINES
#define MAX_MOVES 32
#define MINIMAX_MAXVAL 50000
#define MINIMAX_MINVAL -50000
#define MASK 0xFFFFFFFF
using namespace std;
using namespace std::chrono;
//---------------------------------------------Funkcje GPU------------------------------------------------------------------
//-------------Szukanie pionkow gracza na planszy-----------------
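//One thread per board square: squares holding a piece of the side to move report their index,
//all others report the sentinel 1000, so a later sort gathers the real positions at the front.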
__global__ void kernelFindPawns(int* board, int* pawns_id, bool whose_move)
{
if(threadIdx.x < 64) {
int pawn_pos = 1000;
if(whose_move == 0 && board[threadIdx.x] > 0 && board[threadIdx.x] < 10)
pawn_pos = threadIdx.x;
else if(whose_move == 1 && board[threadIdx.x] > 10)
pawn_pos = threadIdx.x;
pawns_id[threadIdx.x] = pawn_pos;
}
}
//-------------Wyznaczanie mozliwych ruchow-----------------
__inline__ __device__
bool checkMovePawn(int* board, int start_pos, int end_pos, bool whose_move)
{
int end_field = board[end_pos];
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(whose_move == 0){
if(start_col == end_col) {
if(end_row == start_row - 1 && end_field == 0)
move_possible = 1;
else if(end_row == start_row - 2 && end_field == 0 && start_row == 6) {
if(board[(start_row - 1)*8 + end_col] == 0)
move_possible = 1;
}
}
else if(abs(start_col - end_col) == 1) {
if(end_row == start_row - 1 && end_field != 0)
move_possible = 1;
}
}
else{
if(start_col == end_col) {
if(end_row == start_row + 1 && end_field == 0)
move_possible = 1;
else if(end_row == start_row + 2 && end_field == 0 && start_row == 1) {
if(board[(start_row + 1)*8 + end_col] == 0)
move_possible = 1;
}
}
else if(abs(start_col - end_col) == 1) {
if(end_row == start_row + 1 && end_field != 0)
move_possible = 1;
}
}
return move_possible;
}
__inline__ __device__
bool checkMoveKnight(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(abs(start_col - end_col) == 2 && abs(start_row - end_row) == 1)
move_possible = 1;
else if(abs(start_col - end_col) == 1 && abs(start_row - end_row) == 2)
move_possible = 1;
return move_possible;
}
__inline__ __device__
bool checkMoveRook(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(end_col == start_col) {
move_possible = 1;
int delta_row = end_row - start_row;
int row_mult = 1;
if(delta_row < 0)
row_mult = -1;
else
delta_row++;
for(int i = 1; i < abs(delta_row); i++) {
int id = (start_row + i * row_mult) * 8 + end_col;
if(board[id] != 0) {
move_possible = 0;
return move_possible;
}
}
}
else if(end_row == start_row) {
move_possible = 1;
int delta_col = end_col - start_col;
int col_mult = 1;
if(delta_col < 0)
col_mult = -1;
else
delta_col++;
for(int i = 1; i < abs(delta_col); i++) {
int id = start_row * 8 + start_col + i * col_mult;
if(board[id] != 0) {
move_possible = 0;
return move_possible;
}
}
}
return move_possible;
}
__inline__ __device__
bool checkMoveBishop(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
int delta_row = end_row - start_row;
int delta_col = end_col - start_col;
bool move_possible = 0;
if(abs(delta_row) == abs(delta_col)) {
move_possible = 1;
int row_mult = 1;
int col_mult = 1;
if(delta_row < 0)
row_mult = -1;
if(delta_col < 0)
col_mult = -1;
for(int i = 1; i < abs(delta_row); i++) {
int id = (start_row + i * row_mult) * 8 + start_col + i * col_mult;
if(board[id] != 0) {
move_possible = 0;
return move_possible;
}
}
}
return move_possible;
}
__inline__ __device__
bool checkMoveQueen(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(start_col == end_col || start_row == end_row)
move_possible = checkMoveRook(board, start_pos, end_pos);
else if(abs(start_row - end_row) == abs(start_col - end_col))
move_possible = checkMoveBishop(board, start_pos, end_pos);
return move_possible;
}
__inline__ __device__
bool checkMoveKing(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(abs(start_col - end_col) == 1 ||abs(start_col - end_col) == 0) {
if(abs(start_row - end_row) == 1 || abs(start_row - end_row) == 0)
move_possible = 1;
}
return move_possible;
}
__global__ void kernelCheckAllMoves(int* board, int* moves, int start_id, bool whose_move)
{
int pawn = board[start_id] % 10;
if(threadIdx.x < 64) {
int end_id = threadIdx.x;
bool move_possible;
if(whose_move == 0 && board[end_id] > 0 && board[end_id] < 10)
move_possible = 0;
else if(whose_move == 1 && board[end_id] > 10)
move_possible = 0;
else {
if(pawn == 1)
move_possible = checkMovePawn(board, start_id, end_id, whose_move);
else if(pawn == 2)
move_possible = checkMoveRook(board, start_id, end_id);
else if(pawn == 3)
move_possible = checkMoveBishop(board, start_id, end_id);
else if(pawn == 4)
move_possible = checkMoveKnight(board, start_id, end_id);
else if(pawn == 5)
move_possible = checkMoveQueen(board, start_id, end_id);
else if(pawn == 6)
move_possible = checkMoveKing(board, start_id, end_id);
}
int ret_val = 1000;
if(move_possible == 1)
ret_val = threadIdx.x;
moves[threadIdx.x] = ret_val;
}
}
__global__ void kernelCheckMove(int* board, int* move, int start_id, int end_id, bool whose_move)
{
int pawn = board[start_id] % 10;
if(threadIdx.x == 0) {
bool move_possible;
if(whose_move == 0 && board[end_id] > 0 && board[end_id] < 10)
move_possible = 0;
else if(whose_move == 1 && board[end_id] > 10)
move_possible = 0;
else {
if(pawn == 1)
move_possible = checkMovePawn(board, start_id, end_id, whose_move);
else if(pawn == 2)
move_possible = checkMoveRook(board, start_id, end_id);
else if(pawn == 3)
move_possible = checkMoveBishop(board, start_id, end_id);
else if(pawn == 4)
move_possible = checkMoveKnight(board, start_id, end_id);
else if(pawn == 5)
move_possible = checkMoveQueen(board, start_id, end_id);
else if(pawn == 6)
move_possible = checkMoveKing(board, start_id, end_id);
}
int ret_val = 1000;
if(move_possible == 1)
ret_val = threadIdx.x;
move[threadIdx.x] = ret_val;
}
}
//--------------------Obliczanie punktow planszy-------------------
__inline__ __device__
int getPointsPawn(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 10;
if(col == 0 || col == 7)
        points -= 2;
if(field_type < 10) {
if(row == 1)
points += 30;
else if(row == 0)
points += 70;
}
else {
if(row == 6)
points += 30;
else if(row == 7)
points += 70;
}
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsBishop(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 30;
if(row == 0 || row == 7)
points -= 4;
else if(row >= 2 && row <= 5)
points += 4;
if(col == 0 || col == 7)
points -= 4;
else if(col >= 2 && col <= 5)
points += 4;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsKnight(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 30;
if(whose_move == 0 && row == 0)
points -= 8;
else if(whose_move == 0 && row == 7)
points -= 8;
else if(row == 0 || row == 7)
points -= 4;
else if(row >= 2 && row <= 5)
points += 4;
if(col == 0 || col == 7)
points -= 4;
else if(col >= 2 && col <= 5)
points += 4;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsRook(int pos, int field_type, bool whose_move)
{
//int row = pos / 8;
int col = pos % 8;
int points = 50;
if(col == 0 || col == 7)
        points -= 6;
    else if(col == 1 || col == 6)
        points -= 3;
    else if(col == 3 || col == 4)
        points += 3;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsQueen(int pos, int field_type, bool whose_move)
{
//int row = pos / 8;
int col = pos % 8;
int points = 90;
if(col == 0 || col == 7)
points -= 5;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsKing(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 1500;
if(row > 1 && row < 6)
points -= 20;
else if(row == 1 || row == 6)
points -= 5;
if(col == 3 || col == 4)
points -= 5;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int warpReductionPoints(int value) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
value += __shfl_down_sync(MASK, value, offset);
return value;
}
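//Block-wide sum: each warp reduces its values with shuffles, the per-warp partial sums go to
//shared memory, and the first warp reduces those partials (assumes blockDim.x is a multiple of
//warpSize; kernelEvalPoints launches 64 threads).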
__inline__ __device__
int blockReductionPoints(int value)
{
static __shared__ double shared[32];
int w_id = threadIdx.x / warpSize;
int t_id = threadIdx.x % warpSize;
value = warpReductionPoints(value);
if(t_id == 0)
shared[w_id] = value;
__syncthreads();
value = (threadIdx.x < blockDim.x / warpSize) ? shared[t_id] : 0;
//Finalna redukcja w pierwszym warpie
if(w_id == 0)
value = warpReductionPoints(value);
return value;
}
__global__ void kernelEvalPoints(const int* board, int* points_out, bool whose_move)
{
if(threadIdx.x < 64) {
int pos_id = threadIdx.x;
int field = board[pos_id];
int points = 0;
if(field == 0)
points = 0;
else if(field % 10 == 1)
points = getPointsPawn(pos_id, field, whose_move);
else if(field % 10 == 2)
points = getPointsRook(pos_id, field, whose_move);
else if(field % 10 == 3)
points = getPointsBishop(pos_id, field, whose_move);
else if(field % 10 == 4)
points = getPointsKnight(pos_id, field, whose_move);
else if(field % 10 == 5)
points = getPointsQueen(pos_id, field, whose_move);
else if(field % 10 == 6)
points = getPointsKing(pos_id, field, whose_move);
points = blockReductionPoints(points);
if(threadIdx.x == 0)
points_out[blockIdx.x] = points;
}
}
//----------------------------Minimax-------------------------------------------
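//kernelMax/kernelMin: one block per parent node of the minimax tree; each block reduces its
//MAX_MOVES child scores to the parent's maximum (minimum), ignoring the MINIMAX_MAXVAL
//(MINIMAX_MINVAL) sentinels that mark empty branches.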
__global__ void kernelMax(int* max, int* points, unsigned int rozmiar)
{
__shared__ int max_values[MAX_MOVES];
int t_id = threadIdx.x;
unsigned int b_id = blockIdx.x * blockDim.x + threadIdx.x;
max_values[t_id] = points[b_id];
__syncthreads();
for (int i = 1; i < MAX_MOVES; i *= 2) {
if((t_id + i) < MAX_MOVES && max_values[t_id + i] != MINIMAX_MAXVAL) {
if (max_values[t_id + i] > max_values[t_id])
max_values[t_id] = max_values[t_id + i];
}
__syncthreads();
}
if (t_id == 0)
max[blockIdx.x] = max_values[t_id];
}
__global__ void kernelMin(int* min, int* points, unsigned int rozmiar)
{
__shared__ int min_values[MAX_MOVES];
int t_id = threadIdx.x;
unsigned int b_id = blockIdx.x * blockDim.x + threadIdx.x;
min_values[t_id] = points[b_id];
__syncthreads();
for (int i = 1; i < MAX_MOVES; i *= 2) {
if((t_id + i) < MAX_MOVES) {
if (min_values[t_id + i] < min_values[t_id] && min_values[t_id + i] != MINIMAX_MINVAL)
min_values[t_id] = min_values[t_id + i];
}
__syncthreads();
}
if (t_id == 0)
min[blockIdx.x] = min_values[t_id];
}
//------------------------------------------------Funkcje globalne----------------------------------------------------------
void resetuj_plansze(int** plansza, int* gracze)
{
/*Oznaczenia pionkow:
0 - puste pole
1/11 - pion gracza bialego/czarnego
2/12 - wieza gracza bialego/czarnego
3/13 - goniec gracza bialego/czarnego
4/14 - skoczek gracza bialego/czarnego
5/15 - hetman gracza bialego/czarnego
6/16 - krol gracza bialego/czarnego
*/
for(int i = 0; i < 8; i++) {
for(int j = 0; j < 8; j++) {
int id = 0;
//Dwa gorne rzedy na pionki gracza czarnego
if(i == 0) {
if(j == 0 || j == 7)
id = 12;
else if(j == 1 || j == 6)
id = 14;
else if(j == 2 || j == 5)
id = 13;
else if(j == 3)
id = 15;
else if(j == 4)
id = 16;
}
else if(i == 1) {
id = 11;
}
//Dwa dolne rzedy na pionki gracza bialego
else if(i == 7) {
if(j == 0 || j == 7)
id = 2;
else if(j == 1 || j == 6)
id = 4;
else if(j == 2 || j == 5)
id = 3;
else if(j == 3)
id = 5;
else if(j == 4)
id = 6;
}
else if(i == 6) {
id = 1;
}
//Reszta pol bez pionkow
plansza[i][j] = id;
}
}
while(true) {
system("clear");
int wartosc = -1;
cout << "Podaj gracza biaego (0 - czowiek, 1 - AI)" << endl;
cin >> wartosc;
if(wartosc == 0 || wartosc == 1) {
gracze[0] = wartosc;
break;
}
}
while(true) {
system("clear");
int wartosc = -1;
cout << "Podaj gracza czarnego (0 - czowiek, 1 - AI)" << endl;
cin >> wartosc;
if(wartosc == 0 || wartosc == 1) {
gracze[1] = wartosc;
break;
}
}
}
void rysuj_plansze(int** plansza)
{
cout << "=||";
for(int i = 0; i < 8; i++) {
cout << "===|";
}
cout << "|";
cout << endl;
for(int i = 0; i < 8; i++) {
cout << 8 - i;
cout << "||";
for(int j = 0; j < 8; j++) {
int id = plansza[i][j];
char rysuj = ' ';
if(id % 10 == 1) {
if(id < 10)
rysuj = 'P';
else
rysuj = 'p';
}
else if(id % 10 == 2) {
if(id < 10)
rysuj = 'R';
else
rysuj = 'r';
}
else if(id % 10 == 3) {
if(id < 10)
rysuj = 'B';
else
rysuj = 'b';
}
else if(id % 10 == 4) {
if(id < 10)
rysuj = 'N';
else
rysuj = 'n';
}
else if(id % 10 == 5) {
if(id < 10)
rysuj = 'Q';
else
rysuj = 'q';
}
else if(id % 10 == 6) {
if(id < 10)
rysuj = 'K';
else
rysuj = 'k';
}
cout << " " << rysuj << " " << "|";
}
cout << "|";
//Wypisywanie legendy
switch(i) {
case 0:
cout << " Oznaczenia:";
break;
case 1:
cout << " male litery - pionki czarne";
break;
case 2:
cout << " R - wieza";
break;
case 3:
cout << " N - skoczek";
break;
case 4:
cout << " K - krol";
break;
}
cout << endl;
if(i < 7) {
cout << "-++";
for(int k = 0; k < 8; k++) {
cout << "---+";
}
cout << "+";
}
else {
cout << "=++";
for(int k = 0; k < 8; k++) {
cout << "===+";
}
cout << "+";
}
switch(i) {
case 0:
cout << " DUZE LITERY - pionki biale";
break;
case 1:
cout << " P - pion";
break;
case 2:
cout << " B - goniec";
break;
case 3:
cout << " Q - hetman";
break;
}
cout << endl;
}
cout << " ||";
for(int i = 0; i < 8; i++) {
cout << " " << char('A' + i) << " " << "|";
}
cout << "|";
cout << endl << endl;
}
//Zamiana 2 pierwszych znakow wczytanego od gracza stringa na indeksy rzedow i kolumn tablicy
//Jesli dowolny indeks wykracza poza tablice(indeks = <0, 7>) zwracamy wartosc false
bool string_na_pole(int** plansza, string pole_str, int* pole)
{
int kolumna = int(tolower(pole_str[0])) - (int)'a';
int rzad = (int)'8' - (int)pole_str[1];
if(rzad >= 0 && rzad <= 7)
pole[0] = rzad;
else
return 0;
if(kolumna >= 0 && kolumna <= 7)
pole[1] = kolumna;
else
return 0;
return 1;
}
string pole_na_string(int** plansza, int* pole)
{
    string pole_str(2, ' '); //reserve two characters so the operator[] writes below are well-defined
pole_str[0] = char(pole[1] + 65);
pole_str[1] = char(56 - pole[0]);
return pole_str;
}
//Zwraca typ pionka (0 jesli pionek jest nieprawidlowy)
int sprawdz_pionek(int** plansza, int* pole, bool czyj_ruch)
{
int kolumna = pole[1];
int rzad = pole[0];
int pionek = plansza[rzad][kolumna];
if(pionek == 0)
return 0;
else if(pionek > 0 && pionek < 10 && czyj_ruch == 0)
return pionek % 10;
else if(pionek > 10 && czyj_ruch == 1)
return pionek % 10;
return 0;
}
//Zwraca nazwe pionka na wybranym polu
string nazwa_pola(int** plansza, int* pole) {
int pole_doc_typ = plansza[pole[0]][pole[1]];
if(pole_doc_typ == 0)
return "Puste pole";
else if(pole_doc_typ % 10 == 1)
return "Pion";
else if(pole_doc_typ % 10 == 2)
return "Wieza";
else if(pole_doc_typ % 10 == 3)
return "Goniec";
else if(pole_doc_typ % 10 == 4)
return "Skoczek";
else if(pole_doc_typ % 10 == 5)
return "Hetman";
else if(pole_doc_typ % 10 == 6)
return "Krol";
return "Puste pole";
}
void wybierz_rozpoczecie(int** plansza, int* pole_pocz, int* pole_doc, bool czyj_ruch)
{
int losuj = rand() % 4;
if(czyj_ruch == 0) {
switch(losuj) {
case 0:
pole_pocz[0] = 6;
pole_pocz[1] = 4;
pole_doc[0] = 4;
pole_doc[1] = 4;
break;
case 1:
pole_pocz[0] = 6;
pole_pocz[1] = 3;
pole_doc[0] = 4;
pole_doc[1] = 3;
break;
case 2:
pole_pocz[0] = 7;
pole_pocz[1] = 6;
pole_doc[0] = 5;
pole_doc[1] = 5;
break;
case 3:
pole_pocz[0] = 7;
pole_pocz[1] = 1;
pole_doc[0] = 5;
pole_doc[1] = 2;
break;
}
}
else {
switch(losuj) {
case 0:
pole_pocz[0] = 1;
pole_pocz[1] = 4;
pole_doc[0] = 3;
pole_doc[1] = 4;
break;
case 1:
pole_pocz[0] = 1;
pole_pocz[1] = 3;
pole_doc[0] = 3;
pole_doc[1] = 3;
break;
case 2:
pole_pocz[0] = 0;
pole_pocz[1] = 6;
pole_doc[0] = 2;
pole_doc[1] = 5;
break;
case 3:
pole_pocz[0] = 0;
pole_pocz[1] = 1;
pole_doc[0] = 2;
pole_doc[1] = 2;
break;
}
}
}
//GPU tworzenie grafu
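//Recursively expands the game tree to max_depth with a fixed branching factor of MAX_MOVES;
//branches without a legal move are padded with empty nodes whose leaves score MINIMAX_MINVAL
//(odd max_depth) or MINIMAX_MAXVAL (even), so 'punkty' collects MAX_MOVES^max_depth leaf
//values in depth-first order.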
void licz_graf_GPU(int* plansza, vector<int> &punkty, int depth, int max_depth, bool czyj_ruch, bool czy_wezel_niepusty)
{
if(depth == max_depth) {
int punkty_wezel = 0;
int* h_punkty = new int[64];
bool czyj_ruch_nowy = 0;
if(depth % 2 == 0)
czyj_ruch_nowy = czyj_ruch;
else
czyj_ruch_nowy = !czyj_ruch;
if(czy_wezel_niepusty == 1) {
int* h_ruchy = new int[64];
int* d_punkty;
int* d_plansza;
hipMalloc((void**)&d_plansza, 64 * sizeof(int));
hipMalloc((void**)&d_punkty, 64 * sizeof(int));
hipMemcpy(d_plansza, plansza, 64 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelEvalPoints), dim3(1),dim3(64), 0, 0, d_plansza, d_punkty, czyj_ruch_nowy);
hipDeviceSynchronize();
hipMemcpy(h_punkty, d_punkty, sizeof(int), hipMemcpyDeviceToHost);
punkty_wezel = h_punkty[0];
delete[] h_punkty;
hipFree(d_punkty);
hipFree(d_plansza);
}
else {
if(max_depth % 2 == 1)
punkty_wezel = MINIMAX_MINVAL;
else
punkty_wezel = MINIMAX_MAXVAL;
}
punkty.push_back(punkty_wezel);
}
else if(czy_wezel_niepusty == 0) {
for(int i = 0; i < 32; i++) {
licz_graf_GPU(plansza, punkty, depth + 1, max_depth, !czyj_ruch, 0);
}
}
else {
//Znajdowanie pionkow
int* h_ruchy = new int[64];
int* d_ruchy;
int* d_plansza;
hipMalloc((void**)&d_plansza, 64 * sizeof(int));
hipMalloc((void**)&d_ruchy, 64 * sizeof(int));
hipMemcpy(d_plansza, plansza, 64 * sizeof(int), hipMemcpyHostToDevice);
vector<int> pionki;
hipLaunchKernelGGL(( kernelFindPawns), dim3(1),dim3(64), 0, 0, d_plansza, d_ruchy, czyj_ruch);
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int i = 0; i < 16; i++) {
if(thrust_tab[i] < 64) {
pionki.push_back(thrust_tab[i]);
}
else
break;
}
//Szukanie mozliwych ruchow
vector<int> ruchy_start;
vector<int> ruchy_doc;
for(int i = 0; i < pionki.size(); i++) {
if(ruchy_start.size() >= 32)
break;
hipLaunchKernelGGL(( kernelCheckAllMoves), dim3(1),dim3(64), 0, 0, d_plansza, d_ruchy, pionki[i], czyj_ruch);
hipDeviceSynchronize();
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int j = 0; j < 64; j++) {
if(thrust_tab[j] < 64) {
ruchy_start.push_back(pionki[i]);
ruchy_doc.push_back(thrust_tab[j]);
}
else
break;
}
}
delete[] h_ruchy;
hipFree(d_ruchy);
hipFree(d_plansza);
//Rozszerzanie grafu dla wyliczonych ruchow
for(int i = 0; i < 32; i++) {
if(i >= ruchy_start.size())
licz_graf_GPU(plansza, punkty, depth + 1, max_depth, !czyj_ruch, 0);
else{
int start_id = ruchy_start[i];
int doc_id = ruchy_doc[i];
int pole_start_typ = plansza[start_id];
int pole_doc_typ = plansza[doc_id];
plansza[doc_id] = pole_start_typ;
plansza[start_id] = 0;
//Wybor przez bota hetmana w razie promocji
if(pole_start_typ == 0 && (doc_id/8 == 0 || doc_id/8 == 7)) {
int pole_prom = 0;
if(czyj_ruch == 0)
pole_prom = 5;
else
pole_prom = 15;
plansza[doc_id] = pole_prom;
}
licz_graf_GPU(plansza, punkty, depth + 1, max_depth, !czyj_ruch, 1);
plansza[doc_id] = pole_doc_typ;
plansza[start_id] = pole_start_typ;
}
}
}
}
void znajdz_najlepszy_ruch(int* plansza, vector<int> punkty, int* max_ruch, bool czyj_ruch)
{
//Znajdowanie pionkow
int* h_ruchy = new int[64];
int* d_ruchy;
int* d_plansza;
hipMalloc((void**)&d_plansza, 64 * sizeof(int));
hipMalloc((void**)&d_ruchy, 64 * sizeof(int));
hipMemcpy(d_plansza, plansza, 64 * sizeof(int), hipMemcpyHostToDevice);
vector<int> pionki;
hipLaunchKernelGGL(( kernelFindPawns), dim3(1),dim3(64), 0, 0, d_plansza, d_ruchy, czyj_ruch);
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int i = 0; i < 16; i++) {
if(thrust_tab[i] < 64) {
pionki.push_back(thrust_tab[i]);
}
else
break;
}
//Szukanie mozliwych ruchow
vector<int> ruchy_start;
vector<int> ruchy_doc;
for(int i = 0; i < pionki.size(); i++) {
if(ruchy_start.size() >= 32)
break;
hipLaunchKernelGGL(( kernelCheckAllMoves), dim3(1),dim3(64), 0, 0, d_plansza, d_ruchy, pionki[i], czyj_ruch);
hipDeviceSynchronize();
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int j = 0; j < 64; j++) {
if(thrust_tab[j] < 64) {
ruchy_start.push_back(pionki[i]);
ruchy_doc.push_back(thrust_tab[j]);
}
else
break;
}
}
delete[] h_ruchy;
hipFree(d_ruchy);
hipFree(d_plansza);
int max_val = MINIMAX_MINVAL;
int max_id_start = 0;
int max_id_doc = 0;
for(int i = 0; i < MAX_MOVES; i++) {
if(punkty[i] >= max_val) {
max_val = punkty[i];
max_id_start = ruchy_start[i];
max_id_doc = ruchy_doc[i];
}
}
max_ruch[0] = max_id_start;
max_ruch[1] = max_id_doc;
}
//--------------------------------------------Main------------------------------------------------------------------
int main()
{
srand(time(NULL));
//Tworzenie dwuwymiarowej tablicy 8x8 odpowiadajacej za figury znajdujace sie na polach
int** plansza = new int* [8];
int* gracze = new int[2];
for (int i = 0; i < 8; i++) {
plansza[i] = new int[8];
}
resetuj_plansze(plansza, gracze);
bool czyj_ruch = 0; //0 - biale, 1 - czarne
int runda = 1;
while(true) {
system("clear");
cout << "Wpisz 'r' w celu zresetowania gry" << endl << endl << endl;
//Rysowanie planszy i calego "GUI"
rysuj_plansze(plansza);
if(czyj_ruch == 0)
cout << "Obecny ruch - gracz BIALY" << endl << endl;
else
cout << "Obecny ruch - gracz CZARNY" << endl << endl;
int* h_ruchy = new int[64];
int* d_ruchy;
int* d_plansza;
hipMalloc((void**)&d_plansza, 64 * sizeof(int));
hipMalloc((void**)&d_ruchy, 64 * sizeof(int));
int* plansza_vec = new int[64];
for(int i = 0; i < 64; i++) {
plansza_vec[i] = plansza[i/8][i%8];
}
hipMemcpy(d_plansza, plansza_vec, 64 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelEvalPoints), dim3(1), dim3(64), 0, 0, d_plansza, d_ruchy, czyj_ruch);
hipDeviceSynchronize();
hipMemcpy(h_ruchy, d_ruchy, sizeof(int), hipMemcpyDeviceToHost);
//cout << "Obecna plansza punkty: " << h_ruchy[0] << endl;
int* pole_poczatkowe = new int[2];
int* pole_docelowe = new int[2];
int pionek;
        //Runda gracza(człowieka)
if(gracze[czyj_ruch] == 0) {
//Pobieramy od gracza pole piona, ktory chce ruszyc i sprawdzamy,
//czy gracz posiada pion na takim polu
if(czyj_ruch == 0 || czyj_ruch == 1) {
string pole_str = "";
cout << "Podaj pole pionka:" << endl;
cin >> pole_str;
pole_poczatkowe = new int[2];
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
if(string_na_pole(plansza, pole_str, pole_poczatkowe) == 0) {
cout << "Nieprawidlowe pole!" << endl;
system("pause");
continue;
}
pionek = sprawdz_pionek(plansza, pole_poczatkowe, czyj_ruch);
if(pionek == 0) {
cout << "Nie masz pionka na tym polu!" << endl;
system("pause");
continue;
}
int start = pole_poczatkowe[0] * 8 + pole_poczatkowe[1];
hipMemcpy(d_plansza, plansza_vec, 64 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelCheckAllMoves), dim3(1), dim3(64), 0, 0, d_plansza, d_ruchy, start, czyj_ruch);
hipDeviceSynchronize();
hipMemcpy(h_ruchy, d_ruchy, 64 * sizeof(int), hipMemcpyDeviceToHost);
vector<int> pionek_ruchy;
for(int i = 0; i < 64; i++) {
if(h_ruchy[i] < 64)
pionek_ruchy.push_back(h_ruchy[i]);
}
if(pionek_ruchy.empty()) {
cout << "Pionek ten nie moze wykonac zadnego ruchu!" << endl;
system("pause");
continue;
}
else {
cout << "Mozliwe ruchy:" << endl;
for(int i = 0; i < pionek_ruchy.size(); i++) {
int* pole_temp = new int[2];
pole_temp[0] = pionek_ruchy[i] / 8;
pole_temp[1] = pionek_ruchy[i] % 8;
string temp_str = pole_na_string(plansza, pole_temp);
cout << temp_str[0] << temp_str[1] << " ";
if(i % 5 == 0 && i != 0)
cout << endl;
delete []pole_temp;
}
cout << endl;
}
//Pobieramy od gracza pole, na ktore chce ruszyc sie poprzednio wybranym pionem
//i sprawdzamy, czy ruch jest prawidlowy
cout << "Podaj ruch pionka:" << endl;
cin >> pole_str;
pole_docelowe = new int[2];
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
else if(string_na_pole(plansza, pole_str, pole_docelowe) == 0) {
cout << "Nieprawidlowe pole!" << endl;
system("pause");
continue;
}
else if(pole_docelowe[0] == pole_poczatkowe[0] && pole_docelowe[1] == pole_poczatkowe[1]) {
cout << "Nieprawidlowe pole!" << endl;
system("pause");
continue;
}
hipMemcpy(d_plansza, plansza_vec, 64 * sizeof(int), hipMemcpyHostToDevice);
start = pole_poczatkowe[0] * 8 + pole_poczatkowe[1];
int koniec = pole_docelowe[0] * 8 + pole_docelowe[1];
hipLaunchKernelGGL(( kernelCheckMove), dim3(1), dim3(1), 0, 0, d_plansza, d_ruchy, start, koniec, czyj_ruch);
hipDeviceSynchronize();
hipMemcpy(h_ruchy, d_ruchy, sizeof(int), hipMemcpyDeviceToHost);
if(h_ruchy[0] > 64) {
cout << "Nie mozesz wykonac takiego ruchu!" << endl;
system("pause");
continue;
}
}
//Sprawdzanie promocji piona, wybor figury przez gracza jesli nastapila ona
if(pionek == 1) {
bool promocja = 0;
if(czyj_ruch == 0 && pole_docelowe[0] == 0)
promocja = 1;
else if(czyj_ruch == 1 && pole_docelowe[0] == 7)
promocja = 1;
if(promocja == 1) {
int figura_promocja = 0;
cout << "Nastapila promocja! Wybierz figure, do ktorej ma awansowac pion:" << endl;
cout << "2 - wieza" << endl;
cout << "3 - goniec" << endl;
cout << "4 - skoczek" << endl;
cout << "5 - hetman" << endl;
cin >> figura_promocja;
if(figura_promocja >= 2 && figura_promocja <= 5) {
pionek = figura_promocja;
}
else {
cout << "Nieprawidlowo wybrana figura!" << endl;
system("pause");
continue;
}
}
}
}
//Pierwsza runda AI - wybor pierwszego ruchu
else if(runda == 1 || runda == 2) {
wybierz_rozpoczecie(plansza, pole_poczatkowe, pole_docelowe, czyj_ruch);
pionek = plansza[pole_poczatkowe[0]][pole_poczatkowe[1]];
string str_pocz = pole_na_string(plansza, pole_poczatkowe);
string str_doc = pole_na_string(plansza, pole_docelowe);
cout << "Wybrany ruch to " << str_pocz[0] << str_pocz[1] << "->" <<
str_doc[0] << str_doc[1] << endl;
string pole_str;
cout << "Wpisz 'r' aby zrestartowac rozgrywke, lub cokolwiek innego, eby AI wykonao ruch:" << endl;
cin >> pole_str;
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
}
//Runda AI
else {
vector<int> punkty_minimax;
int max_glebokosc = 3;
unsigned int rozmiar_minimax = pow(MAX_MOVES, max_glebokosc);
licz_graf_GPU(plansza_vec, punkty_minimax, 0, max_glebokosc, czyj_ruch, 1);
int* host_points = new int[punkty_minimax.size()];
for(int i = 0; i < punkty_minimax.size(); i++) {
host_points[i] = punkty_minimax[i];
}
int* dev_points;
int* dev_max;
int* dev_min;
//Alokacja pamieci
hipMalloc((void**)&dev_points, rozmiar_minimax * sizeof(int));
hipMalloc((void**)&dev_max, rozmiar_minimax * sizeof(int));
hipMalloc((void**)&dev_min, rozmiar_minimax * sizeof(int));
hipMemcpy(dev_points, host_points, rozmiar_minimax * sizeof(int), hipMemcpyHostToDevice);
int watki = MAX_MOVES;
unsigned int bloki = 1024;
unsigned int max_blocks = pow(2, 31) - 1;
for(int i = max_glebokosc; i > 1; i--) {
unsigned int rozmiar_wynik = pow(MAX_MOVES, i-1);
bloki = min(max_blocks, rozmiar_wynik);
if(i % 2 == 0) {
if(i == max_glebokosc) {
hipLaunchKernelGGL(( kernelMin), dim3(bloki), dim3(watki), 0, 0, dev_min, dev_points, rozmiar_wynik);
hipDeviceSynchronize();
}
else {
hipLaunchKernelGGL(( kernelMin), dim3(bloki), dim3(watki), 0, 0, dev_min, dev_max, rozmiar_wynik);
hipDeviceSynchronize();
}
}
else {
if(i == max_glebokosc) {
hipLaunchKernelGGL(( kernelMax), dim3(bloki), dim3(watki), 0, 0, dev_max, dev_points, rozmiar_wynik);
hipDeviceSynchronize();
}
else {
hipLaunchKernelGGL(( kernelMax), dim3(bloki), dim3(watki), 0, 0, dev_max, dev_min, rozmiar_wynik);
hipDeviceSynchronize();
}
}
}
vector<int> punkty_ostatni_wezel;
if(max_glebokosc != 1) {
hipMemcpy(host_points, dev_min, MAX_MOVES * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < MAX_MOVES; i++) {
punkty_ostatni_wezel.push_back(host_points[i]);
//cout << punkty_ostatni_wezel[i] << " ";
}
}
else {
for(int i = 0; i < MAX_MOVES; i++) {
punkty_ostatni_wezel.push_back(punkty_minimax[i]);
//cout << punkty_ostatni_wezel[i] << " ";
}
}
int* najlepszy_ruch = new int[2];
znajdz_najlepszy_ruch(plansza_vec, punkty_ostatni_wezel, najlepszy_ruch, czyj_ruch);
pole_poczatkowe[0] = najlepszy_ruch[0] / 8;
pole_poczatkowe[1] = najlepszy_ruch[0] % 8;
pole_docelowe[0] = najlepszy_ruch[1] / 8;
pole_docelowe[1] = najlepszy_ruch[1] % 8;
pionek = plansza[pole_poczatkowe[0]][pole_poczatkowe[1]] % 10;
//Promocja
if(pionek == 1 && (pole_docelowe[0] == 0 || pole_docelowe[0] == 7))
pionek = 5;
string str_pocz = pole_na_string(plansza, pole_poczatkowe);
string str_doc = pole_na_string(plansza, pole_docelowe);
cout << "Najlepszy wykryty ruch to " << str_pocz[0] << str_pocz[1] << "->" <<
str_doc[0] << str_doc[1] << endl;
string pole_str;
cout << "Wpisz 'r' aby zrestartowac rozgrywke, lub cokolwiek innego, eby AI wykonao ruch:" << endl;
cin >> pole_str;
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
delete[] najlepszy_ruch;
hipFree(dev_points);
hipFree(dev_min);
hipFree(dev_max);
}
//Sprawdzanie szach mat, ewentualne resetowanie gry
if(plansza[pole_docelowe[0]][pole_docelowe[1]] % 10 == 6) {
cout << "=================================" << endl;
if(czyj_ruch == 0)
cout << "Koniec gry! Wygrywa gracz bialy!" << endl;
else
cout << "Koniec gry! Wygrywa gracz Czarny!" << endl;
cout << "=================================" << endl;
string str;
cin >> str;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
//Przesuwanie pionka na nowe pole
if(czyj_ruch == 1)
pionek += 10;
plansza[pole_poczatkowe[0]][pole_poczatkowe[1]] = 0;
plansza[pole_docelowe[0]][pole_docelowe[1]] = pionek;
//Zmiana grajacego gracza
czyj_ruch = !czyj_ruch;
runda++;
delete []pole_poczatkowe;
delete []pole_docelowe;
hipFree(d_plansza);
hipFree(d_ruchy);
}
delete []gracze;
return 0;
} | cd19a7f0a97fc60fc89fd8c24fb6e9e58d9ef0a9.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <time.h>
#include <chrono>
#include <iomanip>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#define _USE_MATH_DEFINES
#define MAX_MOVES 32
#define MINIMAX_MAXVAL 50000
#define MINIMAX_MINVAL -50000
#define MASK 0xFFFFFFFF
using namespace std;
using namespace std::chrono;
//---------------------------------------------Funkcje GPU------------------------------------------------------------------
//-------------Szukanie pionkow gracza na planszy-----------------
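//One thread per board square: squares holding a piece of the side to move report their index,
//all others report the sentinel 1000, so a later sort gathers the real positions at the front.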
__global__ void kernelFindPawns(int* board, int* pawns_id, bool whose_move)
{
if(threadIdx.x < 64) {
int pawn_pos = 1000;
if(whose_move == 0 && board[threadIdx.x] > 0 && board[threadIdx.x] < 10)
pawn_pos = threadIdx.x;
else if(whose_move == 1 && board[threadIdx.x] > 10)
pawn_pos = threadIdx.x;
pawns_id[threadIdx.x] = pawn_pos;
}
}
//-------------Wyznaczanie mozliwych ruchow-----------------
__inline__ __device__
bool checkMovePawn(int* board, int start_pos, int end_pos, bool whose_move)
{
int end_field = board[end_pos];
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(whose_move == 0){
if(start_col == end_col) {
if(end_row == start_row - 1 && end_field == 0)
move_possible = 1;
else if(end_row == start_row - 2 && end_field == 0 && start_row == 6) {
if(board[(start_row - 1)*8 + end_col] == 0)
move_possible = 1;
}
}
else if(abs(start_col - end_col) == 1) {
if(end_row == start_row - 1 && end_field != 0)
move_possible = 1;
}
}
else{
if(start_col == end_col) {
if(end_row == start_row + 1 && end_field == 0)
move_possible = 1;
else if(end_row == start_row + 2 && end_field == 0 && start_row == 1) {
if(board[(start_row + 1)*8 + end_col] == 0)
move_possible = 1;
}
}
else if(abs(start_col - end_col) == 1) {
if(end_row == start_row + 1 && end_field != 0)
move_possible = 1;
}
}
return move_possible;
}
__inline__ __device__
bool checkMoveKnight(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(abs(start_col - end_col) == 2 && abs(start_row - end_row) == 1)
move_possible = 1;
else if(abs(start_col - end_col) == 1 && abs(start_row - end_row) == 2)
move_possible = 1;
return move_possible;
}
__inline__ __device__
bool checkMoveRook(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(end_col == start_col) {
move_possible = 1;
int delta_row = end_row - start_row;
int row_mult = 1;
if(delta_row < 0)
row_mult = -1;
else
delta_row++;
for(int i = 1; i < abs(delta_row); i++) {
int id = (start_row + i * row_mult) * 8 + end_col;
if(board[id] != 0) {
move_possible = 0;
return move_possible;
}
}
}
else if(end_row == start_row) {
move_possible = 1;
int delta_col = end_col - start_col;
int col_mult = 1;
if(delta_col < 0)
col_mult = -1;
else
delta_col++;
for(int i = 1; i < abs(delta_col); i++) {
int id = start_row * 8 + start_col + i * col_mult;
if(board[id] != 0) {
move_possible = 0;
return move_possible;
}
}
}
return move_possible;
}
__inline__ __device__
bool checkMoveBishop(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
int delta_row = end_row - start_row;
int delta_col = end_col - start_col;
bool move_possible = 0;
if(abs(delta_row) == abs(delta_col)) {
move_possible = 1;
int row_mult = 1;
int col_mult = 1;
if(delta_row < 0)
row_mult = -1;
if(delta_col < 0)
col_mult = -1;
for(int i = 1; i < abs(delta_row); i++) {
int id = (start_row + i * row_mult) * 8 + start_col + i * col_mult;
if(board[id] != 0) {
move_possible = 0;
return move_possible;
}
}
}
return move_possible;
}
__inline__ __device__
bool checkMoveQueen(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(start_col == end_col || start_row == end_row)
move_possible = checkMoveRook(board, start_pos, end_pos);
else if(abs(start_row - end_row) == abs(start_col - end_col))
move_possible = checkMoveBishop(board, start_pos, end_pos);
return move_possible;
}
__inline__ __device__
bool checkMoveKing(int* board, int start_pos, int end_pos)
{
int start_row = start_pos / 8;
int start_col = start_pos % 8;
int end_row = end_pos / 8;
int end_col = end_pos % 8;
bool move_possible = 0;
if(abs(start_col - end_col) == 1 ||abs(start_col - end_col) == 0) {
if(abs(start_row - end_row) == 1 || abs(start_row - end_row) == 0)
move_possible = 1;
}
return move_possible;
}
__global__ void kernelCheckAllMoves(int* board, int* moves, int start_id, bool whose_move)
{
int pawn = board[start_id] % 10;
if(threadIdx.x < 64) {
int end_id = threadIdx.x;
bool move_possible;
if(whose_move == 0 && board[end_id] > 0 && board[end_id] < 10)
move_possible = 0;
else if(whose_move == 1 && board[end_id] > 10)
move_possible = 0;
else {
if(pawn == 1)
move_possible = checkMovePawn(board, start_id, end_id, whose_move);
else if(pawn == 2)
move_possible = checkMoveRook(board, start_id, end_id);
else if(pawn == 3)
move_possible = checkMoveBishop(board, start_id, end_id);
else if(pawn == 4)
move_possible = checkMoveKnight(board, start_id, end_id);
else if(pawn == 5)
move_possible = checkMoveQueen(board, start_id, end_id);
else if(pawn == 6)
move_possible = checkMoveKing(board, start_id, end_id);
}
int ret_val = 1000;
if(move_possible == 1)
ret_val = threadIdx.x;
moves[threadIdx.x] = ret_val;
}
}
__global__ void kernelCheckMove(int* board, int* move, int start_id, int end_id, bool whose_move)
{
int pawn = board[start_id] % 10;
if(threadIdx.x == 0) {
bool move_possible;
if(whose_move == 0 && board[end_id] > 0 && board[end_id] < 10)
move_possible = 0;
else if(whose_move == 1 && board[end_id] > 10)
move_possible = 0;
else {
if(pawn == 1)
move_possible = checkMovePawn(board, start_id, end_id, whose_move);
else if(pawn == 2)
move_possible = checkMoveRook(board, start_id, end_id);
else if(pawn == 3)
move_possible = checkMoveBishop(board, start_id, end_id);
else if(pawn == 4)
move_possible = checkMoveKnight(board, start_id, end_id);
else if(pawn == 5)
move_possible = checkMoveQueen(board, start_id, end_id);
else if(pawn == 6)
move_possible = checkMoveKing(board, start_id, end_id);
}
int ret_val = 1000;
if(move_possible == 1)
ret_val = threadIdx.x;
move[threadIdx.x] = ret_val;
}
}
//--------------------Obliczanie punktow planszy-------------------
__inline__ __device__
int getPointsPawn(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 10;
if(col == 0 || col == 7)
        points -= 2;
if(field_type < 10) {
if(row == 1)
points += 30;
else if(row == 0)
points += 70;
}
else {
if(row == 6)
points += 30;
else if(row == 7)
points += 70;
}
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsBishop(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 30;
if(row == 0 || row == 7)
points -= 4;
else if(row >= 2 && row <= 5)
points += 4;
if(col == 0 || col == 7)
points -= 4;
else if(col >= 2 && col <= 5)
points += 4;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsKnight(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 30;
if(whose_move == 0 && row == 0)
points -= 8;
else if(whose_move == 0 && row == 7)
points -= 8;
else if(row == 0 || row == 7)
points -= 4;
else if(row >= 2 && row <= 5)
points += 4;
if(col == 0 || col == 7)
points -= 4;
else if(col >= 2 && col <= 5)
points += 4;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsRook(int pos, int field_type, bool whose_move)
{
//int row = pos / 8;
int col = pos % 8;
int points = 50;
if(col == 0 || col == 7)
        points -= 6;
    else if(col == 1 || col == 6)
        points -= 3;
    else if(col == 3 || col == 4)
        points += 3;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsQueen(int pos, int field_type, bool whose_move)
{
//int row = pos / 8;
int col = pos % 8;
int points = 90;
if(col == 0 || col == 7)
points -= 5;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int getPointsKing(int pos, int field_type, bool whose_move)
{
int row = pos / 8;
int col = pos % 8;
int points = 1500;
if(row > 1 && row < 6)
points -= 20;
else if(row == 1 || row == 6)
points -= 5;
if(col == 3 || col == 4)
points -= 5;
if(whose_move == 0 && field_type > 10)
points *= -1;
else if(whose_move == 1 && field_type < 10)
points *= -1;
return points;
}
__inline__ __device__
int warpReductionPoints(int value) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
value += __shfl_down_sync(MASK, value, offset);
return value;
}
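//Block-wide sum: each warp reduces its values with shuffles, the per-warp partial sums go to
//shared memory, and the first warp reduces those partials (assumes blockDim.x is a multiple of
//warpSize; kernelEvalPoints launches 64 threads).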
__inline__ __device__
int blockReductionPoints(int value)
{
static __shared__ double shared[32];
int w_id = threadIdx.x / warpSize;
int t_id = threadIdx.x % warpSize;
value = warpReductionPoints(value);
if(t_id == 0)
shared[w_id] = value;
__syncthreads();
value = (threadIdx.x < blockDim.x / warpSize) ? shared[t_id] : 0;
//Finalna redukcja w pierwszym warpie
if(w_id == 0)
value = warpReductionPoints(value);
return value;
}
__global__ void kernelEvalPoints(const int* board, int* points_out, bool whose_move)
{
if(threadIdx.x < 64) {
int pos_id = threadIdx.x;
int field = board[pos_id];
int points = 0;
if(field == 0)
points = 0;
else if(field % 10 == 1)
points = getPointsPawn(pos_id, field, whose_move);
else if(field % 10 == 2)
points = getPointsRook(pos_id, field, whose_move);
else if(field % 10 == 3)
points = getPointsBishop(pos_id, field, whose_move);
else if(field % 10 == 4)
points = getPointsKnight(pos_id, field, whose_move);
else if(field % 10 == 5)
points = getPointsQueen(pos_id, field, whose_move);
else if(field % 10 == 6)
points = getPointsKing(pos_id, field, whose_move);
points = blockReductionPoints(points);
if(threadIdx.x == 0)
points_out[blockIdx.x] = points;
}
}
//----------------------------Minimax-------------------------------------------
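//kernelMax/kernelMin: one block per parent node of the minimax tree; each block reduces its
//MAX_MOVES child scores to the parent's maximum (minimum), ignoring the MINIMAX_MAXVAL
//(MINIMAX_MINVAL) sentinels that mark empty branches.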
__global__ void kernelMax(int* max, int* points, unsigned int rozmiar)
{
__shared__ int max_values[MAX_MOVES];
int t_id = threadIdx.x;
unsigned int b_id = blockIdx.x * blockDim.x + threadIdx.x;
max_values[t_id] = points[b_id];
__syncthreads();
for (int i = 1; i < MAX_MOVES; i *= 2) {
if((t_id + i) < MAX_MOVES && max_values[t_id + i] != MINIMAX_MAXVAL) {
if (max_values[t_id + i] > max_values[t_id])
max_values[t_id] = max_values[t_id + i];
}
__syncthreads();
}
if (t_id == 0)
max[blockIdx.x] = max_values[t_id];
}
__global__ void kernelMin(int* min, int* points, unsigned int rozmiar)
{
__shared__ int min_values[MAX_MOVES];
int t_id = threadIdx.x;
unsigned int b_id = blockIdx.x * blockDim.x + threadIdx.x;
min_values[t_id] = points[b_id];
__syncthreads();
for (int i = 1; i < MAX_MOVES; i *= 2) {
if((t_id + i) < MAX_MOVES) {
if (min_values[t_id + i] < min_values[t_id] && min_values[t_id + i] != MINIMAX_MINVAL)
min_values[t_id] = min_values[t_id + i];
}
__syncthreads();
}
if (t_id == 0)
min[blockIdx.x] = min_values[t_id];
}
//------------------------------------------------Funkcje globalne----------------------------------------------------------
void resetuj_plansze(int** plansza, int* gracze)
{
/*Oznaczenia pionkow:
0 - puste pole
1/11 - pion gracza bialego/czarnego
2/12 - wieza gracza bialego/czarnego
3/13 - goniec gracza bialego/czarnego
4/14 - skoczek gracza bialego/czarnego
5/15 - hetman gracza bialego/czarnego
6/16 - krol gracza bialego/czarnego
*/
for(int i = 0; i < 8; i++) {
for(int j = 0; j < 8; j++) {
int id = 0;
//Dwa gorne rzedy na pionki gracza czarnego
if(i == 0) {
if(j == 0 || j == 7)
id = 12;
else if(j == 1 || j == 6)
id = 14;
else if(j == 2 || j == 5)
id = 13;
else if(j == 3)
id = 15;
else if(j == 4)
id = 16;
}
else if(i == 1) {
id = 11;
}
//Dwa dolne rzedy na pionki gracza bialego
else if(i == 7) {
if(j == 0 || j == 7)
id = 2;
else if(j == 1 || j == 6)
id = 4;
else if(j == 2 || j == 5)
id = 3;
else if(j == 3)
id = 5;
else if(j == 4)
id = 6;
}
else if(i == 6) {
id = 1;
}
//Reszta pol bez pionkow
plansza[i][j] = id;
}
}
while(true) {
system("clear");
int wartosc = -1;
cout << "Podaj gracza białego (0 - człowiek, 1 - AI)" << endl;
cin >> wartosc;
if(wartosc == 0 || wartosc == 1) {
gracze[0] = wartosc;
break;
}
}
while(true) {
system("clear");
int wartosc = -1;
cout << "Podaj gracza czarnego (0 - człowiek, 1 - AI)" << endl;
cin >> wartosc;
if(wartosc == 0 || wartosc == 1) {
gracze[1] = wartosc;
break;
}
}
}
void rysuj_plansze(int** plansza)
{
cout << "=||";
for(int i = 0; i < 8; i++) {
cout << "===|";
}
cout << "|";
cout << endl;
for(int i = 0; i < 8; i++) {
cout << 8 - i;
cout << "||";
for(int j = 0; j < 8; j++) {
int id = plansza[i][j];
char rysuj = ' ';
if(id % 10 == 1) {
if(id < 10)
rysuj = 'P';
else
rysuj = 'p';
}
else if(id % 10 == 2) {
if(id < 10)
rysuj = 'R';
else
rysuj = 'r';
}
else if(id % 10 == 3) {
if(id < 10)
rysuj = 'B';
else
rysuj = 'b';
}
else if(id % 10 == 4) {
if(id < 10)
rysuj = 'N';
else
rysuj = 'n';
}
else if(id % 10 == 5) {
if(id < 10)
rysuj = 'Q';
else
rysuj = 'q';
}
else if(id % 10 == 6) {
if(id < 10)
rysuj = 'K';
else
rysuj = 'k';
}
cout << " " << rysuj << " " << "|";
}
cout << "|";
//Wypisywanie legendy
switch(i) {
case 0:
cout << " Oznaczenia:";
break;
case 1:
cout << " male litery - pionki czarne";
break;
case 2:
cout << " R - wieza";
break;
case 3:
cout << " N - skoczek";
break;
case 4:
cout << " K - krol";
break;
}
cout << endl;
if(i < 7) {
cout << "-++";
for(int k = 0; k < 8; k++) {
cout << "---+";
}
cout << "+";
}
else {
cout << "=++";
for(int k = 0; k < 8; k++) {
cout << "===+";
}
cout << "+";
}
switch(i) {
case 0:
cout << " DUZE LITERY - pionki biale";
break;
case 1:
cout << " P - pion";
break;
case 2:
cout << " B - goniec";
break;
case 3:
cout << " Q - hetman";
break;
}
cout << endl;
}
cout << " ||";
for(int i = 0; i < 8; i++) {
cout << " " << char('A' + i) << " " << "|";
}
cout << "|";
cout << endl << endl;
}
//Zamiana 2 pierwszych znakow wczytanego od gracza stringa na indeksy rzedow i kolumn tablicy
//Jesli dowolny indeks wykracza poza tablice(indeks = <0, 7>) zwracamy wartosc false
bool string_na_pole(int** plansza, string pole_str, int* pole)
{
int kolumna = int(tolower(pole_str[0])) - (int)'a';
int rzad = (int)'8' - (int)pole_str[1];
if(rzad >= 0 && rzad <= 7)
pole[0] = rzad;
else
return 0;
if(kolumna >= 0 && kolumna <= 7)
pole[1] = kolumna;
else
return 0;
return 1;
}
string pole_na_string(int** plansza, int* pole)
{
    string pole_str(2, ' '); //reserve two characters so the operator[] writes below are well-defined
pole_str[0] = char(pole[1] + 65);
pole_str[1] = char(56 - pole[0]);
return pole_str;
}
//Zwraca typ pionka (0 jesli pionek jest nieprawidlowy)
int sprawdz_pionek(int** plansza, int* pole, bool czyj_ruch)
{
int kolumna = pole[1];
int rzad = pole[0];
int pionek = plansza[rzad][kolumna];
if(pionek == 0)
return 0;
else if(pionek > 0 && pionek < 10 && czyj_ruch == 0)
return pionek % 10;
else if(pionek > 10 && czyj_ruch == 1)
return pionek % 10;
return 0;
}
//Zwraca nazwe pionka na wybranym polu
string nazwa_pola(int** plansza, int* pole) {
int pole_doc_typ = plansza[pole[0]][pole[1]];
if(pole_doc_typ == 0)
return "Puste pole";
else if(pole_doc_typ % 10 == 1)
return "Pion";
else if(pole_doc_typ % 10 == 2)
return "Wieza";
else if(pole_doc_typ % 10 == 3)
return "Goniec";
else if(pole_doc_typ % 10 == 4)
return "Skoczek";
else if(pole_doc_typ % 10 == 5)
return "Hetman";
else if(pole_doc_typ % 10 == 6)
return "Krol";
return "Puste pole";
}
void wybierz_rozpoczecie(int** plansza, int* pole_pocz, int* pole_doc, bool czyj_ruch)
{
int losuj = rand() % 4;
if(czyj_ruch == 0) {
switch(losuj) {
case 0:
pole_pocz[0] = 6;
pole_pocz[1] = 4;
pole_doc[0] = 4;
pole_doc[1] = 4;
break;
case 1:
pole_pocz[0] = 6;
pole_pocz[1] = 3;
pole_doc[0] = 4;
pole_doc[1] = 3;
break;
case 2:
pole_pocz[0] = 7;
pole_pocz[1] = 6;
pole_doc[0] = 5;
pole_doc[1] = 5;
break;
case 3:
pole_pocz[0] = 7;
pole_pocz[1] = 1;
pole_doc[0] = 5;
pole_doc[1] = 2;
break;
}
}
else {
switch(losuj) {
case 0:
pole_pocz[0] = 1;
pole_pocz[1] = 4;
pole_doc[0] = 3;
pole_doc[1] = 4;
break;
case 1:
pole_pocz[0] = 1;
pole_pocz[1] = 3;
pole_doc[0] = 3;
pole_doc[1] = 3;
break;
case 2:
pole_pocz[0] = 0;
pole_pocz[1] = 6;
pole_doc[0] = 2;
pole_doc[1] = 5;
break;
case 3:
pole_pocz[0] = 0;
pole_pocz[1] = 1;
pole_doc[0] = 2;
pole_doc[1] = 2;
break;
}
}
}
//GPU tworzenie grafu
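//Recursively expands the game tree to max_depth with a fixed branching factor of MAX_MOVES;
//branches without a legal move are padded with empty nodes whose leaves score MINIMAX_MINVAL
//(odd max_depth) or MINIMAX_MAXVAL (even), so 'punkty' collects MAX_MOVES^max_depth leaf
//values in depth-first order.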
void licz_graf_GPU(int* plansza, vector<int> &punkty, int depth, int max_depth, bool czyj_ruch, bool czy_wezel_niepusty)
{
if(depth == max_depth) {
int punkty_wezel = 0;
int* h_punkty = new int[64];
bool czyj_ruch_nowy = 0;
if(depth % 2 == 0)
czyj_ruch_nowy = czyj_ruch;
else
czyj_ruch_nowy = !czyj_ruch;
if(czy_wezel_niepusty == 1) {
int* h_ruchy = new int[64];
int* d_punkty;
int* d_plansza;
cudaMalloc((void**)&d_plansza, 64 * sizeof(int));
cudaMalloc((void**)&d_punkty, 64 * sizeof(int));
cudaMemcpy(d_plansza, plansza, 64 * sizeof(int), cudaMemcpyHostToDevice);
kernelEvalPoints<<<1,64>>>(d_plansza, d_punkty, czyj_ruch_nowy);
cudaDeviceSynchronize();
cudaMemcpy(h_punkty, d_punkty, sizeof(int), cudaMemcpyDeviceToHost);
punkty_wezel = h_punkty[0];
delete[] h_punkty;
cudaFree(d_punkty);
cudaFree(d_plansza);
}
else {
if(max_depth % 2 == 1)
punkty_wezel = MINIMAX_MINVAL;
else
punkty_wezel = MINIMAX_MAXVAL;
}
punkty.push_back(punkty_wezel);
}
else if(czy_wezel_niepusty == 0) {
for(int i = 0; i < 32; i++) {
licz_graf_GPU(plansza, punkty, depth + 1, max_depth, !czyj_ruch, 0);
}
}
else {
//Znajdowanie pionkow
int* h_ruchy = new int[64];
int* d_ruchy;
int* d_plansza;
cudaMalloc((void**)&d_plansza, 64 * sizeof(int));
cudaMalloc((void**)&d_ruchy, 64 * sizeof(int));
cudaMemcpy(d_plansza, plansza, 64 * sizeof(int), cudaMemcpyHostToDevice);
vector<int> pionki;
kernelFindPawns<<<1,64>>>(d_plansza, d_ruchy, czyj_ruch);
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int i = 0; i < 16; i++) {
if(thrust_tab[i] < 64) {
pionki.push_back(thrust_tab[i]);
}
else
break;
}
//Szukanie mozliwych ruchow
vector<int> ruchy_start;
vector<int> ruchy_doc;
for(int i = 0; i < pionki.size(); i++) {
if(ruchy_start.size() >= 32)
break;
kernelCheckAllMoves<<<1,64>>>(d_plansza, d_ruchy, pionki[i], czyj_ruch);
cudaDeviceSynchronize();
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int j = 0; j < 64; j++) {
if(thrust_tab[j] < 64) {
ruchy_start.push_back(pionki[i]);
ruchy_doc.push_back(thrust_tab[j]);
}
else
break;
}
}
delete[] h_ruchy;
cudaFree(d_ruchy);
cudaFree(d_plansza);
//Rozszerzanie grafu dla wyliczonych ruchow
for(int i = 0; i < 32; i++) {
if(i >= ruchy_start.size())
licz_graf_GPU(plansza, punkty, depth + 1, max_depth, !czyj_ruch, 0);
else{
int start_id = ruchy_start[i];
int doc_id = ruchy_doc[i];
int pole_start_typ = plansza[start_id];
int pole_doc_typ = plansza[doc_id];
plansza[doc_id] = pole_start_typ;
plansza[start_id] = 0;
//Wybor przez bota hetmana w razie promocji
if(pole_start_typ == 0 && (doc_id/8 == 0 || doc_id/8 == 7)) {
int pole_prom = 0;
if(czyj_ruch == 0)
pole_prom = 5;
else
pole_prom = 15;
plansza[doc_id] = pole_prom;
}
licz_graf_GPU(plansza, punkty, depth + 1, max_depth, !czyj_ruch, 1);
plansza[doc_id] = pole_doc_typ;
plansza[start_id] = pole_start_typ;
}
}
}
}
void znajdz_najlepszy_ruch(int* plansza, vector<int> punkty, int* max_ruch, bool czyj_ruch)
{
//Znajdowanie pionkow
int* h_ruchy = new int[64];
int* d_ruchy;
int* d_plansza;
cudaMalloc((void**)&d_plansza, 64 * sizeof(int));
cudaMalloc((void**)&d_ruchy, 64 * sizeof(int));
cudaMemcpy(d_plansza, plansza, 64 * sizeof(int), cudaMemcpyHostToDevice);
vector<int> pionki;
kernelFindPawns<<<1,64>>>(d_plansza, d_ruchy, czyj_ruch);
//Sortowanie
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int i = 0; i < 16; i++) {
if(thrust_tab[i] < 64) {
pionki.push_back(thrust_tab[i]);
}
else
break;
}
    //Searching for the possible moves
vector<int> ruchy_start;
vector<int> ruchy_doc;
for(int i = 0; i < pionki.size(); i++) {
if(ruchy_start.size() >= 32)
break;
kernelCheckAllMoves<<<1,64>>>(d_plansza, d_ruchy, pionki[i], czyj_ruch);
cudaDeviceSynchronize();
        //Sorting
thrust::device_ptr<int> thrust_tab(d_ruchy);
thrust::sort(thrust_tab, thrust_tab + 64);
for(int j = 0; j < 64; j++) {
if(thrust_tab[j] < 64) {
ruchy_start.push_back(pionki[i]);
ruchy_doc.push_back(thrust_tab[j]);
}
else
break;
}
}
delete[] h_ruchy;
cudaFree(d_ruchy);
cudaFree(d_plansza);
int max_val = MINIMAX_MINVAL;
int max_id_start = 0;
int max_id_doc = 0;
for(int i = 0; i < MAX_MOVES; i++) {
if(punkty[i] >= max_val) {
max_val = punkty[i];
max_id_start = ruchy_start[i];
max_id_doc = ruchy_doc[i];
}
}
max_ruch[0] = max_id_start;
max_ruch[1] = max_id_doc;
}
//--------------------------------------------Main------------------------------------------------------------------
int main()
{
srand(time(NULL));
    //Create a two-dimensional 8x8 array holding the pieces located on the squares
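    //Piece encoding used throughout this file: 0 = empty, 1 = pawn, 2 = rook,
    //3 = bishop, 4 = knight, 5 = queen, 6 = king; black pieces use the same
    //values offset by +10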
int** plansza = new int* [8];
int* gracze = new int[2];
for (int i = 0; i < 8; i++) {
plansza[i] = new int[8];
}
resetuj_plansze(plansza, gracze);
    bool czyj_ruch = 0; //0 - white, 1 - black
int runda = 1;
while(true) {
system("clear");
cout << "Wpisz 'r' w celu zresetowania gry" << endl << endl << endl;
        //Draw the board and the whole "GUI"
rysuj_plansze(plansza);
if(czyj_ruch == 0)
cout << "Obecny ruch - gracz BIALY" << endl << endl;
else
cout << "Obecny ruch - gracz CZARNY" << endl << endl;
int* h_ruchy = new int[64];
int* d_ruchy;
int* d_plansza;
cudaMalloc((void**)&d_plansza, 64 * sizeof(int));
cudaMalloc((void**)&d_ruchy, 64 * sizeof(int));
int* plansza_vec = new int[64];
for(int i = 0; i < 64; i++) {
plansza_vec[i] = plansza[i/8][i%8];
}
cudaMemcpy(d_plansza, plansza_vec, 64 * sizeof(int), cudaMemcpyHostToDevice);
kernelEvalPoints<<<1, 64>>>(d_plansza, d_ruchy, czyj_ruch);
cudaDeviceSynchronize();
cudaMemcpy(h_ruchy, d_ruchy, sizeof(int), cudaMemcpyDeviceToHost);
//cout << "Obecna plansza punkty: " << h_ruchy[0] << endl;
int* pole_poczatkowe = new int[2];
int* pole_docelowe = new int[2];
int pionek;
        //Human player's turn
if(gracze[czyj_ruch] == 0) {
            //Ask the player for the square of the piece they want to move and check
            //whether the player actually has a piece on that square
if(czyj_ruch == 0 || czyj_ruch == 1) {
string pole_str = "";
cout << "Podaj pole pionka:" << endl;
cin >> pole_str;
pole_poczatkowe = new int[2];
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
if(string_na_pole(plansza, pole_str, pole_poczatkowe) == 0) {
cout << "Nieprawidlowe pole!" << endl;
system("pause");
continue;
}
pionek = sprawdz_pionek(plansza, pole_poczatkowe, czyj_ruch);
if(pionek == 0) {
cout << "Nie masz pionka na tym polu!" << endl;
system("pause");
continue;
}
int start = pole_poczatkowe[0] * 8 + pole_poczatkowe[1];
cudaMemcpy(d_plansza, plansza_vec, 64 * sizeof(int), cudaMemcpyHostToDevice);
kernelCheckAllMoves<<<1, 64>>>(d_plansza, d_ruchy, start, czyj_ruch);
cudaDeviceSynchronize();
cudaMemcpy(h_ruchy, d_ruchy, 64 * sizeof(int), cudaMemcpyDeviceToHost);
vector<int> pionek_ruchy;
for(int i = 0; i < 64; i++) {
if(h_ruchy[i] < 64)
pionek_ruchy.push_back(h_ruchy[i]);
}
if(pionek_ruchy.empty()) {
cout << "Pionek ten nie moze wykonac zadnego ruchu!" << endl;
system("pause");
continue;
}
else {
cout << "Mozliwe ruchy:" << endl;
for(int i = 0; i < pionek_ruchy.size(); i++) {
int* pole_temp = new int[2];
pole_temp[0] = pionek_ruchy[i] / 8;
pole_temp[1] = pionek_ruchy[i] % 8;
string temp_str = pole_na_string(plansza, pole_temp);
cout << temp_str[0] << temp_str[1] << " ";
if(i % 5 == 0 && i != 0)
cout << endl;
delete []pole_temp;
}
cout << endl;
}
                //Ask the player for the square they want to move the previously chosen piece to
                //and check whether the move is valid
cout << "Podaj ruch pionka:" << endl;
cin >> pole_str;
pole_docelowe = new int[2];
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
else if(string_na_pole(plansza, pole_str, pole_docelowe) == 0) {
cout << "Nieprawidlowe pole!" << endl;
system("pause");
continue;
}
else if(pole_docelowe[0] == pole_poczatkowe[0] && pole_docelowe[1] == pole_poczatkowe[1]) {
cout << "Nieprawidlowe pole!" << endl;
system("pause");
continue;
}
cudaMemcpy(d_plansza, plansza_vec, 64 * sizeof(int), cudaMemcpyHostToDevice);
start = pole_poczatkowe[0] * 8 + pole_poczatkowe[1];
int koniec = pole_docelowe[0] * 8 + pole_docelowe[1];
kernelCheckMove<<<1, 1>>>(d_plansza, d_ruchy, start, koniec, czyj_ruch);
cudaDeviceSynchronize();
cudaMemcpy(h_ruchy, d_ruchy, sizeof(int), cudaMemcpyDeviceToHost);
if(h_ruchy[0] > 64) {
cout << "Nie mozesz wykonac takiego ruchu!" << endl;
system("pause");
continue;
}
}
            //Check for pawn promotion; if it occurred, the player chooses the new piece
if(pionek == 1) {
bool promocja = 0;
if(czyj_ruch == 0 && pole_docelowe[0] == 0)
promocja = 1;
else if(czyj_ruch == 1 && pole_docelowe[0] == 7)
promocja = 1;
if(promocja == 1) {
int figura_promocja = 0;
cout << "Nastapila promocja! Wybierz figure, do ktorej ma awansowac pion:" << endl;
cout << "2 - wieza" << endl;
cout << "3 - goniec" << endl;
cout << "4 - skoczek" << endl;
cout << "5 - hetman" << endl;
cin >> figura_promocja;
if(figura_promocja >= 2 && figura_promocja <= 5) {
pionek = figura_promocja;
}
else {
cout << "Nieprawidlowo wybrana figura!" << endl;
system("pause");
continue;
}
}
}
}
        //First AI round - choosing the opening move
else if(runda == 1 || runda == 2) {
wybierz_rozpoczecie(plansza, pole_poczatkowe, pole_docelowe, czyj_ruch);
pionek = plansza[pole_poczatkowe[0]][pole_poczatkowe[1]];
string str_pocz = pole_na_string(plansza, pole_poczatkowe);
string str_doc = pole_na_string(plansza, pole_docelowe);
cout << "Wybrany ruch to " << str_pocz[0] << str_pocz[1] << "->" <<
str_doc[0] << str_doc[1] << endl;
string pole_str;
cout << "Wpisz 'r' aby zrestartowac rozgrywke, lub cokolwiek innego, żeby AI wykonało ruch:" << endl;
cin >> pole_str;
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
}
        //AI turn
else {
vector<int> punkty_minimax;
int max_glebokosc = 3;
unsigned int rozmiar_minimax = pow(MAX_MOVES, max_glebokosc);
licz_graf_GPU(plansza_vec, punkty_minimax, 0, max_glebokosc, czyj_ruch, 1);
int* host_points = new int[punkty_minimax.size()];
for(int i = 0; i < punkty_minimax.size(); i++) {
host_points[i] = punkty_minimax[i];
}
int* dev_points;
int* dev_max;
int* dev_min;
            //Memory allocation
cudaMalloc((void**)&dev_points, rozmiar_minimax * sizeof(int));
cudaMalloc((void**)&dev_max, rozmiar_minimax * sizeof(int));
cudaMalloc((void**)&dev_min, rozmiar_minimax * sizeof(int));
cudaMemcpy(dev_points, host_points, rozmiar_minimax * sizeof(int), cudaMemcpyHostToDevice);
int watki = MAX_MOVES;
unsigned int bloki = 1024;
unsigned int max_blocks = pow(2, 31) - 1;
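            //Collapse the minimax tree one level per pass: kernelMin and kernelMax
            //alternate with the level parity, each pass producing MAX_MOVES^(i-1)
            //values from the previous level, until only the MAX_MOVES scores of the
            //root's children remain (copied back below)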
for(int i = max_glebokosc; i > 1; i--) {
unsigned int rozmiar_wynik = pow(MAX_MOVES, i-1);
bloki = min(max_blocks, rozmiar_wynik);
if(i % 2 == 0) {
if(i == max_glebokosc) {
kernelMin<<<bloki, watki>>>(dev_min, dev_points, rozmiar_wynik);
cudaDeviceSynchronize();
}
else {
kernelMin<<<bloki, watki>>>(dev_min, dev_max, rozmiar_wynik);
cudaDeviceSynchronize();
}
}
else {
if(i == max_glebokosc) {
kernelMax<<<bloki, watki>>>(dev_max, dev_points, rozmiar_wynik);
cudaDeviceSynchronize();
}
else {
kernelMax<<<bloki, watki>>>(dev_max, dev_min, rozmiar_wynik);
cudaDeviceSynchronize();
}
}
}
vector<int> punkty_ostatni_wezel;
if(max_glebokosc != 1) {
cudaMemcpy(host_points, dev_min, MAX_MOVES * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < MAX_MOVES; i++) {
punkty_ostatni_wezel.push_back(host_points[i]);
//cout << punkty_ostatni_wezel[i] << " ";
}
}
else {
for(int i = 0; i < MAX_MOVES; i++) {
punkty_ostatni_wezel.push_back(punkty_minimax[i]);
//cout << punkty_ostatni_wezel[i] << " ";
}
}
int* najlepszy_ruch = new int[2];
znajdz_najlepszy_ruch(plansza_vec, punkty_ostatni_wezel, najlepszy_ruch, czyj_ruch);
pole_poczatkowe[0] = najlepszy_ruch[0] / 8;
pole_poczatkowe[1] = najlepszy_ruch[0] % 8;
pole_docelowe[0] = najlepszy_ruch[1] / 8;
pole_docelowe[1] = najlepszy_ruch[1] % 8;
pionek = plansza[pole_poczatkowe[0]][pole_poczatkowe[1]] % 10;
            //Promotion
if(pionek == 1 && (pole_docelowe[0] == 0 || pole_docelowe[0] == 7))
pionek = 5;
string str_pocz = pole_na_string(plansza, pole_poczatkowe);
string str_doc = pole_na_string(plansza, pole_docelowe);
cout << "Najlepszy wykryty ruch to " << str_pocz[0] << str_pocz[1] << "->" <<
str_doc[0] << str_doc[1] << endl;
string pole_str;
cout << "Wpisz 'r' aby zrestartowac rozgrywke, lub cokolwiek innego, żeby AI wykonało ruch:" << endl;
cin >> pole_str;
if(tolower(pole_str[0]) == 'r') {
cout << "Gra zostanie zresetowana!" << endl;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
delete[] najlepszy_ruch;
cudaFree(dev_points);
cudaFree(dev_min);
cudaFree(dev_max);
}
        //Check for checkmate, resetting the game if needed
if(plansza[pole_docelowe[0]][pole_docelowe[1]] % 10 == 6) {
cout << "=================================" << endl;
if(czyj_ruch == 0)
cout << "Koniec gry! Wygrywa gracz bialy!" << endl;
else
cout << "Koniec gry! Wygrywa gracz Czarny!" << endl;
cout << "=================================" << endl;
string str;
cin >> str;
system("pause");
resetuj_plansze(plansza, gracze);
czyj_ruch = 0;
runda = 1;
continue;
}
        //Move the piece to its new square
if(czyj_ruch == 1)
pionek += 10;
plansza[pole_poczatkowe[0]][pole_poczatkowe[1]] = 0;
plansza[pole_docelowe[0]][pole_docelowe[1]] = pionek;
        //Switch the player to move
czyj_ruch = !czyj_ruch;
runda++;
delete []pole_poczatkowe;
delete []pole_docelowe;
cudaFree(d_plansza);
cudaFree(d_ruchy);
}
delete []gracze;
return 0;
} |
ad45c7f017e1cddeb4472d263c4f24a94feaa026.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/scalar_add.hpp"
template <typename T>
__global__ void scalar_add_kernel(T* x, size_t n, size_t s, const T beta) {
auto index = 1 * (threadIdx.x + blockIdx.x * blockDim.x);
auto stride = 1 * (blockDim.x * gridDim.x);
for (; index < n; index += stride) {
x[s * index] += beta;
}
}
template <>
__global__ void scalar_add_kernel(hipDoubleComplex* x, size_t n, size_t s, const hipDoubleComplex beta) {
auto index = 1 * (threadIdx.x + blockIdx.x * blockDim.x);
auto stride = 1 * (blockDim.x * gridDim.x);
for (; index < n; index += stride) {
x[s * index] = cuCadd(x[s * index], beta);
}
}
template <>
__global__ void scalar_add_kernel(hipComplex* x, size_t n, size_t s, const hipComplex beta) {
auto index = 1 * (threadIdx.x + blockIdx.x * blockDim.x);
auto stride = 1 * (blockDim.x * gridDim.x);
for (; index < n; index += stride) {
x[s * index] = cuCaddf(x[s * index], beta);
}
}
template <typename T>
void scalar_add_kernel_run(T* x, size_t n, size_t s, T beta) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scalar_add_kernel<T>, 0, 0);
int gridSize = ((n / s) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( scalar_add_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, x, n, s, beta);
hipDeviceSynchronize();
}
void egblas_scalar_sadd(float* x, size_t n, size_t s, float beta) {
scalar_add_kernel_run(x, n, s, beta);
}
void egblas_scalar_dadd(double* x, size_t n, size_t s, double beta) {
scalar_add_kernel_run(x, n, s, beta);
}
void egblas_scalar_cadd(hipComplex* x, size_t n, size_t s, hipComplex beta) {
scalar_add_kernel_run(x, n, s, beta);
}
void egblas_scalar_zadd(hipDoubleComplex* x, size_t n, size_t s, hipDoubleComplex beta) {
scalar_add_kernel_run(x, n, s, beta);
}
| ad45c7f017e1cddeb4472d263c4f24a94feaa026.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/scalar_add.hpp"
template <typename T>
__global__ void scalar_add_kernel(T* x, size_t n, size_t s, const T beta) {
auto index = 1 * (threadIdx.x + blockIdx.x * blockDim.x);
auto stride = 1 * (blockDim.x * gridDim.x);
for (; index < n; index += stride) {
x[s * index] += beta;
}
}
template <>
__global__ void scalar_add_kernel(cuDoubleComplex* x, size_t n, size_t s, const cuDoubleComplex beta) {
auto index = 1 * (threadIdx.x + blockIdx.x * blockDim.x);
auto stride = 1 * (blockDim.x * gridDim.x);
for (; index < n; index += stride) {
x[s * index] = cuCadd(x[s * index], beta);
}
}
template <>
__global__ void scalar_add_kernel(cuComplex* x, size_t n, size_t s, const cuComplex beta) {
auto index = 1 * (threadIdx.x + blockIdx.x * blockDim.x);
auto stride = 1 * (blockDim.x * gridDim.x);
for (; index < n; index += stride) {
x[s * index] = cuCaddf(x[s * index], beta);
}
}
template <typename T>
void scalar_add_kernel_run(T* x, size_t n, size_t s, T beta) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scalar_add_kernel<T>, 0, 0);
int gridSize = ((n / s) + blockSize - 1) / blockSize;
scalar_add_kernel<T><<<gridSize, blockSize>>>(x, n, s, beta);
cudaDeviceSynchronize();
}
void egblas_scalar_sadd(float* x, size_t n, size_t s, float beta) {
scalar_add_kernel_run(x, n, s, beta);
}
void egblas_scalar_dadd(double* x, size_t n, size_t s, double beta) {
scalar_add_kernel_run(x, n, s, beta);
}
void egblas_scalar_cadd(cuComplex* x, size_t n, size_t s, cuComplex beta) {
scalar_add_kernel_run(x, n, s, beta);
}
void egblas_scalar_zadd(cuDoubleComplex* x, size_t n, size_t s, cuDoubleComplex beta) {
scalar_add_kernel_run(x, n, s, beta);
}
|
1bde59dbcbdbf82655e87b8de9e32ef3d4121cef.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed
#include <ceed.h>
#include <hip/hip_runtime.h>
//------------------------------------------------------------------------------
// Kernel for set value on device
//------------------------------------------------------------------------------
__global__ static void setValueK(CeedScalar * __restrict__ vec, CeedSize size,
CeedScalar val) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
vec[idx] = val;
}
//------------------------------------------------------------------------------
// Set value on device memory
//------------------------------------------------------------------------------
extern "C" int CeedDeviceSetValue_Cuda(CeedScalar* d_array, CeedSize length,
CeedScalar val) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( setValueK), dim3(gridsize),dim3(bsize), 0, 0, d_array, length, val);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for taking reciprocal
//------------------------------------------------------------------------------
__global__ static void rcpValueK(CeedScalar * __restrict__ vec, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
if (fabs(vec[idx]) > 1E-16)
vec[idx] = 1./vec[idx];
}
//------------------------------------------------------------------------------
// Take vector reciprocal in device memory
//------------------------------------------------------------------------------
extern "C" int CeedDeviceReciprocal_Cuda(CeedScalar* d_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( rcpValueK), dim3(gridsize),dim3(bsize), 0, 0, d_array, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for scale
//------------------------------------------------------------------------------
__global__ static void scaleValueK(CeedScalar * __restrict__ x, CeedScalar alpha,
CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
x[idx] *= alpha;
}
//------------------------------------------------------------------------------
// Compute x = alpha x on device
//------------------------------------------------------------------------------
extern "C" int CeedDeviceScale_Cuda(CeedScalar *x_array, CeedScalar alpha,
CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( scaleValueK), dim3(gridsize),dim3(bsize), 0, 0, x_array, alpha, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for axpy
//------------------------------------------------------------------------------
__global__ static void axpyValueK(CeedScalar * __restrict__ y, CeedScalar alpha,
CeedScalar * __restrict__ x, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
y[idx] += alpha * x[idx];
}
//------------------------------------------------------------------------------
// Compute y = alpha x + y on device
//------------------------------------------------------------------------------
extern "C" int CeedDeviceAXPY_Cuda(CeedScalar *y_array, CeedScalar alpha,
CeedScalar *x_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( axpyValueK), dim3(gridsize),dim3(bsize), 0, 0, y_array, alpha, x_array, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for axpby
//------------------------------------------------------------------------------
__global__ static void axpbyValueK(CeedScalar * __restrict__ y, CeedScalar alpha, CeedScalar beta,
CeedScalar * __restrict__ x, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
y[idx] = beta * y[idx];
y[idx] += alpha * x[idx];
}
//------------------------------------------------------------------------------
// Compute y = alpha x + beta y on device
//------------------------------------------------------------------------------
extern "C" int CeedDeviceAXPBY_Cuda(CeedScalar *y_array, CeedScalar alpha, CeedScalar beta,
CeedScalar *x_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( axpbyValueK), dim3(gridsize),dim3(bsize), 0, 0, y_array, alpha, beta, x_array, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for pointwise mult
//------------------------------------------------------------------------------
__global__ static void pointwiseMultValueK(CeedScalar * __restrict__ w,
CeedScalar * x, CeedScalar * __restrict__ y, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
w[idx] = x[idx] * y[idx];
}
//------------------------------------------------------------------------------
// Compute the pointwise multiplication w = x .* y on device
//------------------------------------------------------------------------------
extern "C" int CeedDevicePointwiseMult_Cuda(CeedScalar *w_array, CeedScalar *x_array,
CeedScalar *y_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( pointwiseMultValueK), dim3(gridsize),dim3(bsize), 0, 0, w_array, x_array, y_array, length);
return 0;
}
//------------------------------------------------------------------------------
| 1bde59dbcbdbf82655e87b8de9e32ef3d4121cef.cu | // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed
#include <ceed.h>
#include <cuda.h>
//------------------------------------------------------------------------------
// Kernel for set value on device
//------------------------------------------------------------------------------
__global__ static void setValueK(CeedScalar * __restrict__ vec, CeedSize size,
CeedScalar val) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
vec[idx] = val;
}
//------------------------------------------------------------------------------
// Set value on device memory
//------------------------------------------------------------------------------
extern "C" int CeedDeviceSetValue_Cuda(CeedScalar* d_array, CeedSize length,
CeedScalar val) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
setValueK<<<gridsize,bsize>>>(d_array, length, val);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for taking reciprocal
//------------------------------------------------------------------------------
__global__ static void rcpValueK(CeedScalar * __restrict__ vec, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
if (fabs(vec[idx]) > 1E-16)
vec[idx] = 1./vec[idx];
}
//------------------------------------------------------------------------------
// Take vector reciprocal in device memory
//------------------------------------------------------------------------------
extern "C" int CeedDeviceReciprocal_Cuda(CeedScalar* d_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
rcpValueK<<<gridsize,bsize>>>(d_array, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for scale
//------------------------------------------------------------------------------
__global__ static void scaleValueK(CeedScalar * __restrict__ x, CeedScalar alpha,
CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
x[idx] *= alpha;
}
//------------------------------------------------------------------------------
// Compute x = alpha x on device
//------------------------------------------------------------------------------
extern "C" int CeedDeviceScale_Cuda(CeedScalar *x_array, CeedScalar alpha,
CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
scaleValueK<<<gridsize,bsize>>>(x_array, alpha, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for axpy
//------------------------------------------------------------------------------
__global__ static void axpyValueK(CeedScalar * __restrict__ y, CeedScalar alpha,
CeedScalar * __restrict__ x, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
y[idx] += alpha * x[idx];
}
//------------------------------------------------------------------------------
// Compute y = alpha x + y on device
//------------------------------------------------------------------------------
extern "C" int CeedDeviceAXPY_Cuda(CeedScalar *y_array, CeedScalar alpha,
CeedScalar *x_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
axpyValueK<<<gridsize,bsize>>>(y_array, alpha, x_array, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for axpby
//------------------------------------------------------------------------------
__global__ static void axpbyValueK(CeedScalar * __restrict__ y, CeedScalar alpha, CeedScalar beta,
CeedScalar * __restrict__ x, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
y[idx] = beta * y[idx];
y[idx] += alpha * x[idx];
}
//------------------------------------------------------------------------------
// Compute y = alpha x + beta y on device
//------------------------------------------------------------------------------
extern "C" int CeedDeviceAXPBY_Cuda(CeedScalar *y_array, CeedScalar alpha, CeedScalar beta,
CeedScalar *x_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
axpbyValueK<<<gridsize,bsize>>>(y_array, alpha, beta, x_array, length);
return 0;
}
//------------------------------------------------------------------------------
// Kernel for pointwise mult
//------------------------------------------------------------------------------
__global__ static void pointwiseMultValueK(CeedScalar * __restrict__ w,
CeedScalar * x, CeedScalar * __restrict__ y, CeedSize size) {
CeedSize idx = threadIdx.x + (CeedSize)blockDim.x * blockIdx.x;
if (idx >= size)
return;
w[idx] = x[idx] * y[idx];
}
//------------------------------------------------------------------------------
// Compute the pointwise multiplication w = x .* y on device
//------------------------------------------------------------------------------
extern "C" int CeedDevicePointwiseMult_Cuda(CeedScalar *w_array, CeedScalar *x_array,
CeedScalar *y_array, CeedSize length) {
const int bsize = 512;
const CeedSize vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
pointwiseMultValueK<<<gridsize,bsize>>>(w_array, x_array, y_array, length);
return 0;
}
//------------------------------------------------------------------------------
|
64e980025f70fcee9e591f5bf3e107b9574f2571.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
#include <hip/hip_complex.h>
__global__ void pow_float(int n,int idx,float *dy,int incy,float raise,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = powf(dy[i],raise);
}
}
| 64e980025f70fcee9e591f5bf3e107b9574f2571.cu | extern "C"
#include <math.h>
#include <cuComplex.h>
__global__ void pow_float(int n,int idx,float *dy,int incy,float raise,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = powf(dy[i],raise);
}
}
|
d453bbd18e031c9542995d5a68e5c711f1a465c2.hip | // !!! This is a file automatically generated by hipify!!!
/*
@author : Nilanka Manoj
@compile : nvcc meanfilter.cu -o build/meanfilter lib/EasyBMP.cpp
@run : ./build/meanfilter <<imgSize>> <<kernal>>
*/
#include <iostream>
#include <fstream>
#include "lib/EasyBMP.h"
#include <string>
#include <hip/hip_runtime.h>
using namespace std;
int *matrix, *cpuOut, *gpuOut;
int *matrix_d, *out_d;
__global__ void filter_gpu_ker3(int *matrix_in, int *matrix_out, int imgSize)
{
int i = max(imgSize / blockDim.x + 1, blockIdx.x) * blockDim.x + threadIdx.x;
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1]) /
9;
}
__global__ void filter_gpu_ker5(int *matrix_in, int *matrix_out, int imgSize)
{
int i = max((imgSize / blockDim.x) * 2 + 1, blockIdx.x) * blockDim.x + threadIdx.x;
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] + matrix_in[i + 2] + matrix_in[i - 2] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] + +matrix_in[i - imgSize + 2] + matrix_in[i - imgSize - 2] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1] + matrix_in[i + imgSize + 2] + matrix_in[i + imgSize - 2] +
matrix_in[i - imgSize * 2] + matrix_in[i - imgSize * 2 + 1] + matrix_in[i - imgSize * 2 - 1] + +matrix_in[i - imgSize * 2 + 2] + matrix_in[i - imgSize * 2 - 2] +
matrix_in[i + imgSize * 2] + matrix_in[i + imgSize * 2 + 1] + matrix_in[i + imgSize * 2 - 1] + matrix_in[i + imgSize * 2 + 2] + matrix_in[i + imgSize * 2 - 2]) /
25;
}
void filter_cpu_ker3(int *matrix_in, int *matrix_out, int imgSize)
{
for (int i = imgSize; i < imgSize * (imgSize - 1); i++)
{
int x = i % imgSize;
if (x != 0 && x != imgSize - 1)
{
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1]) /
9;
}
}
}
void filter_cpu_ker5(int *matrix_in, int *matrix_out, int imgSize)
{
for (int i = imgSize * 2; i < imgSize * (imgSize - 2); i++)
{
int x = i % imgSize;
if (x != 0 && x != imgSize - 1 && x != 1 && x != imgSize - 2)
{
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] + matrix_in[i + 2] + matrix_in[i - 2] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] + +matrix_in[i - imgSize + 2] + matrix_in[i - imgSize - 2] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1] + matrix_in[i + imgSize + 2] + matrix_in[i + imgSize - 2] +
matrix_in[i - imgSize * 2] + matrix_in[i - imgSize * 2 + 1] + matrix_in[i - imgSize * 2 - 1] + +matrix_in[i - imgSize * 2 + 2] + matrix_in[i - imgSize * 2 - 2] +
matrix_in[i + imgSize * 2] + matrix_in[i + imgSize * 2 + 1] + matrix_in[i + imgSize * 2 - 1] + matrix_in[i + imgSize * 2 + 2] + matrix_in[i + imgSize * 2 - 2]) /
25;
}
}
}
void img_to_matrix(char *file, int imgSize, int *matrix)
{
BMP Image;
Image.ReadFromFile(file);
for (int i = 0; i < imgSize; i++)
{
for (int j = 0; j < imgSize; j++)
{
int b = Image(i, j)->Blue;
matrix[i * imgSize + j] = b;
}
}
cout << "matrix is extracted from :";
cout << file << endl;
}
void matrix_to_img(char *file, int imgSize, int *matrix)
{
BMP Image;
Image.SetSize(imgSize, imgSize);
for (int i = 0; i < imgSize; i++)
{
for (int j = 0; j < imgSize; j++)
{
int b = matrix[i * imgSize + j];
Image(i, j)->Blue = b;
Image(i, j)->Green = b;
Image(i, j)->Red = b;
}
}
Image.SetBitDepth(8);
CreateGrayscaleColorTable(Image);
Image.WriteToFile(file);
cout << "matrix is saved to :";
cout << file << endl;
}
void runRound(int imgSize, char *file, int kernal, char *gpu_file, char *cpu_file)
{
cout << "======================round starting====================================\n";
matrix = (int *)malloc((imgSize) * (imgSize) * sizeof(int));
cpuOut = (int *)malloc((imgSize) * (imgSize) * sizeof(int));
gpuOut = (int *)malloc((imgSize) * (imgSize) * sizeof(int));
img_to_matrix(file, imgSize, matrix);
hipMalloc((void **)&matrix_d, (imgSize) * (imgSize) * sizeof(int));
hipMalloc((void **)&out_d, (imgSize) * (imgSize) * sizeof(int));
hipMemcpy(matrix_d, matrix, imgSize * imgSize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(out_d, matrix, imgSize * imgSize * sizeof(int), hipMemcpyHostToDevice);
    //copy the input so the CPU filter does not run in place on matrix
    for (int i = 0; i < imgSize * imgSize; i++)
        cpuOut[i] = matrix[i];
printf("Doing GPU Filtering\n");
clock_t start_d = clock();
if (kernal == 3)
{
hipLaunchKernelGGL(( filter_gpu_ker3), dim3(imgSize * imgSize / 64), dim3(64), 0, 0, matrix_d, out_d, imgSize);
}
else if (kernal == 5)
{
hipLaunchKernelGGL(( filter_gpu_ker5), dim3(imgSize * imgSize / 64), dim3(64), 0, 0, matrix_d, out_d, imgSize);
}
hipDeviceSynchronize();
clock_t end_d = clock();
printf("Doing CPU filtering\n");
clock_t start_h = clock();
if (kernal == 3)
{
filter_cpu_ker3(matrix, cpuOut, imgSize);
}
if (kernal == 5)
{
filter_cpu_ker5(matrix, cpuOut, imgSize);
}
clock_t end_h = clock();
double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
hipMemcpy(gpuOut, out_d, imgSize * imgSize * sizeof(int), hipMemcpyDeviceToHost);
hipFree(out_d);
hipFree(matrix_d);
matrix_to_img(cpu_file, imgSize, cpuOut);
matrix_to_img(gpu_file, imgSize, gpuOut);
printf("image size: %d kernal: %d GPU Time: %f CPU Time: %f\n", imgSize, kernal, time_d, time_h);
}
int main(int argc, char *argv[])
{
if (argc == 3)
{
int imgSize = atoi(argv[1]);
int kernal = atoi(argv[2]);
if(imgSize==1280){
if(kernal==3){
runRound(1280, "input/img1280.bmp", 3, "output/gpu_1280_3.bmp", "output/cpu_1280_3.bmp");
}
else if(kernal==5){
runRound(1280, "input/img1280.bmp", 5, "output/gpu_1280_5.bmp", "output/cpu_1280_5.bmp");
}
else{
cout<<"invalid kernal size\n";
}
}
else if(imgSize==640){
if(kernal==3){
runRound(640, "input/img640.bmp", 3, "output/gpu_640_3.bmp", "output/cpu_640_3.bmp");
}
else if(kernal==5){
runRound(640, "input/img640.bmp", 5, "output/gpu_640_5.bmp", "output/cpu_640_5.bmp");
}
else{
cout<<"invalid kernal size\n";
}
}
else{
cout<<"invalid image size\n";
}
}
else{
cout<<"invalid number of arguments\n";
}
return 0;
}
| d453bbd18e031c9542995d5a68e5c711f1a465c2.cu | /*
@author : Nilanka Manoj
@compile : nvcc meanfilter.cu -o build/meanfilter lib/EasyBMP.cpp
@run : ./build/meanfilter <<imgSize>> <<kernal>>
*/
#include <iostream>
#include <fstream>
#include "lib/EasyBMP.h"
#include <string>
#include <cuda.h>
using namespace std;
int *matrix, *cpuOut, *gpuOut;
int *matrix_d, *out_d;
__global__ void filter_gpu_ker3(int *matrix_in, int *matrix_out, int imgSize)
{
int i = max(imgSize / blockDim.x + 1, blockIdx.x) * blockDim.x + threadIdx.x;
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1]) /
9;
}
__global__ void filter_gpu_ker5(int *matrix_in, int *matrix_out, int imgSize)
{
int i = max((imgSize / blockDim.x) * 2 + 1, blockIdx.x) * blockDim.x + threadIdx.x;
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] + matrix_in[i + 2] + matrix_in[i - 2] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] + +matrix_in[i - imgSize + 2] + matrix_in[i - imgSize - 2] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1] + matrix_in[i + imgSize + 2] + matrix_in[i + imgSize - 2] +
matrix_in[i - imgSize * 2] + matrix_in[i - imgSize * 2 + 1] + matrix_in[i - imgSize * 2 - 1] + +matrix_in[i - imgSize * 2 + 2] + matrix_in[i - imgSize * 2 - 2] +
matrix_in[i + imgSize * 2] + matrix_in[i + imgSize * 2 + 1] + matrix_in[i + imgSize * 2 - 1] + matrix_in[i + imgSize * 2 + 2] + matrix_in[i + imgSize * 2 - 2]) /
25;
}
void filter_cpu_ker3(int *matrix_in, int *matrix_out, int imgSize)
{
for (int i = imgSize; i < imgSize * (imgSize - 1); i++)
{
int x = i % imgSize;
if (x != 0 && x != imgSize - 1)
{
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1]) /
9;
}
}
}
void filter_cpu_ker5(int *matrix_in, int *matrix_out, int imgSize)
{
for (int i = imgSize * 2; i < imgSize * (imgSize - 2); i++)
{
int x = i % imgSize;
if (x != 0 && x != imgSize - 1 && x != 1 && x != imgSize - 2)
{
matrix_out[i] = (matrix_in[i] + matrix_in[i + 1] + matrix_in[i - 1] + matrix_in[i + 2] + matrix_in[i - 2] +
matrix_in[i - imgSize] + matrix_in[i - imgSize + 1] + matrix_in[i - imgSize - 1] + +matrix_in[i - imgSize + 2] + matrix_in[i - imgSize - 2] +
matrix_in[i + imgSize] + matrix_in[i + imgSize + 1] + matrix_in[i + imgSize - 1] + matrix_in[i + imgSize + 2] + matrix_in[i + imgSize - 2] +
matrix_in[i - imgSize * 2] + matrix_in[i - imgSize * 2 + 1] + matrix_in[i - imgSize * 2 - 1] + +matrix_in[i - imgSize * 2 + 2] + matrix_in[i - imgSize * 2 - 2] +
matrix_in[i + imgSize * 2] + matrix_in[i + imgSize * 2 + 1] + matrix_in[i + imgSize * 2 - 1] + matrix_in[i + imgSize * 2 + 2] + matrix_in[i + imgSize * 2 - 2]) /
25;
}
}
}
void img_to_matrix(char *file, int imgSize, int *matrix)
{
BMP Image;
Image.ReadFromFile(file);
for (int i = 0; i < imgSize; i++)
{
for (int j = 0; j < imgSize; j++)
{
int b = Image(i, j)->Blue;
matrix[i * imgSize + j] = b;
}
}
cout << "matrix is extracted from :";
cout << file << endl;
}
void matrix_to_img(char *file, int imgSize, int *matrix)
{
BMP Image;
Image.SetSize(imgSize, imgSize);
for (int i = 0; i < imgSize; i++)
{
for (int j = 0; j < imgSize; j++)
{
int b = matrix[i * imgSize + j];
Image(i, j)->Blue = b;
Image(i, j)->Green = b;
Image(i, j)->Red = b;
}
}
Image.SetBitDepth(8);
CreateGrayscaleColorTable(Image);
Image.WriteToFile(file);
cout << "matrix is saved to :";
cout << file << endl;
}
void runRound(int imgSize, char *file, int kernal, char *gpu_file, char *cpu_file)
{
cout << "======================round starting====================================\n";
matrix = (int *)malloc((imgSize) * (imgSize) * sizeof(int));
cpuOut = (int *)malloc((imgSize) * (imgSize) * sizeof(int));
gpuOut = (int *)malloc((imgSize) * (imgSize) * sizeof(int));
img_to_matrix(file, imgSize, matrix);
cudaMalloc((void **)&matrix_d, (imgSize) * (imgSize) * sizeof(int));
cudaMalloc((void **)&out_d, (imgSize) * (imgSize) * sizeof(int));
cudaMemcpy(matrix_d, matrix, imgSize * imgSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(out_d, matrix, imgSize * imgSize * sizeof(int), cudaMemcpyHostToDevice);
    //copy the input so the CPU filter does not run in place on matrix
    for (int i = 0; i < imgSize * imgSize; i++)
        cpuOut[i] = matrix[i];
printf("Doing GPU Filtering\n");
clock_t start_d = clock();
if (kernal == 3)
{
filter_gpu_ker3<<<imgSize * imgSize / 64, 64>>>(matrix_d, out_d, imgSize);
}
else if (kernal == 5)
{
filter_gpu_ker5<<<imgSize * imgSize / 64, 64>>>(matrix_d, out_d, imgSize);
}
cudaThreadSynchronize();
clock_t end_d = clock();
printf("Doing CPU filtering\n");
clock_t start_h = clock();
if (kernal == 3)
{
filter_cpu_ker3(matrix, cpuOut, imgSize);
}
if (kernal == 5)
{
filter_cpu_ker5(matrix, cpuOut, imgSize);
}
clock_t end_h = clock();
double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
cudaMemcpy(gpuOut, out_d, imgSize * imgSize * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(out_d);
cudaFree(matrix_d);
matrix_to_img(cpu_file, imgSize, cpuOut);
matrix_to_img(gpu_file, imgSize, gpuOut);
printf("image size: %d kernal: %d GPU Time: %f CPU Time: %f\n", imgSize, kernal, time_d, time_h);
}
int main(int argc, char *argv[])
{
if (argc == 3)
{
int imgSize = atoi(argv[1]);
int kernal = atoi(argv[2]);
if(imgSize==1280){
if(kernal==3){
runRound(1280, "input/img1280.bmp", 3, "output/gpu_1280_3.bmp", "output/cpu_1280_3.bmp");
}
else if(kernal==5){
runRound(1280, "input/img1280.bmp", 5, "output/gpu_1280_5.bmp", "output/cpu_1280_5.bmp");
}
else{
cout<<"invalid kernal size\n";
}
}
else if(imgSize==640){
if(kernal==3){
runRound(640, "input/img640.bmp", 3, "output/gpu_640_3.bmp", "output/cpu_640_3.bmp");
}
else if(kernal==5){
runRound(640, "input/img640.bmp", 5, "output/gpu_640_5.bmp", "output/cpu_640_5.bmp");
}
else{
cout<<"invalid kernal size\n";
}
}
else{
cout<<"invalid image size\n";
}
}
else{
cout<<"invalid number of arguments\n";
}
return 0;
}
|
3f56ed53aa361f01dc89edb312171ec3eb463678.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simulates a lottery system
//
// Author: Yili Zou
//
// For the GPU Programming class, NDSU Spring '14
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>
#define FILE_CREATE_ERROR -1
#define Number_Max 9
#define Number_Min 0
#define THREADS_PER_BLOCK 10 // Setting the grid up
#define BLOCKS_PER_GRID 1
#define OFFSET 0 // No offset
__global__ void Setup_RNG(hiprandState_t *state, int seed)
{
// Setup of the random number generator. It seeds it, sets the sequence according to the thread id
hiprand_init(seed, threadIdx.x + blockIdx.x * THREADS_PER_BLOCK, OFFSET, &state[threadIdx.x + blockIdx.x * THREADS_PER_BLOCK]);
}
__global__ void RNG(hiprandState_t *state, int *result)
{
int id_k = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; // Here we calculate the id_k as to save calculations
hiprandState_t localState = state[id_k]; // Copy it to local memory to save global memory accesses (faster)
result[id_k] = hiprand(&localState)/(RAND_MAX/5); // Use the state to generate the random number AND updates the state,the range will be from 0 to 9, which is a dice
state[id_k] = localState; // Update the state in global memory. This allows the next generation to be uncorrelated to this generation
}
__global__ void Number_Matching(int *lucky_numbers, int *user_numbers, int *Matching_numbers)
{
//set up a counter to see how many numbers are matching
int counter=0; //initialize the counter, 0 is not matching, 1 is matching.
if(lucky_numbers[threadIdx.x]==user_numbers[threadIdx.x])
{
counter++;
}
Matching_numbers[threadIdx.x]= counter; //for every index that is matching, counter becomes 1, so this is a array of where these matching numbers are
}
int main()
{
//the array to store users number
int user_number[10];
//the array to store the lucky number
int price_number[10];
//define a address to store randomnumbers on the device
int *randomnumbers_d;
//how many numbers are matching
int numbers_matching[10];
//define stuff in device
int *price_number_d;
int *user_number_d;
int *numbers_matching_d;
//States
hiprandState_t *states_d;
// Allocate memory on the device
hipMalloc((void **)&randomnumbers_d, THREADS_PER_BLOCK*sizeof( int));
hipMalloc((void **)&states_d, THREADS_PER_BLOCK*sizeof(hiprandState_t));
// Set up grid and block
dim3 dimGrid(BLOCKS_PER_GRID);
dim3 dimBlock(THREADS_PER_BLOCK);
// Set up RNG
hipLaunchKernelGGL(( Setup_RNG), dim3(dimGrid), dim3(dimBlock), 0, 0, states_d, time(NULL));
hipLaunchKernelGGL(( RNG), dim3(dimGrid), dim3(dimBlock), 0, 0, states_d, randomnumbers_d); // Launch RNG
//copy results back
hipMemcpy(price_number, randomnumbers_d, THREADS_PER_BLOCK*sizeof(unsigned int), hipMemcpyDeviceToHost);
//user interface
printf("\nThe 10 lucky number have been generated, please input your lucky numbers!!(from 0 to 9)\n");
//encourage user to input numbers
int input; //the input by users
for(int i=0; i<10; i++)
{
while(1)
{
scanf("%d", &input); //scan the input
if(input<0 || input >9)
{
                printf("\n Please enter numbers within 0 to 9!\n"); //prompt the user to enter a valid number
}
else
break;
}
user_number[i]=input;
}
printf("\nYour lucky numbers have been picked, waiting for results\n");
// Allocate memory on the device
hipMalloc((void **)&numbers_matching_d, 10*sizeof( int));
hipMalloc((void **)&price_number_d, THREADS_PER_BLOCK*sizeof( int));
hipMalloc((void **)&user_number_d, THREADS_PER_BLOCK*sizeof( int));
//copy the parameters in the device
hipMemcpy(user_number_d, user_number, THREADS_PER_BLOCK*sizeof(int ), hipMemcpyHostToDevice);
hipMemcpy(price_number_d, price_number, THREADS_PER_BLOCK*sizeof(int ), hipMemcpyHostToDevice);
//Launch number matching kernel
hipLaunchKernelGGL(( Number_Matching), dim3(dimGrid), dim3(dimBlock), 0, 0, price_number_d, user_number_d, numbers_matching_d);
//copy the result
hipMemcpy(numbers_matching, numbers_matching_d, 10*sizeof(int), hipMemcpyDeviceToHost);
//clean up memory
hipFree(numbers_matching_d);
hipFree(price_number_d);
hipFree(user_number_d);
//how many numbers matching
int nMatch=0;
//show result
printf("lucky numbers:\n");
for(int i=0; i<10; i++)
{
nMatch+=numbers_matching[i]; //numbers_matching[i] is going to be either 1 or 0, so sum them all up we can get the totally numbers matching
printf("%d\n",price_number[i]);
}
printf("\n You have %d numbers matching!", nMatch);
    if(nMatch==10) //when all numbers match, the player wins the prize, which is unlikely to happen
{
        printf("\n Congratulations! You have won 1 Million dollars! \n");
}
return EXIT_SUCCESS;
}
 | 3f56ed53aa361f01dc89edb312171ec3eb463678.cu | // Simulates a lottery system
//
// Author: Yili Zou
//
// For the GPU Programming class, NDSU Spring '14
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <curand_kernel.h>
#define FILE_CREATE_ERROR -1
#define Number_Max 9
#define Number_Min 0
#define THREADS_PER_BLOCK 10 // Setting the grid up
#define BLOCKS_PER_GRID 1
#define OFFSET 0 // No offset
__global__ void Setup_RNG(curandState *state, int seed)
{
// Setup of the random number generator. It seeds it, sets the sequence according to the thread id
curand_init(seed, threadIdx.x + blockIdx.x * THREADS_PER_BLOCK, OFFSET, &state[threadIdx.x + blockIdx.x * THREADS_PER_BLOCK]);
}
__global__ void RNG(curandState *state, int *result)
{
int id_k = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; // Here we calculate the id_k as to save calculations
curandState localState = state[id_k]; // Copy it to local memory to save global memory accesses (faster)
result[id_k] = curand(&localState)/(RAND_MAX/5); // Use the state to generate the random number AND updates the state,the range will be from 0 to 9, which is a dice
state[id_k] = localState; // Update the state in global memory. This allows the next generation to be uncorrelated to this generation
}
__global__ void Number_Matching(int *lucky_numbers, int *user_numbers, int *Matching_numbers)
{
//set up a counter to see how many numbers are matching
int counter=0; //initialize the counter, 0 is not matching, 1 is matching.
if(lucky_numbers[threadIdx.x]==user_numbers[threadIdx.x])
{
counter++;
}
Matching_numbers[threadIdx.x]= counter; //for every index that is matching, counter becomes 1, so this is a array of where these matching numbers are
}
int main()
{
//the array to store users number
int user_number[10];
//the array to store the lucky number
int price_number[10];
//define a address to store randomnumbers on the device
int *randomnumbers_d;
//how many numbers are matching
int numbers_matching[10];
//define stuff in device
int *price_number_d;
int *user_number_d;
int *numbers_matching_d;
//States
curandState *states_d;
// Allocate memory on the device
cudaMalloc((void **)&randomnumbers_d, THREADS_PER_BLOCK*sizeof( int));
cudaMalloc((void **)&states_d, THREADS_PER_BLOCK*sizeof(curandState));
// Set up grid and block
dim3 dimGrid(BLOCKS_PER_GRID);
dim3 dimBlock(THREADS_PER_BLOCK);
// Set up RNG
Setup_RNG<<<dimGrid, dimBlock>>>(states_d, time(NULL));
RNG<<<dimGrid, dimBlock>>>(states_d, randomnumbers_d); // Launch RNG
//copy results back
cudaMemcpy(price_number, randomnumbers_d, THREADS_PER_BLOCK*sizeof(unsigned int), cudaMemcpyDeviceToHost);
//user interface
printf("\nThe 10 lucky number have been generated, please input your lucky numbers!!(from 0 to 9)\n");
//encourage user to input numbers
int input; //the input by users
for(int i=0; i<10; i++)
{
while(1)
{
scanf("%d", &input); //scan the input
if(input<0 || input >9)
{
                printf("\n Please enter numbers within 0 to 9!\n"); //prompt the user to enter a valid number
}
else
break;
}
user_number[i]=input;
}
printf("\nYour lucky numbers have been picked, waiting for results\n");
// Allocate memory on the device
cudaMalloc((void **)&numbers_matching_d, 10*sizeof( int));
cudaMalloc((void **)&price_number_d, THREADS_PER_BLOCK*sizeof( int));
cudaMalloc((void **)&user_number_d, THREADS_PER_BLOCK*sizeof( int));
//copy the parameters in the device
cudaMemcpy(user_number_d, user_number, THREADS_PER_BLOCK*sizeof(int ), cudaMemcpyHostToDevice);
cudaMemcpy(price_number_d, price_number, THREADS_PER_BLOCK*sizeof(int ), cudaMemcpyHostToDevice);
//Launch number matching kernel
Number_Matching<<<dimGrid, dimBlock>>>(price_number_d, user_number_d, numbers_matching_d);
//copy the result
cudaMemcpy(numbers_matching, numbers_matching_d, 10*sizeof(int), cudaMemcpyDeviceToHost);
//clean up memory
cudaFree(numbers_matching_d);
cudaFree(price_number_d);
cudaFree(user_number_d);
//how many numbers matching
int nMatch=0;
//show result
printf("lucky numbers:\n");
for(int i=0; i<10; i++)
{
nMatch+=numbers_matching[i]; //numbers_matching[i] is going to be either 1 or 0, so sum them all up we can get the totally numbers matching
printf("%d\n",price_number[i]);
}
printf("\n You have %d numbers matching!", nMatch);
    if(nMatch==10) //when all numbers match, the player wins the prize, which is unlikely to happen
{
        printf("\n Congratulations! You have won 1 Million dollars! \n");
}
return EXIT_SUCCESS;
}
|
863088f9d36f6caa8a5a0d7bab68725bd50b1133.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/gather.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <cudf/utilities/error.hpp>
#include <stdexcept>
#include "nvstrings/NVStrings.h"
#include "../custring_view.cuh"
#include "../util.h"
#include "./NVStringsImpl.h"
// takes scattered pointers to custring_view objects and
// initializes a new NVStringsImpl
void NVStrings_init_from_custrings(NVStringsImpl* pImpl,
custring_view_array d_strings,
unsigned int count)
{
auto execpol = rmm::exec_policy(0);
// get individual sizes
rmm::device_vector<size_t> sizes(count, 0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
[d_strings, d_sizes] __device__(unsigned int idx) {
custring_view* dstr = d_strings[idx];
if (dstr) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size());
});
// create output object
char* d_buffer = pImpl->createMemoryFor(d_sizes);
if (d_buffer == 0) return; // this is valid
// create offsets
rmm::device_vector<size_t> offsets(count, 0);
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin());
// finally, copy the strings
custring_view_array d_results = pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
[d_strings, d_buffer, d_offsets, d_results] __device__(unsigned int idx) {
custring_view* dstr = d_strings[idx];
if (!dstr) return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = custring_view::create_from(buffer, *dstr);
});
//
}
// create a new instance containing only the strings at the specified positions
// position values can be in any order and can even be repeated
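//
// e.g. s1 = ['a','b','c','d']
//      pos = [1,3,1]
//      s2 = s1.gather(pos,3) --> ['b','d','b']
//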
NVStrings* NVStrings::gather(const int* pos, unsigned int elements, bool bdevmem)
{
unsigned int count = size();
if (count == 0 || elements == 0 || pos == 0) return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if (!bdevmem) { // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements, 0));
CUDA_TRY(hipMemcpyAsync((void*)d_pos, pos, elements * sizeof(int), hipMemcpyHostToDevice))
}
// create working memory
rmm::device_vector<custring_view*> results(elements, nullptr);
auto d_results = results.data().get();
rmm::device_vector<bool> flags(elements, false);
auto d_flags = flags.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
// do the gather
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
elements,
[d_strings, d_pos, count, d_results, d_flags] __device__(unsigned int idx) {
int pos = d_pos[idx];
if ((pos < 0) || (pos >= count))
d_flags[idx] = true;
else
d_results[idx] = d_strings[pos];
});
// check for invalid position values
if (thrust::count(execpol->on(0), flags.begin(), flags.end(), true)) {
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
throw std::out_of_range("gather position value out of range");
}
// build resulting instance
NVStrings* rtn = new NVStrings(elements);
NVStrings_init_from_custrings(rtn->pImpl, d_results, elements);
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
return rtn;
}
// create a new instance containing only the strings where the corresponding mask value is true
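//
// e.g. s1 = ['a','b','c','d']
//      mask = [true,false,true,false]
//      s2 = s1.gather(mask) --> ['a','c']
//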
NVStrings* NVStrings::gather(const bool* mask, bool bdevmem)
{
size_t count = size();
if (count == 0 || mask == nullptr) return new NVStrings(0);
// copy mask array to device memory if necessary
auto execpol = rmm::exec_policy(0);
const bool* d_mask = mask;
if (!bdevmem) {
d_mask = const_cast<const bool*>(device_alloc<bool>(count, 0));
CUDA_TRY(
hipMemcpyAsync((void*)d_mask, mask, count * sizeof(mask[0]), hipMemcpyHostToDevice, 0))
}
// create list of index positions from the mask array
rmm::device_vector<int> indexes(count);
auto d_indexes = indexes.data().get();
auto d_indexes_end = thrust::copy_if(execpol->on(0),
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(count),
d_indexes,
[d_mask] __device__(int idx) { return d_mask[idx]; });
// done with the mask
if (!bdevmem) RMM_FREE((void*)d_mask, 0);
count = d_indexes_end - d_indexes;
return gather(d_indexes, count, true);
}
//
// s1 = ['a','b,'c','d']
// s2 = ['e','f']
// pos = [1,3] -- must be the same length as s2
// s3 = s1.scatter(s2,pos)
// ['a','e','c','f']
//
NVStrings* NVStrings::scatter(NVStrings& strs, const int* pos, bool bdevmem)
{
unsigned int count = size();
unsigned int elements = strs.size();
if (pos == 0) throw std::invalid_argument("position parameter cannot be null");
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if (!bdevmem) { // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements, 0));
CUDA_TRY(hipMemcpyAsync((void*)d_pos, pos, elements * sizeof(int), hipMemcpyHostToDevice))
}
// The most efficient method here is to build pointer array
// applying the parameters to the specified positions and
// then build a new instance from the resulting pointers.
rmm::device_vector<custring_view*> results(count, nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
custring_view_array d_new_strings = strs.pImpl->getStringsPtr();
thrust::copy(execpol->on(0), d_strings, d_strings + count, d_results);
thrust::scatter(execpol->on(0), d_new_strings, d_new_strings + elements, d_pos, d_results);
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
return rtn;
}
//
// s1 = ['a','b,'c','d']
// pos = [1,3]
// s3 = s1.scatter('e',pos,2)
// ['a','e','c','e']
//
NVStrings* NVStrings::scatter(const char* str, const int* pos, unsigned int elements, bool bdevmem)
{
unsigned int count = size();
if (pos == nullptr) throw std::invalid_argument("parameter cannot be null");
auto execpol = rmm::exec_policy(0);
// copy string to device
custring_view* d_repl = custring_from_host(str);
const int* d_pos = pos;
if (!bdevmem) { // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements, 0));
CUDA_TRY(hipMemcpyAsync((void*)d_pos, pos, elements * sizeof(int), hipMemcpyHostToDevice))
}
// create result output array
rmm::device_vector<custring_view*> results(count, nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::copy(execpol->on(0), d_strings, d_strings + count, d_results);
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
elements,
[d_pos, count, d_repl, d_results] __device__(unsigned int idx) {
int pos = d_pos[idx];
if ((pos >= 0) && (pos < count)) d_results[pos] = d_repl;
});
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
RMM_FREE((void*)d_repl, 0);
return rtn;
}
NVStrings* NVStrings::sublist(unsigned int start, unsigned int end, int step)
{
unsigned int count = size();
if (end > count) end = count;
if (start > count) start = count;
if (step == 0) step = 1;
if (start == end) return new NVStrings(0);
if (((step > 0) && (start > end)) || ((step < 0) && (start < end))) return new NVStrings(0);
unsigned int elems = (unsigned int)std::abs((int)(end - start));
unsigned int abs_step = (unsigned int)std::abs(step);
elems = (elems + abs_step - 1) / abs_step; // adjust for steps
auto execpol = rmm::exec_policy(0);
rmm::device_vector<int> indexes(elems);
thrust::sequence(execpol->on(0), indexes.begin(), indexes.end(), (int)start, step);
return gather(indexes.data().get(), elems, true);
}
// remove the specified strings and return a new instance
NVStrings* NVStrings::remove_strings(const int* pos, unsigned int elements, bool bdevmem)
{
unsigned int count = size();
if (count == 0) return new NVStrings(0);
if (elements == 0 || pos == 0) return copy();
auto execpol = rmm::exec_policy(0);
int* dpos = device_alloc<int>(elements, 0);
if (bdevmem)
CUDA_TRY(
hipMemcpyAsync((void*)dpos, pos, elements * sizeof(unsigned int), hipMemcpyDeviceToDevice))
else
CUDA_TRY(
hipMemcpyAsync((void*)dpos, pos, elements * sizeof(unsigned int), hipMemcpyHostToDevice))
// sort the position values
thrust::sort(execpol->on(0), dpos, dpos + elements, thrust::greater<int>());
// also should remove duplicates
int* nend = thrust::unique(execpol->on(0), dpos, dpos + elements, thrust::equal_to<int>());
elements = (unsigned int)(nend - dpos);
if (count < elements) {
RMM_FREE(dpos, 0);
fprintf(stderr,
"remove_strings: more positions (%u) specified than the number of strings (%u)\n",
elements,
count);
return nullptr;
}
  // build an array of all positions, then mark the positions to be removed with -1
rmm::device_vector<int> dnpos(count);
thrust::sequence(execpol->on(0), dnpos.begin(), dnpos.end());
int* d_npos = dnpos.data().get();
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
elements,
[dpos, d_npos, count] __device__(unsigned int idx) {
int pos = dpos[idx];
if ((pos >= 0) && (pos < count)) d_npos[pos] = -1;
});
// now remove the positions marked with -1
int* dend = thrust::remove_if(
execpol->on(0), d_npos, d_npos + count, [] __device__(int val) { return val < 0; });
unsigned int new_count = (unsigned int)(dend - d_npos);
// gather string pointers based on indexes in dnpos (new-positions)
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<custring_view*> results(new_count, nullptr);
custring_view_array d_results = results.data().get();
thrust::gather(execpol->on(0), d_npos, d_npos + new_count, d_strings, d_results);
// create output object from results pointers
NVStrings* rtn = new NVStrings(new_count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, new_count);
RMM_FREE(dpos, 0);
return rtn;
}
// this sorts the strings into a new instance;
// a sorted strings list can improve performance by reducing divergence
NVStrings* NVStrings::sort(sorttype stype, bool ascending, bool nullfirst)
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
// copy the pointers so we can sort them
rmm::device_vector<custring_view*> results(count, nullptr);
custring_view_array d_results = results.data().get();
thrust::copy(execpol->on(0), d_strings, d_strings + count, d_results);
thrust::sort(
execpol->on(0),
d_results,
d_results + count,
[stype, ascending, nullfirst] __device__(custring_view * &lhs, custring_view * &rhs) {
if (lhs == 0 || rhs == 0) return (nullfirst ? rhs != 0 : lhs != 0); // null < non-null
// allow sorting by name and length
int diff = 0;
if (stype & NVStrings::length) diff = lhs->size() - rhs->size();
if (diff == 0 && (stype & NVStrings::name)) diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
// build new instance from the sorted pointers
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
return rtn;
}
// just provide the index order and leave the strings intact
int NVStrings::order(
sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice)
{
unsigned int count = size();
unsigned int* d_indexes = indexes;
auto execpol = rmm::exec_policy(0);
if (!todevice) d_indexes = device_alloc<unsigned int>(count, 0);
thrust::sequence(execpol->on(0), d_indexes, d_indexes + count);
//
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::sort(
execpol->on(0),
d_indexes,
d_indexes + count,
[d_strings, stype, ascending, nullfirst] __device__(unsigned int& lidx, unsigned int& ridx) {
custring_view* lhs = d_strings[lidx];
custring_view* rhs = d_strings[ridx];
if (lhs == 0 || rhs == 0) return (nullfirst ? rhs != 0 : lhs != 0);
// allow sorting by name and length
int diff = 0;
if (stype & NVStrings::length) diff = lhs->size() - rhs->size();
if (diff == 0 && (stype & NVStrings::name)) diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
//
if (!todevice) {
CUDA_TRY(
hipMemcpyAsync(indexes, d_indexes, count * sizeof(unsigned int), hipMemcpyDeviceToHost))
RMM_FREE(d_indexes, 0);
}
return 0;
}
| 863088f9d36f6caa8a5a0d7bab68725bd50b1133.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/gather.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <cudf/utilities/error.hpp>
#include <stdexcept>
#include "nvstrings/NVStrings.h"
#include "../custring_view.cuh"
#include "../util.h"
#include "./NVStringsImpl.h"
// takes scattered pointers to custring_view objects and
// initializes a new NVStringsImpl
void NVStrings_init_from_custrings(NVStringsImpl* pImpl,
custring_view_array d_strings,
unsigned int count)
{
auto execpol = rmm::exec_policy(0);
// get individual sizes
rmm::device_vector<size_t> sizes(count, 0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
[d_strings, d_sizes] __device__(unsigned int idx) {
custring_view* dstr = d_strings[idx];
if (dstr) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size());
});
// create output object
char* d_buffer = pImpl->createMemoryFor(d_sizes);
if (d_buffer == 0) return; // this is valid
// create offsets
rmm::device_vector<size_t> offsets(count, 0);
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin());
// finally, copy the strings
custring_view_array d_results = pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
[d_strings, d_buffer, d_offsets, d_results] __device__(unsigned int idx) {
custring_view* dstr = d_strings[idx];
if (!dstr) return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = custring_view::create_from(buffer, *dstr);
});
//
}
// create a new instance containing only the strings at the specified positions
// position values can be in any order and can even be repeated
NVStrings* NVStrings::gather(const int* pos, unsigned int elements, bool bdevmem)
{
unsigned int count = size();
if (count == 0 || elements == 0 || pos == 0) return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if (!bdevmem) { // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements, 0));
CUDA_TRY(cudaMemcpyAsync((void*)d_pos, pos, elements * sizeof(int), cudaMemcpyHostToDevice))
}
// create working memory
rmm::device_vector<custring_view*> results(elements, nullptr);
auto d_results = results.data().get();
rmm::device_vector<bool> flags(elements, false);
auto d_flags = flags.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
// do the gather
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
elements,
[d_strings, d_pos, count, d_results, d_flags] __device__(unsigned int idx) {
int pos = d_pos[idx];
if ((pos < 0) || (pos >= count))
d_flags[idx] = true;
else
d_results[idx] = d_strings[pos];
});
// check for invalid position values
if (thrust::count(execpol->on(0), flags.begin(), flags.end(), true)) {
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
throw std::out_of_range("gather position value out of range");
}
// build resulting instance
NVStrings* rtn = new NVStrings(elements);
NVStrings_init_from_custrings(rtn->pImpl, d_results, elements);
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
return rtn;
}
// create a new instance containing only the strings where the corresponding mask value is true
NVStrings* NVStrings::gather(const bool* mask, bool bdevmem)
{
size_t count = size();
if (count == 0 || mask == nullptr) return new NVStrings(0);
// copy mask array to device memory if necessary
auto execpol = rmm::exec_policy(0);
const bool* d_mask = mask;
if (!bdevmem) {
d_mask = const_cast<const bool*>(device_alloc<bool>(count, 0));
CUDA_TRY(
cudaMemcpyAsync((void*)d_mask, mask, count * sizeof(mask[0]), cudaMemcpyHostToDevice, 0))
}
// create list of index positions from the mask array
rmm::device_vector<int> indexes(count);
auto d_indexes = indexes.data().get();
auto d_indexes_end = thrust::copy_if(execpol->on(0),
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(count),
d_indexes,
[d_mask] __device__(int idx) { return d_mask[idx]; });
// done with the mask
if (!bdevmem) RMM_FREE((void*)d_mask, 0);
count = d_indexes_end - d_indexes;
return gather(d_indexes, count, true);
}
//
// s1 = ['a','b','c','d']
// s2 = ['e','f']
// pos = [1,3] -- must be the same length as s2
// s3 = s1.scatter(s2,pos)
// ['a','e','c','f']
//
NVStrings* NVStrings::scatter(NVStrings& strs, const int* pos, bool bdevmem)
{
unsigned int count = size();
unsigned int elements = strs.size();
if (pos == 0) throw std::invalid_argument("position parameter cannot be null");
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if (!bdevmem) { // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements, 0));
CUDA_TRY(cudaMemcpyAsync((void*)d_pos, pos, elements * sizeof(int), cudaMemcpyHostToDevice))
}
  // The most efficient method here is to build a pointer array,
  // apply the parameters to the specified positions, and
  // then build a new instance from the resulting pointers.
rmm::device_vector<custring_view*> results(count, nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
custring_view_array d_new_strings = strs.pImpl->getStringsPtr();
thrust::copy(execpol->on(0), d_strings, d_strings + count, d_results);
thrust::scatter(execpol->on(0), d_new_strings, d_new_strings + elements, d_pos, d_results);
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
return rtn;
}
//
// s1 = ['a','b','c','d']
// pos = [1,3]
// s3 = s1.scatter('e',pos,2)
// ['a','e','c','e']
//
NVStrings* NVStrings::scatter(const char* str, const int* pos, unsigned int elements, bool bdevmem)
{
unsigned int count = size();
if (pos == nullptr) throw std::invalid_argument("parameter cannot be null");
auto execpol = rmm::exec_policy(0);
// copy string to device
custring_view* d_repl = custring_from_host(str);
const int* d_pos = pos;
if (!bdevmem) { // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements, 0));
CUDA_TRY(cudaMemcpyAsync((void*)d_pos, pos, elements * sizeof(int), cudaMemcpyHostToDevice))
}
// create result output array
rmm::device_vector<custring_view*> results(count, nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::copy(execpol->on(0), d_strings, d_strings + count, d_results);
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
elements,
[d_pos, count, d_repl, d_results] __device__(unsigned int idx) {
int pos = d_pos[idx];
if ((pos >= 0) && (pos < count)) d_results[pos] = d_repl;
});
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if (!bdevmem) RMM_FREE((void*)d_pos, 0);
RMM_FREE((void*)d_repl, 0);
return rtn;
}
NVStrings* NVStrings::sublist(unsigned int start, unsigned int end, int step)
{
unsigned int count = size();
if (end > count) end = count;
if (start > count) start = count;
if (step == 0) step = 1;
if (start == end) return new NVStrings(0);
if (((step > 0) && (start > end)) || ((step < 0) && (start < end))) return new NVStrings(0);
unsigned int elems = (unsigned int)std::abs((int)(end - start));
unsigned int abs_step = (unsigned int)std::abs(step);
elems = (elems + abs_step - 1) / abs_step; // adjust for steps
auto execpol = rmm::exec_policy(0);
rmm::device_vector<int> indexes(elems);
thrust::sequence(execpol->on(0), indexes.begin(), indexes.end(), (int)start, step);
return gather(indexes.data().get(), elems, true);
}
// remove the specified strings and return a new instance
NVStrings* NVStrings::remove_strings(const int* pos, unsigned int elements, bool bdevmem)
{
unsigned int count = size();
if (count == 0) return new NVStrings(0);
if (elements == 0 || pos == 0) return copy();
auto execpol = rmm::exec_policy(0);
int* dpos = device_alloc<int>(elements, 0);
if (bdevmem)
CUDA_TRY(
cudaMemcpyAsync((void*)dpos, pos, elements * sizeof(unsigned int), cudaMemcpyDeviceToDevice))
else
CUDA_TRY(
cudaMemcpyAsync((void*)dpos, pos, elements * sizeof(unsigned int), cudaMemcpyHostToDevice))
// sort the position values
thrust::sort(execpol->on(0), dpos, dpos + elements, thrust::greater<int>());
// also should remove duplicates
int* nend = thrust::unique(execpol->on(0), dpos, dpos + elements, thrust::equal_to<int>());
elements = (unsigned int)(nend - dpos);
if (count < elements) {
RMM_FREE(dpos, 0);
fprintf(stderr,
"remove_strings: more positions (%u) specified than the number of strings (%u)\n",
elements,
count);
return nullptr;
}
  // build an array of all positions, then mark the positions to be removed with -1
rmm::device_vector<int> dnpos(count);
thrust::sequence(execpol->on(0), dnpos.begin(), dnpos.end());
int* d_npos = dnpos.data().get();
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
elements,
[dpos, d_npos, count] __device__(unsigned int idx) {
int pos = dpos[idx];
if ((pos >= 0) && (pos < count)) d_npos[pos] = -1;
});
// now remove the positions marked with -1
int* dend = thrust::remove_if(
execpol->on(0), d_npos, d_npos + count, [] __device__(int val) { return val < 0; });
unsigned int new_count = (unsigned int)(dend - d_npos);
// gather string pointers based on indexes in dnpos (new-positions)
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<custring_view*> results(new_count, nullptr);
custring_view_array d_results = results.data().get();
thrust::gather(execpol->on(0), d_npos, d_npos + new_count, d_strings, d_results);
// create output object from results pointers
NVStrings* rtn = new NVStrings(new_count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, new_count);
RMM_FREE(dpos, 0);
return rtn;
}
// this sorts the strings into a new instance;
// a sorted strings list can improve performance by reducing divergence
NVStrings* NVStrings::sort(sorttype stype, bool ascending, bool nullfirst)
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
// copy the pointers so we can sort them
rmm::device_vector<custring_view*> results(count, nullptr);
custring_view_array d_results = results.data().get();
thrust::copy(execpol->on(0), d_strings, d_strings + count, d_results);
thrust::sort(
execpol->on(0),
d_results,
d_results + count,
[stype, ascending, nullfirst] __device__(custring_view * &lhs, custring_view * &rhs) {
if (lhs == 0 || rhs == 0) return (nullfirst ? rhs != 0 : lhs != 0); // null < non-null
// allow sorting by name and length
int diff = 0;
if (stype & NVStrings::length) diff = lhs->size() - rhs->size();
if (diff == 0 && (stype & NVStrings::name)) diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
// build new instance from the sorted pointers
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
return rtn;
}
// just provide the index order and leave the strings intact
int NVStrings::order(
sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice)
{
unsigned int count = size();
unsigned int* d_indexes = indexes;
auto execpol = rmm::exec_policy(0);
if (!todevice) d_indexes = device_alloc<unsigned int>(count, 0);
thrust::sequence(execpol->on(0), d_indexes, d_indexes + count);
//
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::sort(
execpol->on(0),
d_indexes,
d_indexes + count,
[d_strings, stype, ascending, nullfirst] __device__(unsigned int& lidx, unsigned int& ridx) {
custring_view* lhs = d_strings[lidx];
custring_view* rhs = d_strings[ridx];
if (lhs == 0 || rhs == 0) return (nullfirst ? rhs != 0 : lhs != 0);
// allow sorting by name and length
int diff = 0;
if (stype & NVStrings::length) diff = lhs->size() - rhs->size();
if (diff == 0 && (stype & NVStrings::name)) diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
//
if (!todevice) {
CUDA_TRY(
cudaMemcpyAsync(indexes, d_indexes, count * sizeof(unsigned int), cudaMemcpyDeviceToHost))
RMM_FREE(d_indexes, 0);
}
return 0;
}
|
e604b2632ecd2b82e48c8be02dacd110d2e815f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NEGINF -9999.0f
__global__ void initialAssignment(float* diffs, int* bestChanges, AijMatrix A,
int* persons, int* objects, bool* bannedSwitches,
bool* clearedBannedSwitches, int* reset, int* blockSize) {
int row1;
int col2;
int curRow;
int m1 = blockIdx.x * blockSize[0];
int m2 = (blockIdx.x + 1) * blockSize[0];
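 // each thread block handles the contiguous range of persons/rows [m1, m2)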
 // Check if all have been assigned
#ifdef DEBUG
printf("\n BS%d M1 %d, M2 %d\n", blockSize[0], m1, m2);
#endif
for (int i = m1; i < m2; i++)
{
row1 = i;
if (diffs[i] != NEGINF)
{
#ifdef DEBUG
printf("P%d != -1\n", i);
#endif
col2 = bestChanges[row1]; // index of column of the best
// cell in the row of difference
if (objects[col2] == -1)
{
persons[row1] = col2;
objects[col2] = row1;
} else
{
curRow = objects[col2];
if (diffs[curRow] < diffs[row1])
{
//Swap
if (curRow != -1)
{
#ifdef DEBUG
printf("P%d == -1\n", curRow);
#endif
persons[curRow] = -1;
bannedSwitches[row1 * A.width + curRow] = true;
reset[0] = 1;
}
persons[row1] = col2;
objects[col2] = row1;
}
}
}
#ifdef DEBUG
printf("P%d->%d\n", i, persons[i]);
#endif
}
}
| e604b2632ecd2b82e48c8be02dacd110d2e815f6.cu | #include <stdio.h>
#define NEGINF -9999.0f
__global__ void initialAssignment(float* diffs, int* bestChanges, AijMatrix A,
int* persons, int* objects, bool* bannedSwitches,
bool* clearedBannedSwitches, int* reset, int* blockSize) {
int row1;
int col2;
int curRow;
int m1 = blockIdx.x * blockSize[0];
int m2 = (blockIdx.x + 1) * blockSize[0];
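 // each thread block handles the contiguous range of persons/rows [m1, m2)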
 // Check if all have been assigned
#ifdef DEBUG
printf("\n BS%d M1 %d, M2 %d\n", blockSize[0], m1, m2);
#endif
for (int i = m1; i < m2; i++)
{
row1 = i;
if (diffs[i] != NEGINF)
{
#ifdef DEBUG
printf("P%d != -1\n", i);
#endif
col2 = bestChanges[row1]; // index of column of the best
// cell in the row of difference
if (objects[col2] == -1)
{
persons[row1] = col2;
objects[col2] = row1;
} else
{
curRow = objects[col2];
if (diffs[curRow] < diffs[row1])
{
//Swap
if (curRow != -1)
{
#ifdef DEBUG
printf("P%d == -1\n", curRow);
#endif
persons[curRow] = -1;
bannedSwitches[row1 * A.width + curRow] = true;
reset[0] = 1;
}
persons[row1] = col2;
objects[col2] = row1;
}
}
}
#ifdef DEBUG
printf("P%d->%d\n", i, persons[i]);
#endif
}
}
|
10e61802e2d597efc06d47993c31bb95ee7fa16b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tileMatMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *matA = NULL;
hipMalloc(&matA, XSIZE*YSIZE);
float *matB = NULL;
hipMalloc(&matB, XSIZE*YSIZE);
float *matC = NULL;
hipMalloc(&matC, XSIZE*YSIZE);
int aRows = 1;
int aCols = 1;
int bRows = 1;
int bCols = 1;
int cRows = 1;
int cCols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
tileMatMul), dim3(gridBlock),dim3(threadBlock), 0, 0, matA,matB,matC,aRows,aCols,bRows,bCols,cRows,cCols);
hipDeviceSynchronize();
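// warm-up launches (untimed) follow; the 1000-iteration loop after them is the timed measurement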
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
tileMatMul), dim3(gridBlock),dim3(threadBlock), 0, 0, matA,matB,matC,aRows,aCols,bRows,bCols,cRows,cCols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
tileMatMul), dim3(gridBlock),dim3(threadBlock), 0, 0, matA,matB,matC,aRows,aCols,bRows,bCols,cRows,cCols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 10e61802e2d597efc06d47993c31bb95ee7fa16b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "tileMatMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *matA = NULL;
cudaMalloc(&matA, XSIZE*YSIZE);
float *matB = NULL;
cudaMalloc(&matB, XSIZE*YSIZE);
float *matC = NULL;
cudaMalloc(&matC, XSIZE*YSIZE);
int aRows = 1;
int aCols = 1;
int bRows = 1;
int bCols = 1;
int cRows = 1;
int cCols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
tileMatMul<<<gridBlock,threadBlock>>>(matA,matB,matC,aRows,aCols,bRows,bCols,cRows,cCols);
cudaDeviceSynchronize();
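// warm-up launches (untimed) follow; the 1000-iteration loop after them is the timed measurement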
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
tileMatMul<<<gridBlock,threadBlock>>>(matA,matB,matC,aRows,aCols,bRows,bCols,cRows,cCols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
tileMatMul<<<gridBlock,threadBlock>>>(matA,matB,matC,aRows,aCols,bRows,bCols,cRows,cCols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
80d95ac434074809fb6a2bc1188ad6d8ae4c7018.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/cross_entropy.h"
#include "paddle/phi/kernels/funcs/math.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/softmax_impl.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
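// For each row, copy the logit at its label position into predicted_logits when the
// label falls inside this rank's class shard [start_index, end_index); entries whose
// label lies outside the shard are left untouched (the caller zero-initializes them).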
template <typename T, typename IndexT>
__global__ void MaskLabelByIndex(T* predicted_logits,
const T* logit,
const IndexT* label,
const IndexT ignore_index,
const int start_index,
const int end_index,
const int64_t N,
const int64_t D,
const int nranks) {
CUDA_KERNEL_LOOP(i, N) {
auto real_label = label[i];
PADDLE_ENFORCE(((real_label < D * nranks) && (real_label >= 0)) ||
(real_label == ignore_index),
"The index is out of bounds, "
"please check whether the value of label and "
"input meet the class number. It should "
"be less than [%ld] or equal to [%ld], but received [%ld]",
static_cast<int64_t>(D * nranks),
static_cast<int64_t>(ignore_index),
static_cast<int64_t>(real_label));
if (real_label >= start_index && real_label < end_index) {
predicted_logits[i] = logit[i * D + real_label - start_index];
}
}
}
template <typename T, typename IndexT>
__global__ void CaculateLoss(T* loss,
const T* predict_logits,
const T* sum_exp_logits,
const IndexT* label,
const int64_t ignore_index,
const int N) {
CUDA_KERNEL_LOOP(i, N) {
auto real_label = static_cast<int64_t>(label[i]);
loss[i] = ignore_index == real_label
? static_cast<T>(0)
: phi::funcs::TolerableValue<T>()(
phi::funcs::TolerableValue<T>()(
phi::funcs::real_log(sum_exp_logits[i])) -
predict_logits[i]);
}
}
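// Backward pass: logits_grad initially holds the softmax; subtract 1 at the label
// column when it falls in this rank's shard, scale everything by the upstream loss
// gradient, and zero out rows whose label equals ignore_index.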
template <typename T, typename IndexT>
__global__ void MaskLabelByIndexGrad(T* logits_grad,
const T* loss_grad,
const IndexT* labels,
const int start_index,
const int end_index,
const int64_t N,
const int64_t D,
const int64_t ignore_index) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
auto lbl = static_cast<int64_t>(labels[row]);
if (lbl == ignore_index) {
logits_grad[i] = static_cast<T>(0.0);
} else if ((col + start_index) == labels[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
} else {
logits_grad[i] *= loss_grad[row];
}
}
}
template <typename T, typename DeviceContext>
class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const int rid = ctx.Attr<int>("ring_id");
auto map = distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
} else {
CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
}
}
};
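// Softmax-with-cross-entropy for model-parallel training: the class (last) dimension
// of the logits is sharded across nranks devices, so the per-row max, the predicted
// logit and the sum of exponentials are combined with NCCL all-reduces before the
// loss is computed.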
template <typename T>
struct CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const phi::DenseTensor* logits = ctx.Input<phi::DenseTensor>("Logits");
const phi::DenseTensor* labels = ctx.Input<phi::DenseTensor>("Label");
phi::DenseTensor* softmax = ctx.Output<phi::DenseTensor>("Softmax");
phi::DenseTensor* loss = ctx.Output<phi::DenseTensor>("Loss");
const int64_t ignore_index = ctx.Attr<int64_t>("ignore_index");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place);
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
// use global calculate stream
const auto stream = static_cast<phi::GPUContext*>(
platform::DeviceContextPool::Instance().Get(place))
->stream();
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
phi::DenseTensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = phi::funcs::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = phi::funcs::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
phi::DenseTensor logits_max;
logits_max = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
void* logits_max_buff = logits_max.mutable_data<T>(place);
auto eigen_logits_max = phi::funcs::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
logits_max_buff,
logits_max_buff,
logits_max.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(logits_max.dtype())),
ncclMax,
comm->comm(),
stream));
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(phi::funcs::ValueClip<T>());
// step 3, obtain predict target
phi::DenseTensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int32_t>(),
static_cast<int32_t>(ignore_index),
start_index,
end_index,
N,
D,
nranks);
} else if (label_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int64_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int64_t>(),
ignore_index,
start_index,
end_index,
N,
D,
nranks);
}
void* predict_logits_buff = predicted_logits.mutable_data<T>(place);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
predict_logits_buff,
predict_logits_buff,
predicted_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(predicted_logits.dtype())),
ncclSum,
comm->comm(),
stream));
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
phi::DenseTensor sum_exp_logits;
sum_exp_logits = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits =
phi::funcs::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sum_exp_logits_buff,
sum_exp_logits_buff,
sum_exp_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(sum_exp_logits.dtype())),
ncclSum,
comm->comm(),
stream));
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( CaculateLoss<T, int32_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int32_t>(),
ignore_index,
N);
} else {
hipLaunchKernelGGL(( CaculateLoss<T, int64_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int64_t>(),
ignore_index,
N);
}
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T>
struct CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const phi::DenseTensor* logits = ctx.Input<phi::DenseTensor>("Logits");
const phi::DenseTensor* labels = ctx.Input<phi::DenseTensor>("Label");
phi::DenseTensor* softmax = ctx.Output<phi::DenseTensor>("Softmax");
phi::DenseTensor* loss = ctx.Output<phi::DenseTensor>("Loss");
const int64_t ignore_index = ctx.Attr<int64_t>("ignore_index");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto map = distributed::ProcessGroupMapFromGid::getInstance();
distributed::ProcessGroup* pg = map->get(rid);
distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::MAX;
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
phi::DenseTensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = phi::funcs::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = phi::funcs::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
phi::DenseTensor logits_max;
logits_max = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
auto eigen_logits_max = phi::funcs::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
std::vector<phi::DenseTensor> in_out;
in_out.push_back(logits_max);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(phi::funcs::ValueClip<T>());
// step 3, obtain predict target
phi::DenseTensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int32_t>(),
static_cast<int32_t>(ignore_index),
start_index,
end_index,
N,
D,
nranks);
} else if (label_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int64_t>(),
static_cast<int32_t>(ignore_index),
start_index,
end_index,
N,
D,
nranks);
}
in_out.clear();
in_out.push_back(predicted_logits);
opts.reduce_op = distributed::ReduceOp::SUM;
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
phi::DenseTensor sum_exp_logits;
sum_exp_logits = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits =
phi::funcs::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
in_out.clear();
in_out.push_back(sum_exp_logits);
opts.reduce_op = distributed::ReduceOp::SUM;
pg->AllReduce(in_out, in_out, opts)->Synchronize();
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( CaculateLoss<T, int32_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int32_t>(),
ignore_index,
N);
} else {
hipLaunchKernelGGL(( CaculateLoss<T, int64_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int64_t>(),
ignore_index,
N);
}
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T, typename DeviceContext>
class CSoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const phi::DenseTensor* labels = context.Input<phi::DenseTensor>("Label");
const phi::DenseTensor* loss_grad =
context.Input<phi::DenseTensor>(framework::GradVarName("Loss"));
phi::DenseTensor* logit_grad =
context.Output<phi::DenseTensor>(framework::GradVarName("Logits"));
const phi::DenseTensor* softmax =
context.Input<phi::DenseTensor>("Softmax");
const int64_t ignore_index = context.Attr<int64_t>("ignore_index");
const int rank = context.Attr<int>("rank");
auto& dev_ctx = context.template device_context<phi::GPUContext>();
if (logit_grad != softmax) {
framework::TensorCopy(
*softmax, context.GetPlace(), context.device_context(), logit_grad);
}
const auto sofrmax_dims = softmax->dims();
const int axis = sofrmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, sofrmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, sofrmax_dims);
phi::DenseTensor logit_grad_2d;
logit_grad_2d.ShareDataWith(*logit_grad).Resize({N, D});
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
const int start_index = rank * D;
const int end_index = start_index + D;
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( MaskLabelByIndexGrad<T, int32_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logit_grad_2d.data<T>(),
loss_grad->data<T>(),
labels->data<int32_t>(),
start_index,
end_index,
N,
D,
ignore_index);
} else if (label_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( MaskLabelByIndexGrad<T, int64_t>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logit_grad_2d.data<T>(),
loss_grad->data<T>(),
labels->data<int64_t>(),
start_index,
end_index,
N,
D,
ignore_index);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
PD_REGISTER_STRUCT_KERNEL(c_softmax_with_cross_entropy,
GPU,
ALL_LAYOUT,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel,
float,
double,
plat::float16) {}
PD_REGISTER_STRUCT_KERNEL(c_softmax_with_cross_entropy_grad,
GPU,
ALL_LAYOUT,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel,
float,
double,
plat::float16) {}
| 80d95ac434074809fb6a2bc1188ad6d8ae4c7018.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/cross_entropy.h"
#include "paddle/phi/kernels/funcs/math.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/softmax_impl.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
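// For each row, copy the logit at its label position into predicted_logits when the
// label falls inside this rank's class shard [start_index, end_index); entries whose
// label lies outside the shard are left untouched (the caller zero-initializes them).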
template <typename T, typename IndexT>
__global__ void MaskLabelByIndex(T* predicted_logits,
const T* logit,
const IndexT* label,
const IndexT ignore_index,
const int start_index,
const int end_index,
const int64_t N,
const int64_t D,
const int nranks) {
CUDA_KERNEL_LOOP(i, N) {
auto real_label = label[i];
PADDLE_ENFORCE(((real_label < D * nranks) && (real_label >= 0)) ||
(real_label == ignore_index),
"The index is out of bounds, "
"please check whether the value of label and "
"input meet the class number. It should "
"be less than [%ld] or equal to [%ld], but received [%ld]",
static_cast<int64_t>(D * nranks),
static_cast<int64_t>(ignore_index),
static_cast<int64_t>(real_label));
if (real_label >= start_index && real_label < end_index) {
predicted_logits[i] = logit[i * D + real_label - start_index];
}
}
}
template <typename T, typename IndexT>
__global__ void CaculateLoss(T* loss,
const T* predict_logits,
const T* sum_exp_logits,
const IndexT* label,
const int64_t ignore_index,
const int N) {
CUDA_KERNEL_LOOP(i, N) {
auto real_label = static_cast<int64_t>(label[i]);
loss[i] = ignore_index == real_label
? static_cast<T>(0)
: phi::funcs::TolerableValue<T>()(
phi::funcs::TolerableValue<T>()(
phi::funcs::real_log(sum_exp_logits[i])) -
predict_logits[i]);
}
}
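// Backward pass: logits_grad initially holds the softmax; subtract 1 at the label
// column when it falls in this rank's shard, scale everything by the upstream loss
// gradient, and zero out rows whose label equals ignore_index.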
template <typename T, typename IndexT>
__global__ void MaskLabelByIndexGrad(T* logits_grad,
const T* loss_grad,
const IndexT* labels,
const int start_index,
const int end_index,
const int64_t N,
const int64_t D,
const int64_t ignore_index) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
auto lbl = static_cast<int64_t>(labels[row]);
if (lbl == ignore_index) {
logits_grad[i] = static_cast<T>(0.0);
} else if ((col + start_index) == labels[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
} else {
logits_grad[i] *= loss_grad[row];
}
}
}
template <typename T, typename DeviceContext>
class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const int rid = ctx.Attr<int>("ring_id");
auto map = distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
} else {
CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
}
}
};
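// Softmax-with-cross-entropy for model-parallel training: the class (last) dimension
// of the logits is sharded across nranks devices, so the per-row max, the predicted
// logit and the sum of exponentials are combined with NCCL all-reduces before the
// loss is computed.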
template <typename T>
struct CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const phi::DenseTensor* logits = ctx.Input<phi::DenseTensor>("Logits");
const phi::DenseTensor* labels = ctx.Input<phi::DenseTensor>("Label");
phi::DenseTensor* softmax = ctx.Output<phi::DenseTensor>("Softmax");
phi::DenseTensor* loss = ctx.Output<phi::DenseTensor>("Loss");
const int64_t ignore_index = ctx.Attr<int64_t>("ignore_index");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place);
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
// use global calculate stream
const auto stream = static_cast<phi::GPUContext*>(
platform::DeviceContextPool::Instance().Get(place))
->stream();
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
phi::DenseTensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = phi::funcs::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = phi::funcs::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
phi::DenseTensor logits_max;
logits_max = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
void* logits_max_buff = logits_max.mutable_data<T>(place);
auto eigen_logits_max = phi::funcs::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
logits_max_buff,
logits_max_buff,
logits_max.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(logits_max.dtype())),
ncclMax,
comm->comm(),
stream));
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(phi::funcs::ValueClip<T>());
// step 3, obtain predict target
phi::DenseTensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
MaskLabelByIndex<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int32_t>(),
static_cast<int32_t>(ignore_index),
start_index,
end_index,
N,
D,
nranks);
} else if (label_type == framework::proto::VarType::INT64) {
MaskLabelByIndex<T, int64_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int64_t>(),
ignore_index,
start_index,
end_index,
N,
D,
nranks);
}
void* predict_logits_buff = predicted_logits.mutable_data<T>(place);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
predict_logits_buff,
predict_logits_buff,
predicted_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(predicted_logits.dtype())),
ncclSum,
comm->comm(),
stream));
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
phi::DenseTensor sum_exp_logits;
sum_exp_logits = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits =
phi::funcs::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sum_exp_logits_buff,
sum_exp_logits_buff,
sum_exp_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(sum_exp_logits.dtype())),
ncclSum,
comm->comm(),
stream));
if (label_type == framework::proto::VarType::INT32) {
CaculateLoss<T, int32_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int32_t>(),
ignore_index,
N);
} else {
CaculateLoss<T, int64_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int64_t>(),
ignore_index,
N);
}
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T>
struct CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const phi::DenseTensor* logits = ctx.Input<phi::DenseTensor>("Logits");
const phi::DenseTensor* labels = ctx.Input<phi::DenseTensor>("Label");
phi::DenseTensor* softmax = ctx.Output<phi::DenseTensor>("Softmax");
phi::DenseTensor* loss = ctx.Output<phi::DenseTensor>("Loss");
const int64_t ignore_index = ctx.Attr<int64_t>("ignore_index");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto map = distributed::ProcessGroupMapFromGid::getInstance();
distributed::ProcessGroup* pg = map->get(rid);
distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::MAX;
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
phi::DenseTensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = phi::funcs::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = phi::funcs::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
phi::DenseTensor logits_max;
logits_max = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
auto eigen_logits_max = phi::funcs::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
std::vector<phi::DenseTensor> in_out;
in_out.push_back(logits_max);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(phi::funcs::ValueClip<T>());
// step 3, obtain predict target
phi::DenseTensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
MaskLabelByIndex<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int32_t>(),
static_cast<int32_t>(ignore_index),
start_index,
end_index,
N,
D,
nranks);
} else if (label_type == framework::proto::VarType::INT64) {
MaskLabelByIndex<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(),
softmax_2d.data<T>(),
labels->data<int64_t>(),
static_cast<int32_t>(ignore_index),
start_index,
end_index,
N,
D,
nranks);
}
in_out.clear();
in_out.push_back(predicted_logits);
opts.reduce_op = distributed::ReduceOp::SUM;
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
phi::DenseTensor sum_exp_logits;
sum_exp_logits = ctx.AllocateTmpTensor<T, phi::GPUContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits =
phi::funcs::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
in_out.clear();
in_out.push_back(sum_exp_logits);
opts.reduce_op = distributed::ReduceOp::SUM;
pg->AllReduce(in_out, in_out, opts)->Synchronize();
if (label_type == framework::proto::VarType::INT32) {
CaculateLoss<T, int32_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int32_t>(),
ignore_index,
N);
} else {
CaculateLoss<T, int64_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_2d.data<T>(),
predicted_logits.data<T>(),
sum_exp_logits.data<T>(),
labels->data<int64_t>(),
ignore_index,
N);
}
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T, typename DeviceContext>
class CSoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const phi::DenseTensor* labels = context.Input<phi::DenseTensor>("Label");
const phi::DenseTensor* loss_grad =
context.Input<phi::DenseTensor>(framework::GradVarName("Loss"));
phi::DenseTensor* logit_grad =
context.Output<phi::DenseTensor>(framework::GradVarName("Logits"));
const phi::DenseTensor* softmax =
context.Input<phi::DenseTensor>("Softmax");
const int64_t ignore_index = context.Attr<int64_t>("ignore_index");
const int rank = context.Attr<int>("rank");
auto& dev_ctx = context.template device_context<phi::GPUContext>();
if (logit_grad != softmax) {
framework::TensorCopy(
*softmax, context.GetPlace(), context.device_context(), logit_grad);
}
const auto sofrmax_dims = softmax->dims();
const int axis = sofrmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, sofrmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, sofrmax_dims);
phi::DenseTensor logit_grad_2d;
logit_grad_2d.ShareDataWith(*logit_grad).Resize({N, D});
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
const int start_index = rank * D;
const int end_index = start_index + D;
if (label_type == framework::proto::VarType::INT32) {
MaskLabelByIndexGrad<T, int32_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(logit_grad_2d.data<T>(),
loss_grad->data<T>(),
labels->data<int32_t>(),
start_index,
end_index,
N,
D,
ignore_index);
} else if (label_type == framework::proto::VarType::INT64) {
MaskLabelByIndexGrad<T, int64_t>
<<<blocks, threads, 0, dev_ctx.stream()>>>(logit_grad_2d.data<T>(),
loss_grad->data<T>(),
labels->data<int64_t>(),
start_index,
end_index,
N,
D,
ignore_index);
}
}
};
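// Editorial note: the backward pass starts from logit_grad = softmax (copied above)
// and MaskLabelByIndexGrad adjusts only the entries of this rank's class shard. In
// the standard softmax-with-cross-entropy gradient this presumably amounts to
// subtracting 1 at the label position and scaling by loss_grad, with ignore_index
// rows zeroed; the exact behaviour is in the kernel defined earlier in the file.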
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
PD_REGISTER_STRUCT_KERNEL(c_softmax_with_cross_entropy,
GPU,
ALL_LAYOUT,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel,
float,
double,
plat::float16) {}
PD_REGISTER_STRUCT_KERNEL(c_softmax_with_cross_entropy_grad,
GPU,
ALL_LAYOUT,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel,
float,
double,
plat::float16) {}
|
9f9a22948f0fd3d379983b8fe795233e00e4eb5f.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * Discrete Cosine Transform, row-wise (DCT-I)
 * DCT_I_Row
 * This CUDA code works with any type of input mxArray,
 * gpuArray or standard MATLAB CPU array {prhs[0] := mxGPUArray or CPU Array},
 * and returns a gpuArray output, B = DCT_I_Row(A) = mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_I_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
template <unsigned int TILE_DIM > __global__ void DCTI_Row_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = cos(((threadIdx.y + k*TILE_DIM)*PI_d*Col / (numAColumns - 1)))*sqrt(1.0 / (1 + DELTA(Col + 1, 1) + DELTA(Col + 1, numAColumns)))*sqrt(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1) + DELTA(numAColumns, (threadIdx.y + k*TILE_DIM) + 1)))*sqrt(2.0 / (numAColumns-1)); }
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
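/*
 * Editorial note (derived from the kernel above): the kernel forms C = A * T for an
 * N-column input (N = numAColumns), where
 *   T[k][c] = cos(pi*k*c/(N-1)) * w(k) * w(c) * sqrt(2/(N-1)),
 *   w(i)    = 1/sqrt(1 + delta(i,0) + delta(i,N-1)),
 * i.e. a row-wise orthonormal DCT-I. An illustrative CPU reference (sketch only,
 * not part of the original file):
 *
 *   for (int r = 0; r < numARows; ++r)
 *     for (int c = 0; c < N; ++c) {
 *       double s = 0.0;
 *       for (int k = 0; k < N; ++k)
 *         s += A[r*N + k]
 *              * cos(PI_d*k*c/(N - 1))
 *              * sqrt(1.0/(1 + (k == 0) + (k == N - 1)))
 *              * sqrt(1.0/(1 + (c == 0) + (c == N - 1)))
 *              * sqrt(2.0/(N - 1));
 *       C[r*N + c] = s;
 *     }
 */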
// Row-wise DCT-I transform - Host code
// Grid dimensions are rounded up, so the matrix size need not be a multiple of the tile size
extern "C" void CalculateTransformDCTRowOne(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
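// Editorial note: compute capability < 2.0 devices cannot launch 32x32 = 1024-thread
// blocks, so a 16x16 tile is selected for them; newer GPUs use the full 32x32 tile.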
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
//hostComputedC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Row_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Row_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| 9f9a22948f0fd3d379983b8fe795233e00e4eb5f.cu | /*
 * Discrete Cosine Transform, row-wise (DCT-I)
 * DCT_I_Row
 * This CUDA code works with any type of input mxArray,
 * gpuArray or standard MATLAB CPU array {prhs[0] := mxGPUArray or CPU Array},
 * and returns a gpuArray output, B = DCT_I_Row(A) = mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_I_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
template <unsigned int TILE_DIM > __global__ void DCTI_Row_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = cos(((threadIdx.y + k*TILE_DIM)*PI_d*Col / (numAColumns - 1)))*sqrt(1.0 / (1 + DELTA(Col + 1, 1) + DELTA(Col + 1, numAColumns)))*sqrt(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1) + DELTA(numAColumns, (threadIdx.y + k*TILE_DIM) + 1)))*sqrt(2.0 / (numAColumns-1)); }
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTRowOne(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
//hostComputedC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Row_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Row_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
bb68ae8ca85c1fd464e026be6beddbda820f1e7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_sub.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
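// Editorial note: blocks_ lists the 20 thread-block shapes to sweep and matrices_ the
// square problem sizes; after a 10-launch warm-up, each (matrix, block) pair below is
// timed over 1000 kernel launches.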
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *newB = NULL;
hipMalloc(&newB, XSIZE*YSIZE);
char *first = NULL;
hipMalloc(&first, XSIZE*YSIZE);
char *second = NULL;
hipMalloc(&second, XSIZE*YSIZE);
int size_biggest = XSIZE*YSIZE;
int diff = 1;
int *size_newB = NULL;
hipMalloc(&size_newB, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernel_sub), dim3(gridBlock), dim3(threadBlock), 0, 0, newB, first, second, size_biggest, diff, size_newB);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_sub), dim3(gridBlock), dim3(threadBlock), 0, 0, newB, first, second, size_biggest, diff, size_newB);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_sub), dim3(gridBlock), dim3(threadBlock), 0, 0, newB, first, second, size_biggest, diff, size_newB);
}
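// Editorial note: kernel launches are asynchronous and no hipDeviceSynchronize() is
// issued before the end timestamp taken below, so the measured interval need not
// cover the full kernel execution time.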
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bb68ae8ca85c1fd464e026be6beddbda820f1e7c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_sub.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *newB = NULL;
cudaMalloc(&newB, XSIZE*YSIZE);
char *first = NULL;
cudaMalloc(&first, XSIZE*YSIZE);
char *second = NULL;
cudaMalloc(&second, XSIZE*YSIZE);
int size_biggest = XSIZE*YSIZE;
int diff = 1;
int *size_newB = NULL;
cudaMalloc(&size_newB, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_sub<<<gridBlock,threadBlock>>>(newB,first,second,size_biggest,diff,size_newB);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_sub<<<gridBlock,threadBlock>>>(newB,first,second,size_biggest,diff,size_newB);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_sub<<<gridBlock,threadBlock>>>(newB,first,second,size_biggest,diff,size_newB);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bd594afd37e49ea746839f72989b6542e025b224.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <omp.h>
#include <string>
#include <fstream>
#include <vector>
#include <iomanip>
#include <iostream>
#include <sstream>
using namespace std;
struct GpuStruct
{
char *pav;
int kiekis;
double kaina;
};
class Struct
{
string pav;
int kiekis;
double kaina;
GpuStruct gpuStruct;
public:
Struct(string input);
~Struct(){hipFree(gpuStruct.pav);}
GpuStruct GetDev(){return gpuStruct;}
string Print();
};
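// Editorial note: Struct owns the device buffer gpuStruct.pav and frees it in its
// destructor, but it relies on the implicitly generated copy operations. If a
// std::vector<Struct> reallocates, the displaced elements are destroyed and hipFree
// runs while the copies still hold the same pointer; adding move semantics (or
// reserving capacity up front) would avoid the dangling device pointer.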
Struct::Struct(string input)
{
int start, end;
start = 0;
end = input.find(' ');
pav = input.substr(0, end).c_str();
start = end + 1;
end = input.find(' ', start);
kiekis = stoi(input.substr(start, end - start));
start = end + 1;
kaina = stod(input.substr(start));
gpuStruct.kaina = kaina;
gpuStruct.kiekis = kiekis;
hipMalloc(&gpuStruct.pav, pav.size() + 1);
hipMemcpy(gpuStruct.pav, pav.c_str(), pav.size() + 1, hipMemcpyHostToDevice);
}
string Struct::Print()
{
stringstream ss;
ss << setw(15) << pav << setw(7) << kiekis << setw(20) << kaina;
return ss.str();
}
vector<vector<Struct>> ReadStuff(string file);
vector<string> ReadLines(string file);
string Titles();
string Print(int nr, Struct &s);
void syncOut(vector<vector<Struct>>&);
__global__ void DevPrint(GpuStruct *data, int* starts);
int main()
{
auto input = ReadStuff("LapunasD.txt");
int count = 0;
for(auto &vec : input)
count += vec.size();
cout << "\nsinchroninis isvedimas\n\n";
syncOut(input);
cout << "\nasinchroninis isvedimas\n\n";
cout << setw(10) << "Procesas" << setw(3) << "Nr" << Titles() << "\n\n";
vector<int> starts;
vector<GpuStruct> localStructs;
int put = 0;
for(auto &vec : input)
{
starts.push_back(put);
for(auto &s : vec)
{
localStructs.push_back(s.GetDev());
put++;
}
}
starts.push_back(put);
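	// starts[] now holds CSR-style offsets into localStructs: group i occupies
	// [starts[i], starts[i+1]); e.g. group sizes {2,3} give starts = {0,2,5}.
	// DevPrint reads consecutive entries to locate its group.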
int *startsdev;
hipMalloc(&startsdev, sizeof(int) * starts.size());
hipMemcpy(startsdev, &starts[0], sizeof(int) * starts.size(), hipMemcpyHostToDevice);
GpuStruct *arr;
hipMalloc(&arr, sizeof(GpuStruct) * count);
hipMemcpy(arr, &localStructs[0], sizeof(GpuStruct) * count, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( DevPrint), dim3(1), dim3(input.size()), 0, 0, arr, startsdev);
hipDeviceSynchronize();
system("pause");
hipFree(arr);
hipFree(startsdev);
return 0;
}
vector<vector<Struct>> ReadStuff(string file)
{
auto lines = ReadLines(file);
vector<vector<Struct>> ret;
vector<Struct> tmp;
for(int i = 0; i < lines.size(); i++)
{
if(lines[i] == "")
{
ret.push_back(move(tmp));
}
else
{
tmp.emplace_back(lines[i]);
}
}
return ret;
}
vector<string> ReadLines(string file)
{
vector<string> ret;
ifstream duom(file);
while(!duom.eof())
{
string line;
getline(duom, line);
ret.push_back(line);
}
return ret;
}
string Titles()
{
stringstream ss;
ss << setw(15) << "Pavadiniams" << setw(7) << "Kiekis" << setw(20) << "Kaina";
return ss.str();
}
void syncOut(vector<vector<Struct>> &data)
{
cout << setw(3) << "Nr" << Titles() << endl << endl;
for(int i = 0; i < data.size(); i++)
{
auto &vec = data[i];
cout << "Procesas_" << i << endl;
for(int j = 0; j < vec.size(); j++)
{
cout << Print(j, vec[j]) << endl;
}
}
}
string Print(int nr, Struct &s)
{
stringstream ss;
ss << setw(3) << nr << s.Print();
return ss.str();
}
__global__ void DevPrint(GpuStruct *data, int *starts)
{
int id = threadIdx.x;
GpuStruct *d = data + starts[id];
int count = starts[id+1] - starts[id];
for(int i = 0; i < count; i++)
{
printf("Procesas_%i %2i %14s %6i %19f\n", id, i, d[i].pav, d[i].kiekis, d[i].kaina);
}
} | bd594afd37e49ea746839f72989b6542e025b224.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <omp.h>
#include <string>
#include <fstream>
#include <vector>
#include <iomanip>
#include <iostream>
#include <sstream>
using namespace std;
struct GpuStruct
{
char *pav;
int kiekis;
double kaina;
};
class Struct
{
string pav;
int kiekis;
double kaina;
GpuStruct gpuStruct;
public:
Struct(string input);
~Struct(){cudaFree(gpuStruct.pav);}
GpuStruct GetDev(){return gpuStruct;}
string Print();
};
Struct::Struct(string input)
{
int start, end;
start = 0;
end = input.find(' ');
pav = input.substr(0, end).c_str();
start = end + 1;
end = input.find(' ', start);
kiekis = stoi(input.substr(start, end - start));
start = end + 1;
kaina = stod(input.substr(start));
gpuStruct.kaina = kaina;
gpuStruct.kiekis = kiekis;
cudaMalloc(&gpuStruct.pav, pav.size() + 1);
cudaMemcpy(gpuStruct.pav, pav.c_str(), pav.size() + 1, cudaMemcpyHostToDevice);
}
string Struct::Print()
{
stringstream ss;
ss << setw(15) << pav << setw(7) << kiekis << setw(20) << kaina;
return ss.str();
}
vector<vector<Struct>> ReadStuff(string file);
vector<string> ReadLines(string file);
string Titles();
string Print(int nr, Struct &s);
void syncOut(vector<vector<Struct>>&);
__global__ void DevPrint(GpuStruct *data, int* starts);
int main()
{
auto input = ReadStuff("LapunasD.txt");
int count = 0;
for(auto &vec : input)
count += vec.size();
cout << "\nsinchroninis isvedimas\n\n";
syncOut(input);
cout << "\nasinchroninis isvedimas\n\n";
cout << setw(10) << "Procesas" << setw(3) << "Nr" << Titles() << "\n\n";
vector<int> starts;
vector<GpuStruct> localStructs;
int put = 0;
for(auto &vec : input)
{
starts.push_back(put);
for(auto &s : vec)
{
localStructs.push_back(s.GetDev());
put++;
}
}
starts.push_back(put);
int *startsdev;
cudaMalloc(&startsdev, sizeof(int) * starts.size());
cudaMemcpy(startsdev, &starts[0], sizeof(int) * starts.size(), cudaMemcpyHostToDevice);
GpuStruct *arr;
cudaMalloc(&arr, sizeof(GpuStruct) * count);
cudaMemcpy(arr, &localStructs[0], sizeof(GpuStruct) * count, cudaMemcpyHostToDevice);
DevPrint<<<1, input.size()>>>(arr, startsdev);
cudaDeviceSynchronize();
system("pause");
cudaFree(arr);
cudaFree(startsdev);
return 0;
}
vector<vector<Struct>> ReadStuff(string file)
{
auto lines = ReadLines(file);
vector<vector<Struct>> ret;
vector<Struct> tmp;
for(int i = 0; i < lines.size(); i++)
{
if(lines[i] == "")
{
ret.push_back(move(tmp));
}
else
{
tmp.emplace_back(lines[i]);
}
}
return ret;
}
vector<string> ReadLines(string file)
{
vector<string> ret;
ifstream duom(file);
while(!duom.eof())
{
string line;
getline(duom, line);
ret.push_back(line);
}
return ret;
}
string Titles()
{
stringstream ss;
ss << setw(15) << "Pavadiniams" << setw(7) << "Kiekis" << setw(20) << "Kaina";
return ss.str();
}
void syncOut(vector<vector<Struct>> &data)
{
cout << setw(3) << "Nr" << Titles() << endl << endl;
for(int i = 0; i < data.size(); i++)
{
auto &vec = data[i];
cout << "Procesas_" << i << endl;
for(int j = 0; j < vec.size(); j++)
{
cout << Print(j, vec[j]) << endl;
}
}
}
string Print(int nr, Struct &s)
{
stringstream ss;
ss << setw(3) << nr << s.Print();
return ss.str();
}
__global__ void DevPrint(GpuStruct *data, int *starts)
{
int id = threadIdx.x;
GpuStruct *d = data + starts[id];
int count = starts[id+1] - starts[id];
for(int i = 0; i < count; i++)
{
printf("Procesas_%i %2i %14s %6i %19f\n", id, i, d[i].pav, d[i].kiekis, d[i].kaina);
}
} |
3a595dc7f9d6e69f13c8107243784d283c03cbc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/stop/criterion_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/stop/stopping_status.hpp>
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Set all statuses namespace.
* @ref set_status
* @ingroup set_all_statuses
*/
namespace set_all_statuses {
constexpr int default_block_size = 512;
__global__ __launch_bounds__(default_block_size) void set_all_statuses(
size_type num_elems, uint8 stoppingId, bool setFinalized,
stopping_status* stop_status)
{
const auto tidx = thread::get_thread_id_flat();
if (tidx < num_elems) {
stop_status[tidx].stop(stoppingId, setFinalized);
}
}
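// Editorial note: the host wrapper below assigns one GPU thread per stopping_status
// entry; the grid is sized with ceildiv, and the flat-id bounds check above covers
// the final partial block.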
void set_all_statuses(std::shared_ptr<const CudaExecutor> exec,
uint8 stoppingId, bool setFinalized,
Array<stopping_status>* stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(ceildiv(stop_status->get_num_elems(), block_size.x), 1,
1);
hipLaunchKernelGGL(( set_all_statuses), dim3(grid_size), dim3(block_size), 0, 0,
stop_status->get_num_elems(), stoppingId, setFinalized,
as_cuda_type(stop_status->get_data()));
}
} // namespace set_all_statuses
} // namespace cuda
} // namespace kernels
} // namespace gko
| 3a595dc7f9d6e69f13c8107243784d283c03cbc3.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/stop/criterion_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/stop/stopping_status.hpp>
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Set all statuses namespace.
* @ref set_status
* @ingroup set_all_statuses
*/
namespace set_all_statuses {
constexpr int default_block_size = 512;
__global__ __launch_bounds__(default_block_size) void set_all_statuses(
size_type num_elems, uint8 stoppingId, bool setFinalized,
stopping_status* stop_status)
{
const auto tidx = thread::get_thread_id_flat();
if (tidx < num_elems) {
stop_status[tidx].stop(stoppingId, setFinalized);
}
}
void set_all_statuses(std::shared_ptr<const CudaExecutor> exec,
uint8 stoppingId, bool setFinalized,
Array<stopping_status>* stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(ceildiv(stop_status->get_num_elems(), block_size.x), 1,
1);
set_all_statuses<<<grid_size, block_size, 0, 0>>>(
stop_status->get_num_elems(), stoppingId, setFinalized,
as_cuda_type(stop_status->get_data()));
}
} // namespace set_all_statuses
} // namespace cuda
} // namespace kernels
} // namespace gko
|
d8aa121c300f4dabb7d35ed774696c2562cc2a7c.hip | // !!! This is a file automatically generated by hipify!!!
/*
Implements the sequential cuda vectors.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <petsc/private/vecimpl.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#undef __FUNCT__
#define __FUNCT__ "VecCUDAAllocateCheck"
/*
Allocates space for the vector array on the GPU if it does not exist.
Does NOT change the PetscCUDAFlag for the vector
Does NOT zero the CUDA array
*/
PetscErrorCode VecCUDAAllocateCheck(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
hipStream_t stream;
Vec_CUDA *veccuda;
PetscFunctionBegin;
if (!v->spptr) {
ierr = PetscMalloc(sizeof(Vec_CUDA),&v->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)v->spptr;
err = hipMalloc((void**)&veccuda->GPUarray_allocated,sizeof(PetscScalar)*((PetscBLASInt)v->map->n));CHKERRCUDA(err);
veccuda->GPUarray = veccuda->GPUarray_allocated;
err = hipStreamCreate(&stream);CHKERRCUDA(err);
veccuda->stream = stream;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
if (v->valid_GPU_array == PETSC_CUDA_UNALLOCATED) {
if (v->data && ((Vec_Seq*)v->data)->array) {
v->valid_GPU_array = PETSC_CUDA_CPU;
} else {
v->valid_GPU_array = PETSC_CUDA_GPU;
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPU"
/* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */
PetscErrorCode VecCUDACopyToGPU(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = hipMemcpy(varray,((Vec_Seq*)v->data)->array,v->map->n*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPUSome"
PetscErrorCode VecCUDACopyToGPUSome(Vec v, PetscCUDAIndices ci)
{
PetscScalar *varray;
PetscErrorCode ierr;
hipError_t err;
PetscScalar *cpuPtr, *gpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
s = (Vec_Seq*)v->data;
ierr = PetscLogEventBegin(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
varray = ((Vec_CUDA*)v->spptr)->GPUarray;
gpuPtr = varray + ptop_scatter->recvLowestIndex;
cpuPtr = s->array + ptop_scatter->recvLowestIndex;
/* Note : this code copies the smallest contiguous chunk of data
containing ALL of the indices */
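    /* Illustrative example (editorial): scattered indices {3,7,9} are covered by a
       single copy of elements 3..9, i.e. recvLowestIndex = 3 and nr = 7; some
       redundant data moves, but only one contiguous hipMemcpy is issued. */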
err = hipMemcpy(gpuPtr,cpuPtr,ptop_scatter->nr*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
// Set the buffer states
v->valid_GPU_array = PETSC_CUDA_BOTH;
ierr = PetscLogEventEnd(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPU"
/*
VecCUDACopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU
*/
PetscErrorCode VecCUDACopyFromGPU(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = hipMemcpy(((Vec_Seq*)v->data)->array,varray,v->map->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPUSome"
/* Note that this function only copies *some* of the values up from the GPU to CPU,
   which means that we need to recombine the data at some point before using any of the standard functions.
We could add another few flag-types to keep track of this, or treat things like VecGetArray VecRestoreArray
where you have to always call in pairs
*/
PetscErrorCode VecCUDACopyFromGPUSome(Vec v, PetscCUDAIndices ci)
{
const PetscScalar *varray, *gpuPtr;
PetscErrorCode ierr;
hipError_t err;
PetscScalar *cpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
varray=((Vec_CUDA*)v->spptr)->GPUarray;
s = (Vec_Seq*)v->data;
gpuPtr = varray + ptop_scatter->sendLowestIndex;
cpuPtr = s->array + ptop_scatter->sendLowestIndex;
/* Note : this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = hipMemcpy(cpuPtr,gpuPtr,ptop_scatter->ns*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(v,&varray);CHKERRQ(ierr);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECSEQCUDA - VECSEQCUDA = "seqcuda" - The basic sequential vector, modified to use CUDA
Options Database Keys:
. -vec_type seqcuda - sets the vector type to VECSEQCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq()
M*/
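/* Illustrative usage (editorial sketch, not part of the original source):
     Vec x;
     VecCreate(PETSC_COMM_SELF,&x);
     VecSetSizes(x,PETSC_DECIDE,n);
     VecSetType(x,VECSEQCUDA);   // or -vec_type seqcuda together with VecSetFromOptions()
     VecSet(x,1.0);              // dispatches to VecSet_SeqCUDA() below and runs on the GPU
     VecDestroy(&x);
*/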
#undef __FUNCT__
#define __FUNCT__ "VecAYPX_SeqCUDA"
PetscErrorCode VecAYPX_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
PetscScalar sone=1.0;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = hipMemcpy(yarray,xarray,bn*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
} else if (alpha == (PetscScalar)1.0) {
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
} else {
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&sone,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPY_SeqCUDA"
PetscErrorCode VecAXPY_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
if (alpha != (PetscScalar)0.0) {
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseDivide_SeqCUDA"
PetscErrorCode VecPointwiseDivide_SeqCUDA(Vec win, Vec xin, Vec yin)
{
PetscInt n = xin->map->n;
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::divides<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = PetscLogFlops(n);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecWAXPY_SeqCUDA"
PetscErrorCode VecWAXPY_SeqCUDA(Vec win,PetscScalar alpha,Vec xin, Vec yin)
{
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(win->map->n,&bn);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
ierr = VecCopy_SeqCUDA(yin,win);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
err = hipMemcpy(warray,yarray,win->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,warray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecMAXPY_SeqCUDA"
PetscErrorCode VecMAXPY_SeqCUDA(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n,j,j_rem;
PetscScalar alpha0,alpha1,alpha2,alpha3;
PetscFunctionBegin;
ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr);
switch (j_rem=nv&0x3) {
case 3:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha += 3;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
y += 3;
break;
case 2:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha +=2;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
y +=2;
break;
case 1:
alpha0 = *alpha++;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
y +=1;
break;
}
for (j=j_rem; j<nv; j+=4) {
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha3 = alpha[3];
alpha += 4;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha3,y[3]);CHKERRQ(ierr);
y += 4;
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDot_SeqCUDA"
PetscErrorCode VecDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
/* arguments y, x are reversed because BLAS complex conjugates the first argument, PETSc the second */
cberr = cublasXdot(cublasv2handle,bn,yarray,one,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n >0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
//
// CUDA kernels for MDot to follow
//
// set work group size to be a power of 2 (128 is usually a good compromise between portability and speed)
#define MDOT_WORKGROUP_SIZE 128
#define MDOT_WORKGROUP_NUM 128
#if !defined(PETSC_USE_COMPLEX)
// M = 2:
__global__ void VecMDot_SeqCUDA_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE];
}
}
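/* Editorial note: each VecMDot kernel in this family follows the same two-stage
   reduction: every block accumulates per-vector partial dot products of its slice in
   shared memory, folds them with a power-of-two tree, and writes one partial per
   vector to group_results; the host then finishes with
     z[j] = sum over b of group_results[j*MDOT_WORKGROUP_NUM + b]
   (see the summation loops in VecMDot_SeqCUDA further below). */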
// M = 3:
__global__ void VecMDot_SeqCUDA_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
}
}
// M = 4:
__global__ void VecMDot_SeqCUDA_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
}
}
// M = 8:
__global__ void VecMDot_SeqCUDA_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
PetscScalar group_sum4 = 0;
PetscScalar group_sum5 = 0;
PetscScalar group_sum6 = 0;
PetscScalar group_sum7 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
group_sum4 += entry_x * y4[i];
group_sum5 += entry_x * y5[i];
group_sum6 += entry_x * y6[i];
group_sum7 += entry_x * y7[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4;
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5;
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6;
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE];
}
}
#endif /* !defined(PETSC_USE_COMPLEX) */
#undef __FUNCT__
#define __FUNCT__ "VecMDot_SeqCUDA"
PetscErrorCode VecMDot_SeqCUDA(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z)
{
PetscErrorCode ierr;
PetscInt i,n = xin->map->n,current_y_index = 0;
const PetscScalar *xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr;
PetscScalar *group_results_gpu;
#if !defined(PETSC_USE_COMPLEX)
PetscInt j;
PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel
#endif
hipError_t cuda_ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUDA not positive.");
/* Handle the case of local size zero first */
if (!xin->map->n) {
for (i=0; i<nv; ++i) z[i] = 0;
PetscFunctionReturn(0);
}
// allocate scratchpad memory for the results of individual work groups:
cuda_ierr = hipMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8);CHKERRCUDA(cuda_ierr);
ierr = VecCUDAGetArrayRead(xin,&xptr);CHKERRQ(ierr);
while (current_y_index < nv)
{
switch (nv - current_y_index) {
case 7:
case 6:
case 5:
case 4:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel4), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu);
      // copy partial results from each work group back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<4; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
current_y_index += 4;
break;
case 3:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel3), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu);
      // copy partial results from each work group back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<3; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
current_y_index += 3;
break;
case 2:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel2), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,n,group_results_gpu);
      // copy partial results from each work group back to the host
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<2; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
current_y_index += 2;
break;
case 1:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
current_y_index += 1;
break;
default: // 8 or more vectors left
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y4ptr,one,xptr,one,&z[current_y_index+4]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y5ptr,one,xptr,one,&z[current_y_index+5]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y6ptr,one,xptr,one,&z[current_y_index+6]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y7ptr,one,xptr,one,&z[current_y_index+7]);CHKERRCUBLAS(cberr);
#else
// run kernel:
hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel8), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu);
      // copy the partial group results back to the CPU:
cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<8; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
current_y_index += 8;
break;
}
}
ierr = VecCUDARestoreArrayRead(xin,&xptr);CHKERRQ(ierr);
cuda_ierr = hipFree(group_results_gpu);CHKERRCUDA(cuda_ierr);
ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef MDOT_WORKGROUP_SIZE
#undef MDOT_WORKGROUP_NUM
#undef __FUNCT__
#define __FUNCT__ "VecSet_SeqCUDA"
PetscErrorCode VecSet_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscInt n = xin->map->n;
PetscScalar *xarray=NULL;
thrust::device_ptr<PetscScalar> xptr;
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(xin,&xarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = hipMemset(xarray,0,n*sizeof(PetscScalar));CHKERRCUDA(err);
} else {
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::fill(xptr,xptr+n,alpha);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
  ierr = VecCUDARestoreArrayWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScale_SeqCUDA"
PetscErrorCode VecScale_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscScalar *xarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
if (alpha == (PetscScalar)0.0) {
ierr = VecSet_SeqCUDA(xin,alpha);CHKERRQ(ierr);
} else if (alpha != (PetscScalar)1.0) {
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&alpha,xarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecTDot_SeqCUDA"
PetscErrorCode VecTDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXdotu(cublasv2handle,bn,xarray,one,yarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n > 0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCopy_SeqCUDA"
PetscErrorCode VecCopy_SeqCUDA(Vec xin,Vec yin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (xin != yin) {
if (xin->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU if we are on the CPU*/
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_BOTH) {
/* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */
if (yin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU */
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_GPU) {
/* copy in GPU */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_BOTH) {
      /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck);
         default to copying on the GPU (this is an arbitrary choice) */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecSwap_SeqCUDA"
PetscErrorCode VecSwap_SeqCUDA(Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscBLASInt one = 1,bn;
PetscScalar *xarray,*yarray;
hipblasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (xin != yin) {
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXswap(cublasv2handle,bn,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBY_SeqCUDA"
PetscErrorCode VecAXPBY_SeqCUDA(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin)
{
PetscErrorCode ierr;
PetscScalar a = alpha,b = beta;
const PetscScalar *xarray;
PetscScalar *yarray;
PetscBLASInt one = 1, bn;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
if (a == (PetscScalar)0.0) {
ierr = VecScale_SeqCUDA(yin,beta);CHKERRQ(ierr);
} else if (b == (PetscScalar)1.0) {
ierr = VecAXPY_SeqCUDA(yin,alpha,xin);CHKERRQ(ierr);
} else if (a == (PetscScalar)1.0) {
ierr = VecAYPX_SeqCUDA(yin,beta,xin);CHKERRQ(ierr);
} else if (b == (PetscScalar)0.0) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&beta,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBYPCZ_SeqCUDA"
PetscErrorCode VecAXPBYPCZ_SeqCUDA(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscInt n = zin->map->n;
PetscFunctionBegin;
if (gamma == (PetscScalar)1.0) {
/* z = ax + b*y + z */
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
} else {
/* z = a*x + b*y + c*z */
ierr = VecScale_SeqCUDA(zin,gamma);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseMult_SeqCUDA"
PetscErrorCode VecPointwiseMult_SeqCUDA(Vec win,Vec xin,Vec yin)
{
PetscInt n = win->map->n;
const PetscScalar *xarray,*yarray;
PetscScalar *warray;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::multiplies<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = PetscLogFlops(n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* should do infinity norm in cuda */
#undef __FUNCT__
#define __FUNCT__ "VecNorm_SeqCUDA"
PetscErrorCode VecNorm_SeqCUDA(Vec xin,NormType type,PetscReal *z)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n;
PetscBLASInt one = 1, bn;
const PetscScalar *xarray;
hipblasStatus_t cberr;
hipError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr);
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXnrm2(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr);
  } else if (type == NORM_INFINITY) {
    PetscInt    i;
    PetscScalar zs;
    ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
    cberr = cublasIXamax(cublasv2handle,bn,xarray,one,&i);CHKERRCUBLAS(cberr);
    if (bn) {
      /* cublasIXamax() returns a 1-based index */
      err = hipMemcpy(&zs,xarray+i-1,sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err);
      *z = PetscAbsScalar(zs);
    } else *z = 0.0;
    ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
} else if (type == NORM_1) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXasum(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
ierr = VecNorm_SeqCUDA(xin,NORM_1,z);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,z+1);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDotNorm2_SeqCUDA"
PetscErrorCode VecDotNorm2_SeqCUDA(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm)
{
PetscErrorCode ierr;
PetscReal n=s->map->n;
const PetscScalar *sarray,*tarray;
PetscFunctionBegin;
ierr = VecCUDAGetArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(s,t,dp);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(t,t,nm);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDestroy_SeqCUDA"
PetscErrorCode VecDestroy_SeqCUDA(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
}
if (((Vec_CUDA*)v->spptr)->stream) {
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_SeqCUDA_Private(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#if defined(PETSC_USE_COMPLEX)
struct conjugate
{
__host__ __device__
PetscScalar operator()(PetscScalar x)
{
return PetscConj(x);
}
};
#endif
#undef __FUNCT__
#define __FUNCT__ "VecConjugate_SeqCUDA"
PetscErrorCode VecConjugate_SeqCUDA(Vec xin)
{
PetscScalar *xarray;
PetscErrorCode ierr;
#if defined(PETSC_USE_COMPLEX)
PetscInt n = xin->map->n;
thrust::device_ptr<PetscScalar> xptr;
#endif
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::transform(xptr,xptr+n,xptr,conjugate());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
#endif
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecGetLocalVector_SeqCUDA"
PetscErrorCode VecGetLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
hipError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (w->data) {
if (((Vec_Seq*)w->data)->array_allocated) {
ierr = PetscFree(((Vec_Seq*)w->data)->array_allocated);CHKERRQ(ierr);
}
((Vec_Seq*)w->data)->array = NULL;
((Vec_Seq*)w->data)->unplacedarray = NULL;
}
if (w->spptr) {
if (((Vec_CUDA*)w->spptr)->GPUarray) {
err = hipFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
}
err = hipStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
if (v->petscnative) {
ierr = PetscFree(w->data);CHKERRQ(ierr);
w->data = v->data;
w->valid_GPU_array = v->valid_GPU_array;
w->spptr = v->spptr;
ierr = PetscObjectStateIncrease((PetscObject)w);CHKERRQ(ierr);
} else {
ierr = VecGetArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
w->valid_GPU_array = PETSC_CUDA_CPU;
ierr = VecCUDAAllocateCheck(w);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecRestoreLocalVector_SeqCUDA"
PetscErrorCode VecRestoreLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
hipError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (v->petscnative) {
v->data = w->data;
v->valid_GPU_array = w->valid_GPU_array;
v->spptr = w->spptr;
ierr = VecCUDACopyFromGPU(v);CHKERRQ(ierr);
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
w->data = 0;
w->valid_GPU_array = PETSC_CUDA_UNALLOCATED;
w->spptr = 0;
} else {
ierr = VecRestoreArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
if ((Vec_CUDA*)w->spptr) {
err = hipFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
err = hipStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayReadWrite"
/*@C
VecCUDAGetArrayReadWrite - Provides access to the CUDA buffer inside a vector.
This function has semantics similar to VecGetArray(): the pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayReadWrite() assumes that
the user will modify the vector data. This is similar to
intent(inout) in fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayReadWrite(). Upon restoring the vector data
the data on the host will be marked as out of date. A subsequent
access of the host data will thus incur a data transfer from the
device to the host.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA device pointer
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
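/*
   Illustrative usage sketch (not part of the original source): obtain the device pointer of an
   existing VECSEQCUDA vector x, hand it to a user-written kernel that updates the entries in
   place, and release it again. MyKernel is a hypothetical user kernel; everything else follows
   the error-checking pattern used throughout this file.

     PetscErrorCode ierr;
     PetscScalar    *d_x;
     PetscInt       n;
     ierr = VecGetLocalSize(x,&n);CHKERRQ(ierr);
     ierr = VecCUDAGetArrayReadWrite(x,&d_x);CHKERRQ(ierr);
     hipLaunchKernelGGL(MyKernel, dim3((n+255)/256), dim3(256), 0, 0, d_x, n);   // reads and writes the entries
     ierr = VecCUDARestoreArrayReadWrite(x,&d_x);CHKERRQ(ierr);                  // host copy is now marked out of date
*/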
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayReadWrite"
/*@C
VecCUDARestoreArrayReadWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayReadWrite().
This marks the host data as out of date. Subsequent access to the
vector data on the host side with for instance VecGetArray() incurs a
data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayReadWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayRead"
/*@C
VecCUDAGetArrayRead - Provides read access to the CUDA buffer inside a vector.
This function is analogous to VecGetArrayRead(): The pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayRead() assumes that the
   user will not modify the vector data. This is analogous to
intent(in) in Fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayRead(). If the data on the host side was
previously up to date it will remain so, i.e. data on both the device
and the host is up to date. Accessing data on the host side does not
incur a device to host data transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayRead(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayRead(Vec v, const PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
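/*
   Illustrative usage sketch (not part of the original source): read-only access to the device
   data of an existing VECSEQCUDA vector x, here simply handed to cublasXasum() as an example
   consumer that does not modify the entries.

     PetscErrorCode    ierr;
     hipblasStatus_t   cberr;
     const PetscScalar *d_x;
     PetscReal         sum;
     PetscInt          n;
     PetscBLASInt      bn,one = 1;
     ierr = VecGetLocalSize(x,&n);CHKERRQ(ierr);
     ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr);
     ierr = VecCUDAGetArrayRead(x,&d_x);CHKERRQ(ierr);
     cberr = cublasXasum(cublasv2handle,bn,d_x,one,&sum);CHKERRCUBLAS(cberr);
     ierr = VecCUDARestoreArrayRead(x,&d_x);CHKERRQ(ierr);   // a valid host copy stays valid
*/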
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayRead"
/*@C
VecCUDARestoreArrayRead - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayRead().
If the data on the host side was previously up to date it will remain
so, i.e. data on both the device and the host is up to date.
Accessing data on the host side e.g. with VecGetArray() does not
incur a device to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayRead() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayRead(Vec v, const PetscScalar **a)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayWrite"
/*@C
VecCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a vector.
The data pointed to by the device pointer is uninitialized. The user
may not read from this data. Furthermore, the entire array needs to
be filled by the user to obtain well-defined behaviour. The device
memory will be allocated by this function if it hasn't been allocated
previously. This is analogous to intent(out) in Fortran.
The device pointer needs to be released with
VecCUDARestoreArrayWrite(). When the pointer is released the
   host data of the vector is marked as out of date. Subsequent access
of the host data with e.g. VecGetArray() incurs a device to host data
transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer
Fortran note:
This function is not currently available from Fortran.
Level: advanced
.seealso: VecCUDARestoreArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
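/*
   Illustrative usage sketch (not part of the original source): overwrite every entry of an
   existing VECSEQCUDA vector x without reading the old values, so no host-to-device transfer
   is triggered. FillKernel is a hypothetical user kernel that writes all n entries.

     PetscErrorCode ierr;
     PetscScalar    *d_x;
     PetscInt       n;
     ierr = VecGetLocalSize(x,&n);CHKERRQ(ierr);
     ierr = VecCUDAGetArrayWrite(x,&d_x);CHKERRQ(ierr);      // contents of d_x are undefined here
     hipLaunchKernelGGL(FillKernel, dim3((n+255)/256), dim3(256), 0, 0, d_x, n);
     ierr = VecCUDARestoreArrayWrite(x,&d_x);CHKERRQ(ierr);  // host copy is now marked out of date
*/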
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayWrite"
/*@C
VecCUDARestoreArrayWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayWrite().
Data on the host will be marked as out of date. Subsequent access of
the data on the host side e.g. with VecGetArray() will incur a device
to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAPlaceArray"
/*@C
VecCUDAPlaceArray - Allows one to replace the GPU array in a vector with a
GPU array provided by the user. This is useful to avoid copying an
array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
You can return to the original GPU array with a call to VecCUDAResetArray()
It is not possible to use VecCUDAPlaceArray() and VecPlaceArray() at the
same time on the same vector.
Level: developer
.seealso: VecPlaceArray(), VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAPlaceArray(Vec vin,PetscScalar *a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
if (((Vec_Seq*)vin->data)->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecCUDAPlaceArray()/VecPlaceArray() was already called on this vector, without a call to VecCUDAResetArray()/VecResetArray()");
((Vec_Seq*)vin->data)->unplacedarray = (PetscScalar *) ((Vec_CUDA*)vin->spptr)->GPUarray; /* save previous GPU array so reset can bring it back */
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
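/*
   Illustrative usage sketch (not part of the original source): temporarily make an existing
   VECSEQCUDA vector v operate on a user-managed device buffer, then return to the original
   storage with VecCUDAResetArray(). The buffer must hold at least the local length of v.

     PetscErrorCode ierr;
     hipError_t     cerr;
     PetscScalar    *d_buf;
     PetscInt       n;
     ierr = VecGetLocalSize(v,&n);CHKERRQ(ierr);
     cerr = hipMalloc((void**)&d_buf,n*sizeof(PetscScalar));CHKERRCUDA(cerr);
     ierr = VecCUDAPlaceArray(v,d_buf);CHKERRQ(ierr);   // v now wraps d_buf
     ...                                                // use v as usual
     ierr = VecCUDAResetArray(v);CHKERRQ(ierr);         // original GPU array is brought back
     cerr = hipFree(d_buf);CHKERRCUDA(cerr);            // the caller still owns d_buf
*/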
#undef __FUNCT__
#define __FUNCT__ "VecCUDAReplaceArray"
/*@C
VecCUDAReplaceArray - Allows one to replace the GPU array in a vector
with a GPU array provided by the user. This is useful to avoid copying
a GPU array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
This permanently replaces the GPU array and frees the memory associated
with the old GPU array.
The memory passed in CANNOT be freed by the user. It will be freed
when the vector is destroyed.
Not supported from Fortran
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecPlaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAPlaceArray(), VecReplaceArray()
@*/
PetscErrorCode VecCUDAReplaceArray(Vec vin,PetscScalar *a)
{
hipError_t err;
PetscFunctionBegin;
err = hipFree(((Vec_CUDA*)vin->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
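/*
   Illustrative usage sketch (not part of the original source): permanently hand a device buffer
   over to an existing VECSEQCUDA vector v. Ownership transfers to v, so the buffer must not be
   freed by the caller; it is released when v is destroyed.

     PetscErrorCode ierr;
     hipError_t     cerr;
     PetscScalar    *d_new;
     PetscInt       n;
     ierr = VecGetLocalSize(v,&n);CHKERRQ(ierr);
     cerr = hipMalloc((void**)&d_new,n*sizeof(PetscScalar));CHKERRCUDA(cerr);
     ierr = VecCUDAReplaceArray(v,d_new);CHKERRQ(ierr);   // do NOT hipFree(d_new) afterwards
*/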
#undef __FUNCT__
#define __FUNCT__ "VecCUDAResetArray"
/*@C
VecCUDAResetArray - Resets a vector to use its default memory. Call this
after the use of VecCUDAPlaceArray().
Not Collective
Input Parameters:
. vec - the vector
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecPlaceArray(), VecResetArray(), VecCUDAPlaceArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAResetArray(Vec vin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
((Vec_CUDA*)vin->spptr)->GPUarray = (PetscScalar *) ((Vec_Seq*)vin->data)->unplacedarray;
((Vec_Seq*)vin->data)->unplacedarray = 0;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
| d8aa121c300f4dabb7d35ed774696c2562cc2a7c.cu | /*
Implements the sequential cuda vectors.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <petsc/private/vecimpl.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#undef __FUNCT__
#define __FUNCT__ "VecCUDAAllocateCheck"
/*
Allocates space for the vector array on the GPU if it does not exist.
Does NOT change the PetscCUDAFlag for the vector
Does NOT zero the CUDA array
*/
PetscErrorCode VecCUDAAllocateCheck(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
cudaStream_t stream;
Vec_CUDA *veccuda;
PetscFunctionBegin;
if (!v->spptr) {
ierr = PetscMalloc(sizeof(Vec_CUDA),&v->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)v->spptr;
err = cudaMalloc((void**)&veccuda->GPUarray_allocated,sizeof(PetscScalar)*((PetscBLASInt)v->map->n));CHKERRCUDA(err);
veccuda->GPUarray = veccuda->GPUarray_allocated;
err = cudaStreamCreate(&stream);CHKERRCUDA(err);
veccuda->stream = stream;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
if (v->valid_GPU_array == PETSC_CUDA_UNALLOCATED) {
if (v->data && ((Vec_Seq*)v->data)->array) {
v->valid_GPU_array = PETSC_CUDA_CPU;
} else {
v->valid_GPU_array = PETSC_CUDA_GPU;
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPU"
/* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */
PetscErrorCode VecCUDACopyToGPU(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = cudaMemcpy(varray,((Vec_Seq*)v->data)->array,v->map->n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyToGPUSome"
PetscErrorCode VecCUDACopyToGPUSome(Vec v, PetscCUDAIndices ci)
{
PetscScalar *varray;
PetscErrorCode ierr;
cudaError_t err;
PetscScalar *cpuPtr, *gpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_CPU) {
s = (Vec_Seq*)v->data;
ierr = PetscLogEventBegin(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
varray = ((Vec_CUDA*)v->spptr)->GPUarray;
gpuPtr = varray + ptop_scatter->recvLowestIndex;
cpuPtr = s->array + ptop_scatter->recvLowestIndex;
    /* Note: this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = cudaMemcpy(gpuPtr,cpuPtr,ptop_scatter->nr*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
// Set the buffer states
v->valid_GPU_array = PETSC_CUDA_BOTH;
ierr = PetscLogEventEnd(VEC_CUDACopyToGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPU"
/*
VecCUDACopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU
*/
PetscErrorCode VecCUDACopyFromGPU(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
Vec_CUDA *veccuda;
PetscScalar *varray;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
veccuda=(Vec_CUDA*)v->spptr;
varray=veccuda->GPUarray;
err = cudaMemcpy(((Vec_Seq*)v->data)->array,varray,v->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr);
v->valid_GPU_array = PETSC_CUDA_BOTH;
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDACopyFromGPUSome"
/* Note that this function only copies *some* of the values up from the GPU to CPU,
   which means that we need to recombine the data at some point before using any of the standard functions.
   We could add another few flag types to keep track of this, or treat this like VecGetArray/VecRestoreArray,
   which always have to be called in pairs.
*/
PetscErrorCode VecCUDACopyFromGPUSome(Vec v, PetscCUDAIndices ci)
{
const PetscScalar *varray, *gpuPtr;
PetscErrorCode ierr;
cudaError_t err;
PetscScalar *cpuPtr;
Vec_Seq *s;
VecScatterCUDAIndices_PtoP ptop_scatter = (VecScatterCUDAIndices_PtoP)ci->scatter;
PetscFunctionBegin;
ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr);
if (v->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = PetscLogEventBegin(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
varray=((Vec_CUDA*)v->spptr)->GPUarray;
s = (Vec_Seq*)v->data;
gpuPtr = varray + ptop_scatter->sendLowestIndex;
cpuPtr = s->array + ptop_scatter->sendLowestIndex;
    /* Note: this code copies the smallest contiguous chunk of data
containing ALL of the indices */
err = cudaMemcpy(cpuPtr,gpuPtr,ptop_scatter->ns*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(v,&varray);CHKERRQ(ierr);
ierr = PetscLogEventEnd(VEC_CUDACopyFromGPUSome,v,0,0,0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECSEQCUDA - VECSEQCUDA = "seqcuda" - The basic sequential vector, modified to use CUDA
Options Database Keys:
. -vec_type seqcuda - sets the vector type to VECSEQCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq()
M*/
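/*
   Illustrative usage sketch (not part of the original source): create a sequential CUDA vector
   of local length n either by setting the type explicitly or via the options database
   (-vec_type seqcuda together with VecSetFromOptions()).

     Vec            v;
     PetscErrorCode ierr;
     ierr = VecCreate(PETSC_COMM_SELF,&v);CHKERRQ(ierr);
     ierr = VecSetSizes(v,n,n);CHKERRQ(ierr);
     ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);   // or VecSetFromOptions(v) with -vec_type seqcuda
     ierr = VecDestroy(&v);CHKERRQ(ierr);
*/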
#undef __FUNCT__
#define __FUNCT__ "VecAYPX_SeqCUDA"
PetscErrorCode VecAYPX_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
PetscScalar sone=1.0;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = cudaMemcpy(yarray,xarray,bn*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
} else if (alpha == (PetscScalar)1.0) {
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
} else {
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&sone,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPY_SeqCUDA"
PetscErrorCode VecAXPY_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
if (alpha != (PetscScalar)0.0) {
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseDivide_SeqCUDA"
PetscErrorCode VecPointwiseDivide_SeqCUDA(Vec win, Vec xin, Vec yin)
{
PetscInt n = xin->map->n;
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::divides<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = PetscLogFlops(n);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecWAXPY_SeqCUDA"
PetscErrorCode VecWAXPY_SeqCUDA(Vec win,PetscScalar alpha,Vec xin, Vec yin)
{
const PetscScalar *xarray=NULL,*yarray=NULL;
PetscScalar *warray=NULL;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(win->map->n,&bn);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
ierr = VecCopy_SeqCUDA(yin,win);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr);
err = cudaMemcpy(warray,yarray,win->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,warray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecMAXPY_SeqCUDA"
PetscErrorCode VecMAXPY_SeqCUDA(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n,j,j_rem;
PetscScalar alpha0,alpha1,alpha2,alpha3;
PetscFunctionBegin;
ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr);
switch (j_rem=nv&0x3) {
case 3:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha += 3;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
y += 3;
break;
case 2:
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha +=2;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
y +=2;
break;
case 1:
alpha0 = *alpha++;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
y +=1;
break;
}
for (j=j_rem; j<nv; j+=4) {
alpha0 = alpha[0];
alpha1 = alpha[1];
alpha2 = alpha[2];
alpha3 = alpha[3];
alpha += 4;
ierr = VecAXPY_SeqCUDA(xin,alpha0,y[0]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha1,y[1]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha2,y[2]);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(xin,alpha3,y[3]);CHKERRQ(ierr);
y += 4;
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDot_SeqCUDA"
PetscErrorCode VecDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
/* arguments y, x are reversed because BLAS complex conjugates the first argument, PETSc the second */
cberr = cublasXdot(cublasv2handle,bn,yarray,one,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n >0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
//
// CUDA kernels for MDot to follow
//
// set work group size to be a power of 2 (128 is usually a good compromise between portability and speed)
#define MDOT_WORKGROUP_SIZE 128
#define MDOT_WORKGROUP_NUM 128
#if !defined(PETSC_USE_COMPLEX)
// M = 2:
__global__ void VecMDot_SeqCUDA_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE];
}
}
// M = 3:
__global__ void VecMDot_SeqCUDA_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
}
}
// M = 4:
__global__ void VecMDot_SeqCUDA_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
}
}
// M = 8:
__global__ void VecMDot_SeqCUDA_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3,
const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7,
PetscInt size, PetscScalar *group_results)
{
__shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE];
PetscInt entries_per_group = (size - 1) / gridDim.x + 1;
entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work
PetscInt vec_start_index = blockIdx.x * entries_per_group;
PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size
PetscScalar entry_x = 0;
PetscScalar group_sum0 = 0;
PetscScalar group_sum1 = 0;
PetscScalar group_sum2 = 0;
PetscScalar group_sum3 = 0;
PetscScalar group_sum4 = 0;
PetscScalar group_sum5 = 0;
PetscScalar group_sum6 = 0;
PetscScalar group_sum7 = 0;
for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) {
entry_x = x[i]; // load only once from global memory!
group_sum0 += entry_x * y0[i];
group_sum1 += entry_x * y1[i];
group_sum2 += entry_x * y2[i];
group_sum3 += entry_x * y3[i];
group_sum4 += entry_x * y4[i];
group_sum5 += entry_x * y5[i];
group_sum6 += entry_x * y6[i];
group_sum7 += entry_x * y7[i];
}
tmp_buffer[threadIdx.x] = group_sum0;
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1;
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2;
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3;
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4;
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5;
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6;
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7;
// parallel reduction
for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) {
__syncthreads();
if (threadIdx.x < stride) {
tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ];
tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE];
tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE];
}
}
// write result of group to group_results
if (threadIdx.x == 0) {
group_results[blockIdx.x ] = tmp_buffer[0];
group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE];
group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE];
}
}
#endif /* !defined(PETSC_USE_COMPLEX) */
#undef __FUNCT__
#define __FUNCT__ "VecMDot_SeqCUDA"
PetscErrorCode VecMDot_SeqCUDA(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z)
{
PetscErrorCode ierr;
PetscInt i,n = xin->map->n,current_y_index = 0;
const PetscScalar *xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr;
PetscScalar *group_results_gpu;
#if !defined(PETSC_USE_COMPLEX)
PetscInt j;
PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel
#endif
cudaError_t cuda_ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUDA not positive.");
/* Handle the case of local size zero first */
if (!xin->map->n) {
for (i=0; i<nv; ++i) z[i] = 0;
PetscFunctionReturn(0);
}
// allocate scratchpad memory for the results of individual work groups:
cuda_ierr = cudaMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8);CHKERRCUDA(cuda_ierr);
ierr = VecCUDAGetArrayRead(xin,&xptr);CHKERRQ(ierr);
while (current_y_index < nv)
{
switch (nv - current_y_index) {
case 7:
case 6:
case 5:
case 4:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel4<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu);
      // copy the partial group results back to the CPU:
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<4; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
current_y_index += 4;
break;
case 3:
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel3<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu);
      // copy the partial group results back to the CPU:
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<3; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
current_y_index += 3;
break;
case 2:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel2<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,n,group_results_gpu);
      // copy the partial group results back to the CPU:
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<2; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
current_y_index += 2;
break;
case 1:
ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr);
current_y_index += 1;
break;
default: // 8 or more vectors left
ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y4ptr,one,xptr,one,&z[current_y_index+4]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y5ptr,one,xptr,one,&z[current_y_index+5]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y6ptr,one,xptr,one,&z[current_y_index+6]);CHKERRCUBLAS(cberr);
cberr = cublasXdot(cublasv2handle,bn,y7ptr,one,xptr,one,&z[current_y_index+7]);CHKERRCUBLAS(cberr);
#else
// run kernel:
VecMDot_SeqCUDA_kernel8<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu);
      // copy the partial group results back to the CPU:
cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr);
// sum group results into z:
for (j=0; j<8; ++j) {
z[current_y_index + j] = 0;
for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i];
}
#endif
ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr);
current_y_index += 8;
break;
}
}
ierr = VecCUDARestoreArrayRead(xin,&xptr);CHKERRQ(ierr);
cuda_ierr = cudaFree(group_results_gpu);CHKERRCUDA(cuda_ierr);
ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef MDOT_WORKGROUP_SIZE
#undef MDOT_WORKGROUP_NUM
#undef __FUNCT__
#define __FUNCT__ "VecSet_SeqCUDA"
PetscErrorCode VecSet_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscInt n = xin->map->n;
PetscScalar *xarray=NULL;
thrust::device_ptr<PetscScalar> xptr;
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
ierr = VecCUDAGetArrayWrite(xin,&xarray);CHKERRQ(ierr);
if (alpha == (PetscScalar)0.0) {
err = cudaMemset(xarray,0,n*sizeof(PetscScalar));CHKERRCUDA(err);
} else {
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::fill(xptr,xptr+n,alpha);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
  ierr = VecCUDARestoreArrayWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScale_SeqCUDA"
PetscErrorCode VecScale_SeqCUDA(Vec xin,PetscScalar alpha)
{
PetscScalar *xarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
if (alpha == (PetscScalar)0.0) {
ierr = VecSet_SeqCUDA(xin,alpha);CHKERRQ(ierr);
} else if (alpha != (PetscScalar)1.0) {
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&alpha,xarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecTDot_SeqCUDA"
PetscErrorCode VecTDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z)
{
const PetscScalar *xarray,*yarray;
PetscErrorCode ierr;
PetscBLASInt one=1,bn;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXdotu(cublasv2handle,bn,xarray,one,yarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
if (xin->map->n > 0) {
ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr);
}
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCopy_SeqCUDA"
PetscErrorCode VecCopy_SeqCUDA(Vec xin,Vec yin)
{
const PetscScalar *xarray;
PetscScalar *yarray;
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (xin != yin) {
if (xin->valid_GPU_array == PETSC_CUDA_GPU) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU if we are on the CPU*/
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (xin->valid_GPU_array == PETSC_CUDA_BOTH) {
/* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */
if (yin->valid_GPU_array == PETSC_CUDA_CPU) {
/* copy in CPU */
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_GPU) {
/* copy in GPU */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else if (yin->valid_GPU_array == PETSC_CUDA_BOTH) {
      /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck);
         default to copy in GPU (this is an arbitrary choice) */
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr);
}
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecSwap_SeqCUDA"
PetscErrorCode VecSwap_SeqCUDA(Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscBLASInt one = 1,bn;
PetscScalar *xarray,*yarray;
cublasStatus_t cberr;
PetscFunctionBegin;
ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr);
if (xin != yin) {
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXswap(cublasv2handle,bn,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBY_SeqCUDA"
PetscErrorCode VecAXPBY_SeqCUDA(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin)
{
PetscErrorCode ierr;
PetscScalar a = alpha,b = beta;
const PetscScalar *xarray;
PetscScalar *yarray;
PetscBLASInt one = 1, bn;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr);
if (a == (PetscScalar)0.0) {
ierr = VecScale_SeqCUDA(yin,beta);CHKERRQ(ierr);
} else if (b == (PetscScalar)1.0) {
ierr = VecAXPY_SeqCUDA(yin,alpha,xin);CHKERRQ(ierr);
} else if (a == (PetscScalar)1.0) {
ierr = VecAYPX_SeqCUDA(yin,beta,xin);CHKERRQ(ierr);
} else if (b == (PetscScalar)0.0) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err);
cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr);
ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
cberr = cublasXscal(cublasv2handle,bn,&beta,yarray,one);CHKERRCUBLAS(cberr);
cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecAXPBYPCZ_SeqCUDA"
PetscErrorCode VecAXPBYPCZ_SeqCUDA(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin)
{
PetscErrorCode ierr;
PetscInt n = zin->map->n;
PetscFunctionBegin;
if (gamma == (PetscScalar)1.0) {
/* z = ax + b*y + z */
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
} else {
/* z = a*x + b*y + c*z */
ierr = VecScale_SeqCUDA(zin,gamma);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr);
ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr);
ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr);
}
ierr = WaitForGPU();CHKERRCUDA(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecPointwiseMult_SeqCUDA"
PetscErrorCode VecPointwiseMult_SeqCUDA(Vec win,Vec xin,Vec yin)
{
PetscInt n = win->map->n;
const PetscScalar *xarray,*yarray;
PetscScalar *warray;
thrust::device_ptr<const PetscScalar> xptr,yptr;
thrust::device_ptr<PetscScalar> wptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr);
try {
wptr = thrust::device_pointer_cast(warray);
xptr = thrust::device_pointer_cast(xarray);
yptr = thrust::device_pointer_cast(yarray);
thrust::transform(xptr,xptr+n,yptr,wptr,thrust::multiplies<PetscScalar>());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayReadWrite(win,&warray);CHKERRQ(ierr);
ierr = PetscLogFlops(n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* should do infinity norm in cuda */
#undef __FUNCT__
#define __FUNCT__ "VecNorm_SeqCUDA"
PetscErrorCode VecNorm_SeqCUDA(Vec xin,NormType type,PetscReal *z)
{
PetscErrorCode ierr;
PetscInt n = xin->map->n;
PetscBLASInt one = 1, bn;
const PetscScalar *xarray;
cublasStatus_t cberr;
cudaError_t err;
PetscFunctionBegin;
ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr);
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXnrm2(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
    PetscInt    i;
    PetscScalar zmax;
    ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
    cberr = cublasIXamax(cublasv2handle,bn,xarray,one,&i);CHKERRCUBLAS(cberr);
    err = cudaMemcpy(&zmax,xarray+i-1,sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err); /* cuBLAS amax returns a 1-based index */
    *z = PetscAbsScalar(zmax);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
} else if (type == NORM_1) {
ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr);
cberr = cublasXasum(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr);
ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
ierr = VecNorm_SeqCUDA(xin,NORM_1,z);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,z+1);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDotNorm2_SeqCUDA"
PetscErrorCode VecDotNorm2_SeqCUDA(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm)
{
PetscErrorCode ierr;
PetscReal n=s->map->n;
const PetscScalar *sarray,*tarray;
PetscFunctionBegin;
ierr = VecCUDAGetArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(s,t,dp);CHKERRQ(ierr);
ierr = VecDot_SeqCUDA(t,t,nm);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(s,&sarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(t,&tarray);CHKERRQ(ierr);
ierr = WaitForGPU();CHKERRCUDA(ierr);
ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecDestroy_SeqCUDA"
PetscErrorCode VecDestroy_SeqCUDA(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
}
if (((Vec_CUDA*)v->spptr)->stream) {
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_SeqCUDA_Private(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#if defined(PETSC_USE_COMPLEX)
struct conjugate
{
__host__ __device__
PetscScalar operator()(PetscScalar x)
{
return PetscConj(x);
}
};
#endif
#undef __FUNCT__
#define __FUNCT__ "VecConjugate_SeqCUDA"
PetscErrorCode VecConjugate_SeqCUDA(Vec xin)
{
PetscScalar *xarray;
PetscErrorCode ierr;
#if defined(PETSC_USE_COMPLEX)
PetscInt n = xin->map->n;
thrust::device_ptr<PetscScalar> xptr;
#endif
PetscFunctionBegin;
ierr = VecCUDAGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
#if defined(PETSC_USE_COMPLEX)
try {
xptr = thrust::device_pointer_cast(xarray);
thrust::transform(xptr,xptr+n,xptr,conjugate());
ierr = WaitForGPU();CHKERRCUDA(ierr);
} catch (char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex);
}
#endif
ierr = VecCUDARestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecGetLocalVector_SeqCUDA"
PetscErrorCode VecGetLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
cudaError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (w->data) {
if (((Vec_Seq*)w->data)->array_allocated) {
ierr = PetscFree(((Vec_Seq*)w->data)->array_allocated);CHKERRQ(ierr);
}
((Vec_Seq*)w->data)->array = NULL;
((Vec_Seq*)w->data)->unplacedarray = NULL;
}
if (w->spptr) {
if (((Vec_CUDA*)w->spptr)->GPUarray) {
err = cudaFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
}
err = cudaStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
if (v->petscnative) {
ierr = PetscFree(w->data);CHKERRQ(ierr);
w->data = v->data;
w->valid_GPU_array = v->valid_GPU_array;
w->spptr = v->spptr;
ierr = PetscObjectStateIncrease((PetscObject)w);CHKERRQ(ierr);
} else {
ierr = VecGetArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
w->valid_GPU_array = PETSC_CUDA_CPU;
ierr = VecCUDAAllocateCheck(w);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecRestoreLocalVector_SeqCUDA"
PetscErrorCode VecRestoreLocalVector_SeqCUDA(Vec v,Vec w)
{
VecType t;
PetscErrorCode ierr;
cudaError_t err;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(v,VEC_CLASSID,1);
PetscValidHeaderSpecific(w,VEC_CLASSID,2);
ierr = VecGetType(w,&t);CHKERRQ(ierr);
ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed to argument #2. Should be %s.\n",t,VECSEQCUDA);
if (v->petscnative) {
v->data = w->data;
v->valid_GPU_array = w->valid_GPU_array;
v->spptr = w->spptr;
ierr = VecCUDACopyFromGPU(v);CHKERRQ(ierr);
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
w->data = 0;
w->valid_GPU_array = PETSC_CUDA_UNALLOCATED;
w->spptr = 0;
} else {
ierr = VecRestoreArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr);
if ((Vec_CUDA*)w->spptr) {
err = cudaFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)w->spptr)->GPUarray = NULL;
err = cudaStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(w->spptr);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayReadWrite"
/*@C
VecCUDAGetArrayReadWrite - Provides access to the CUDA buffer inside a vector.
This function has semantics similar to VecGetArray(): the pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayReadWrite() assumes that
the user will modify the vector data. This is similar to
intent(inout) in fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayReadWrite(). Upon restoring the vector data
the data on the host will be marked as out of date. A subsequent
access of the host data will thus incur a data transfer from the
device to the host.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA device pointer
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
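/* Illustrative sketch (not compiled): a typical caller pairs VecCUDAGetArrayReadWrite() with
   VecCUDARestoreArrayReadWrite() around device work, mirroring VecScale_SeqCUDA() above.
   The helper name is illustrative only. */
#if 0
static PetscErrorCode ExampleScaleOnDevice(Vec v,PetscScalar alpha)
{
  PetscErrorCode ierr;
  PetscScalar    *d_v;
  PetscBLASInt   one = 1,bn;
  cublasStatus_t cberr;
  PetscFunctionBegin;
  ierr  = PetscBLASIntCast(v->map->n,&bn);CHKERRQ(ierr);
  ierr  = VecCUDAGetArrayReadWrite(v,&d_v);CHKERRQ(ierr);      /* consistent device view, copied to GPU if needed */
  cberr = cublasXscal(cublasv2handle,bn,&alpha,d_v,one);CHKERRCUBLAS(cberr);
  ierr  = VecCUDARestoreArrayReadWrite(v,&d_v);CHKERRQ(ierr);  /* host copy is now marked out of date */
  PetscFunctionReturn(0);
}
#endif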
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayReadWrite"
/*@C
VecCUDARestoreArrayReadWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayReadWrite().
This marks the host data as out of date. Subsequent access to the
vector data on the host side with for instance VecGetArray() incurs a
data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayReadWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayReadWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayRead"
/*@C
VecCUDAGetArrayRead - Provides read access to the CUDA buffer inside a vector.
This function is analogous to VecGetArrayRead(): The pointer
returned by this function points to a consistent view of the vector
data. This may involve a copy operation of data from the host to the
device if the data on the device is out of date. If the device
memory hasn't been allocated previously it will be allocated as part
of this function call. VecCUDAGetArrayRead() assumes that the
   user will not modify the vector data. This is analogous to
intent(in) in Fortran.
The CUDA device pointer has to be released by calling
VecCUDARestoreArrayRead(). If the data on the host side was
previously up to date it will remain so, i.e. data on both the device
and the host is up to date. Accessing data on the host side does not
incur a device to host data transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDARestoreArrayRead(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayRead(Vec v, const PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDACopyToGPU(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
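/* Illustrative sketch (not compiled): read-only access leaves an up-to-date host copy valid;
   the pattern mirrors VecTDot_SeqCUDA() above. The helper name is illustrative only. */
#if 0
static PetscErrorCode ExampleDotOnDevice(Vec x,Vec y,PetscScalar *z)
{
  PetscErrorCode    ierr;
  const PetscScalar *d_x,*d_y;
  PetscBLASInt      one = 1,bn;
  cublasStatus_t    cberr;
  PetscFunctionBegin;
  ierr  = PetscBLASIntCast(x->map->n,&bn);CHKERRQ(ierr);
  ierr  = VecCUDAGetArrayRead(x,&d_x);CHKERRQ(ierr);
  ierr  = VecCUDAGetArrayRead(y,&d_y);CHKERRQ(ierr);
  cberr = cublasXdotu(cublasv2handle,bn,d_x,one,d_y,one,z);CHKERRCUBLAS(cberr);
  ierr  = VecCUDARestoreArrayRead(y,&d_y);CHKERRQ(ierr);  /* host data, if it was up to date, stays up to date */
  ierr  = VecCUDARestoreArrayRead(x,&d_x);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif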
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayRead"
/*@C
VecCUDARestoreArrayRead - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayRead().
If the data on the host side was previously up to date it will remain
so, i.e. data on both the device and the host is up to date.
Accessing data on the host side e.g. with VecGetArray() does not
incur a device to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayRead() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayRead(Vec v, const PetscScalar **a)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAGetArrayWrite"
/*@C
VecCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a vector.
The data pointed to by the device pointer is uninitialized. The user
may not read from this data. Furthermore, the entire array needs to
be filled by the user to obtain well-defined behaviour. The device
memory will be allocated by this function if it hasn't been allocated
previously. This is analogous to intent(out) in Fortran.
The device pointer needs to be released with
VecCUDARestoreArrayWrite(). When the pointer is released the
   host data of the vector is marked as out of date. Subsequent access
of the host data with e.g. VecGetArray() incurs a device to host data
transfer.
Input Parameter:
. v - the vector
Output Parameter:
. a - the CUDA pointer
Fortran note:
This function is not currently available from Fortran.
Level: advanced
.seealso: VecCUDARestoreArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDAGetArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
*a = 0;
ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr);
*a = ((Vec_CUDA*)v->spptr)->GPUarray;
PetscFunctionReturn(0);
}
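/* Illustrative sketch (not compiled): write-only access never copies host data to the device,
   so the caller must fill the entire array, as VecSet_SeqCUDA() does above. The helper name is
   illustrative only. */
#if 0
static PetscErrorCode ExampleFillOnDevice(Vec v,PetscScalar alpha)
{
  PetscErrorCode                  ierr;
  PetscScalar                     *d_v;
  thrust::device_ptr<PetscScalar> vptr;
  PetscFunctionBegin;
  ierr = VecCUDAGetArrayWrite(v,&d_v);CHKERRQ(ierr);      /* may allocate; contents are uninitialized */
  vptr = thrust::device_pointer_cast(d_v);
  thrust::fill(vptr,vptr+v->map->n,alpha);                /* every entry must be written */
  ierr = VecCUDARestoreArrayWrite(v,&d_v);CHKERRQ(ierr);  /* host copy is now marked out of date */
  PetscFunctionReturn(0);
}
#endif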
#undef __FUNCT__
#define __FUNCT__ "VecCUDARestoreArrayWrite"
/*@C
VecCUDARestoreArrayWrite - Restore a CUDA device pointer previously acquired with VecCUDAGetArrayWrite().
Data on the host will be marked as out of date. Subsequent access of
the data on the host side e.g. with VecGetArray() will incur a device
to host data transfer.
Input Parameter:
+ v - the vector
- a - the CUDA device pointer. This pointer is invalid after
VecCUDARestoreArrayWrite() returns.
Fortran note:
This function is not currently available from Fortran.
Level: intermediate
.seealso: VecCUDAGetArrayWrite(), VecCUDAGetArrayReadWrite(), VecCUDAGetArrayRead(), VecCUDAGetArrayWrite(), VecGetArray(), VecRestoreArray(), VecGetArrayRead()
@*/
PETSC_EXTERN PetscErrorCode VecCUDARestoreArrayWrite(Vec v, PetscScalar **a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
v->valid_GPU_array = PETSC_CUDA_GPU;
ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAPlaceArray"
/*@C
VecCUDAPlaceArray - Allows one to replace the GPU array in a vector with a
GPU array provided by the user. This is useful to avoid copying an
array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
You can return to the original GPU array with a call to VecCUDAResetArray()
It is not possible to use VecCUDAPlaceArray() and VecPlaceArray() at the
same time on the same vector.
Level: developer
.seealso: VecPlaceArray(), VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAPlaceArray(Vec vin,PetscScalar *a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
if (((Vec_Seq*)vin->data)->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecCUDAPlaceArray()/VecPlaceArray() was already called on this vector, without a call to VecCUDAResetArray()/VecResetArray()");
((Vec_Seq*)vin->data)->unplacedarray = (PetscScalar *) ((Vec_CUDA*)vin->spptr)->GPUarray; /* save previous GPU array so reset can bring it back */
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
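/* Illustrative sketch (not compiled): temporarily wrapping a user-provided GPU buffer and then
   restoring the vector's own array with VecCUDAResetArray(). The helper name is illustrative only. */
#if 0
static PetscErrorCode ExamplePlaceAndReset(Vec v,PetscScalar *user_gpu_array)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = VecCUDAPlaceArray(v,user_gpu_array);CHKERRQ(ierr);  /* v now operates on the user buffer */
  /* ... use v; results are written into user_gpu_array ... */
  ierr = VecCUDAResetArray(v);CHKERRQ(ierr);                 /* the original GPU array is brought back */
  PetscFunctionReturn(0);
}
#endif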
#undef __FUNCT__
#define __FUNCT__ "VecCUDAReplaceArray"
/*@C
VecCUDAReplaceArray - Allows one to replace the GPU array in a vector
with a GPU array provided by the user. This is useful to avoid copying
a GPU array into a vector.
Not Collective
Input Parameters:
+ vec - the vector
- array - the GPU array
Notes:
This permanently replaces the GPU array and frees the memory associated
with the old GPU array.
The memory passed in CANNOT be freed by the user. It will be freed
when the vector is destroyed.
Not supported from Fortran
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecPlaceArray(), VecResetArray(), VecCUDAResetArray(), VecCUDAPlaceArray(), VecReplaceArray()
@*/
PetscErrorCode VecCUDAReplaceArray(Vec vin,PetscScalar *a)
{
cudaError_t err;
PetscFunctionBegin;
err = cudaFree(((Vec_CUDA*)vin->spptr)->GPUarray);CHKERRCUDA(err);
((Vec_CUDA*)vin->spptr)->GPUarray = a;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecCUDAResetArray"
/*@C
VecCUDAResetArray - Resets a vector to use its default memory. Call this
after the use of VecCUDAPlaceArray().
Not Collective
Input Parameters:
. vec - the vector
Level: developer
.seealso: VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecPlaceArray(), VecResetArray(), VecCUDAPlaceArray(), VecCUDAReplaceArray()
@*/
PetscErrorCode VecCUDAResetArray(Vec vin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCUDACopyToGPU(vin);CHKERRQ(ierr);
((Vec_CUDA*)vin->spptr)->GPUarray = (PetscScalar *) ((Vec_Seq*)vin->data)->unplacedarray;
((Vec_Seq*)vin->data)->unplacedarray = 0;
vin->valid_GPU_array = PETSC_CUDA_GPU;
PetscFunctionReturn(0);
}
|
d2a95d5452fd26c98cf37c62ccf75d46b4ea3c73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file krum.cu
 * @author Sébastien Rouault <[email protected]>
*
* @section LICENSE
*
 * Copyright © 2018-2019 École Polytechnique Fédérale de Lausanne (EPFL).
* See LICENSE file.
*
* @section DESCRIPTION
*
* Multi-Krum GAR, native CUDA implementation.
*
* Based on the algorithm introduced in the following paper:
* Blanchard Peva, El Mhamdi El Mahdi, Guerraoui Rachid, and Stainer Julien.
* Machine learning with adversaries: Byzantine tolerant gradient descent.
 * In Advances in Neural Information Processing Systems 30, pp.118–128.
* Curran Associates, Inc., 2017.
**/
// Compiler version check
#ifndef __HIPCC__
#error This translation unit requires a CUDA compiler
#endif
#if __cplusplus < 201103L
#error This translation unit requires at least a C++11 compiler
#endif
#ifndef __GNUC__
#error This translation unit requires a GNU C++ compiler
#endif
// External headers
#include <hipcub/hipcub.hpp>
#include <vector>
// Internal headers
#include <common.hpp>
#include <cudarray.cu.hpp>
#include <operations.cu.hpp>
#include <pytorch.hpp>
#include "rule.hpp"
// -------------------------------------------------------------------------- //
// CUDA kernel templates
namespace {
constexpr size_t nb_threads_per_block = 128;
constexpr size_t nb_items_per_thread = 1;
/** Get the number of blocks needed to process the entries with the current configuration.
* @param nb_items Number of items to process
* @return Required number of blocks
**/
constexpr size_t nb_blocks(size_t nb_items) {
return (nb_items + nb_items_per_thread * nb_threads_per_block - 1) / (nb_items_per_thread * nb_threads_per_block);
}
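/** Worked example of the formula above: with nb_threads_per_block = 128 and
 * nb_items_per_thread = 1, nb_blocks(1000) = (1000 + 128 - 1) / 128 = 8,
 * i.e. the ceiling of 1000 / 128.
**/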
}
// -------------------------------------------------------------------------- //
// Native implementation
namespace {
/** Aggregate the given tensor(s).
* @param inputs Given n tensors(s), at least one, must all be continuous 1-dim tensor(s) of same (non-null) size, data type and device
* @param f Number of Byzantine tensor(s) to tolerate, must be positive
* @param m Number of lowest-scoring tensor(s) to average as output, must be positive and not greater than n - f - 2
* @return Aggregated 1-dim tensor of same size, data type and device
**/
template<class T> ::torch::Tensor aggregate(::std::vector<::torch::Tensor> const& inputs, size_t f, size_t m) {
// Initialize
auto const n = inputs.size();
auto const cn = n;
auto const kn = n * (n - 1);
auto const d = inputs[0].size(0);
auto const length_distances = kn / 2;
hipStream_t stream = 0; // Default stream
size_t length_reduction;
CUDA_ASSERT(hipcub::DeviceReduce::Sum(nullptr, length_reduction, static_cast<T const*>(nullptr), static_cast<T*>(nullptr), d, stream));
auto inter = vlcudarray<T>(length_distances + d + length_reduction); // Distances + intermediate vector + reduction vector
auto distances = inter.get();
auto intergrad = distances + length_distances;
auto reduction = intergrad + d;
auto select = vlcudarray<size_t>(m); // Index of the selected gradients
auto selected = select.get();
auto output = ::torch::empty_like(inputs[0]);
// Process
auto pos_to_gradid = vlarray<size_t>(kn);
{ // Distance computations
auto cursor = distances;
auto poscur = pos_to_gradid.get();
for (size_t i = 0; i < n - 1; ++i) {
for (size_t j = i + 1; j < n; ++j) {
hipLaunchKernelGGL(( squared_difference<T>), dim3(nb_blocks(d)), dim3(nb_threads_per_block), 0, stream, inputs[i].data_ptr<T>(), inputs[j].data_ptr<T>(), intergrad, d);
CUDA_ASSERT_KERNEL();
CUDA_ASSERT(hipcub::DeviceReduce::Sum(reduction, length_reduction, intergrad, cursor, d, stream));
++cursor;
*(poscur++) = i;
*(poscur++) = j;
}
}
}
T cpu_scores[cn];
{ // Score computations
T cpu_distances[length_distances];
CUDA_ASSERT(hipMemcpyAsync(cpu_distances, distances, length_distances * sizeof(T), hipMemcpyDeviceToHost, stream));
CUDA_ASSERT(hipStreamSynchronize(stream));
size_t cpu_ranks[length_distances]; // Indexes for 'cpu_distances', so that 'cpu_distances[cpu_ranks[i]]' increases with 'i' ('nan' is treated as '+inf')
{ // Compute 'cpu_ranks'
for (size_t i = 0; i < length_distances; ++i)
cpu_ranks[i] = i;
T* cpu_dist_ptr = cpu_distances;
::std::sort(cpu_ranks, cpu_ranks + length_distances, [cpu_dist_ptr](size_t a, size_t b) {
auto&& x = cpu_dist_ptr[a];
if (unlikely(!::std::isfinite(x)))
return false;
auto&& y = cpu_dist_ptr[b];
if (unlikely(!::std::isfinite(y)))
return true;
return x < y;
});
}
for (size_t i = 0; i < n; ++i) { // Compute 'scores'
T score = 0;
size_t count = n - f - 2;
for (auto* cursor = cpu_ranks; count > 0; ++cursor) {
auto index = *cursor;
if (pos_to_gradid[2 * index] == i || pos_to_gradid[2 * index + 1] == i) { // Associated distance concerns current gradient
score += cpu_distances[index];
--count;
}
}
cpu_scores[i] = score;
}
}
{ // Select the 'm' smallest scoring gradients and average them
size_t cpu_selected[cn]; // Index of the selected gradients
{ // Compute 'cpu_selected'
for (size_t i = 0; i < cn; ++i)
cpu_selected[i] = i;
T* cpu_scores_ptr = cpu_scores;
::std::nth_element(cpu_selected, cpu_selected + m, cpu_selected + n, [cpu_scores_ptr](size_t a, size_t b) {
auto&& x = cpu_scores_ptr[a];
if (unlikely(!::std::isfinite(x)))
return false;
auto&& y = cpu_scores_ptr[b];
if (unlikely(!::std::isfinite(y)))
return true;
return x < y;
});
}
CUDA_ASSERT(hipMemcpyAsync(selected, cpu_selected, m * sizeof(size_t), hipMemcpyHostToDevice, stream));
CUDArray<T const*> inputs_array{inputs.data(), inputs.size(), [](::torch::Tensor const& elem) -> T const* { return elem.data_ptr<T>(); }};
hipLaunchKernelGGL(( selection_average<T>), dim3(nb_blocks(d)), dim3(nb_threads_per_block), 0, stream, inputs_array.data(), output.data_ptr<T>(), d, reinterpret_cast<size_t*>(selected), m);
CUDA_ASSERT_KERNEL();
CUDA_ASSERT(hipStreamSynchronize(stream)); // FIXME: Really needed?
}
// Return
return output;
}
}
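/* Illustrative sketch (not compiled, hypothetical caller): invoking the public entry points
   defined below. The chosen f and m values are assumptions; the documented constraint is
   0 < m <= n - f - 2 (e.g. n = 10 input gradients with f = 2 allows m up to 6). */
#if 0
static ::torch::Tensor example_krum_call(::std::vector<::torch::Tensor> const& gradients) {
    size_t const f = 2; // number of Byzantine gradients tolerated
    size_t const m = 4; // lowest-scoring gradients averaged; must satisfy m <= gradients.size() - f - 2
    return Krum::aggregate_gpu_float(gradients, f, m);
}
#endif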
// -------------------------------------------------------------------------- //
// Rule member function definitions
/** Forward to the specialized aggregation function.
* @param ... Forwarded argument
* @return Forwarded return value
**/
::torch::Tensor Krum::aggregate_gpu_float(::std::vector<::torch::Tensor> const& inputs, size_t f, size_t m) {
return aggregate<float>(inputs, f, m);
}
::torch::Tensor Krum::aggregate_gpu_double(::std::vector<::torch::Tensor> const& inputs, size_t f, size_t m) {
return aggregate<double>(inputs, f, m);
}
| d2a95d5452fd26c98cf37c62ccf75d46b4ea3c73.cu | /**
* @file krum.cu
* @author Sébastien Rouault <[email protected]>
*
* @section LICENSE
*
* Copyright © 2018-2019 École Polytechnique Fédérale de Lausanne (EPFL).
* See LICENSE file.
*
* @section DESCRIPTION
*
* Multi-Krum GAR, native CUDA implementation.
*
* Based on the algorithm introduced in the following paper:
* Blanchard Peva, El Mhamdi El Mahdi, Guerraoui Rachid, and Stainer Julien.
* Machine learning with adversaries: Byzantine tolerant gradient descent.
* In Advances in Neural Information Processing Systems 30, pp.118–128.
* Curran Associates, Inc., 2017.
**/
// Compiler version check
#ifndef __CUDACC__
#error This translation unit requires a CUDA compiler
#endif
#if __cplusplus < 201103L
#error This translation unit requires at least a C++11 compiler
#endif
#ifndef __GNUC__
#error This translation unit requires a GNU C++ compiler
#endif
// External headers
#include <cub/cub.cuh>
#include <vector>
// Internal headers
#include <common.hpp>
#include <cudarray.cu.hpp>
#include <operations.cu.hpp>
#include <pytorch.hpp>
#include "rule.hpp"
// -------------------------------------------------------------------------- //
// CUDA kernel templates
namespace {
constexpr size_t nb_threads_per_block = 128;
constexpr size_t nb_items_per_thread = 1;
/** Get the number of blocks needed to process the entries with the current configuration.
* @param nb_items Number of items to process
* @return Required number of blocks
**/
constexpr size_t nb_blocks(size_t nb_items) {
return (nb_items + nb_items_per_thread * nb_threads_per_block - 1) / (nb_items_per_thread * nb_threads_per_block);
}
}
// -------------------------------------------------------------------------- //
// Native implementation
namespace {
/** Aggregate the given tensor(s).
* @param inputs Given n tensors(s), at least one, must all be continuous 1-dim tensor(s) of same (non-null) size, data type and device
* @param f Number of Byzantine tensor(s) to tolerate, must be positive
* @param m Number of lowest-scoring tensor(s) to average as output, must be positive and not greater than n - f - 2
* @return Aggregated 1-dim tensor of same size, data type and device
**/
template<class T> ::torch::Tensor aggregate(::std::vector<::torch::Tensor> const& inputs, size_t f, size_t m) {
// Initialize
auto const n = inputs.size();
auto const cn = n;
auto const kn = n * (n - 1);
auto const d = inputs[0].size(0);
auto const length_distances = kn / 2;
cudaStream_t stream = 0; // Default stream
size_t length_reduction;
CUDA_ASSERT(cub::DeviceReduce::Sum(nullptr, length_reduction, static_cast<T const*>(nullptr), static_cast<T*>(nullptr), d, stream));
auto inter = vlcudarray<T>(length_distances + d + length_reduction); // Distances + intermediate vector + reduction vector
auto distances = inter.get();
auto intergrad = distances + length_distances;
auto reduction = intergrad + d;
auto select = vlcudarray<size_t>(m); // Index of the selected gradients
auto selected = select.get();
auto output = ::torch::empty_like(inputs[0]);
// Process
auto pos_to_gradid = vlarray<size_t>(kn);
{ // Distance computations
auto cursor = distances;
auto poscur = pos_to_gradid.get();
for (size_t i = 0; i < n - 1; ++i) {
for (size_t j = i + 1; j < n; ++j) {
squared_difference<T><<<nb_blocks(d), nb_threads_per_block, 0, stream>>>(inputs[i].data_ptr<T>(), inputs[j].data_ptr<T>(), intergrad, d);
CUDA_ASSERT_KERNEL();
CUDA_ASSERT(cub::DeviceReduce::Sum(reduction, length_reduction, intergrad, cursor, d, stream));
++cursor;
*(poscur++) = i;
*(poscur++) = j;
}
}
}
T cpu_scores[cn];
{ // Score computations
T cpu_distances[length_distances];
CUDA_ASSERT(cudaMemcpyAsync(cpu_distances, distances, length_distances * sizeof(T), cudaMemcpyDeviceToHost, stream));
CUDA_ASSERT(cudaStreamSynchronize(stream));
size_t cpu_ranks[length_distances]; // Indexes for 'cpu_distances', so that 'cpu_distances[cpu_ranks[i]]' increases with 'i' ('nan' is treated as '+inf')
{ // Compute 'cpu_ranks'
for (size_t i = 0; i < length_distances; ++i)
cpu_ranks[i] = i;
T* cpu_dist_ptr = cpu_distances;
::std::sort(cpu_ranks, cpu_ranks + length_distances, [cpu_dist_ptr](size_t a, size_t b) {
auto&& x = cpu_dist_ptr[a];
if (unlikely(!::std::isfinite(x)))
return false;
auto&& y = cpu_dist_ptr[b];
if (unlikely(!::std::isfinite(y)))
return true;
return x < y;
});
}
for (size_t i = 0; i < n; ++i) { // Compute 'scores'
T score = 0;
size_t count = n - f - 2;
for (auto* cursor = cpu_ranks; count > 0; ++cursor) {
auto index = *cursor;
if (pos_to_gradid[2 * index] == i || pos_to_gradid[2 * index + 1] == i) { // Associated distance concerns current gradient
score += cpu_distances[index];
--count;
}
}
cpu_scores[i] = score;
}
}
{ // Select the 'm' smallest scoring gradients and average them
size_t cpu_selected[cn]; // Index of the selected gradients
{ // Compute 'cpu_selected'
for (size_t i = 0; i < cn; ++i)
cpu_selected[i] = i;
T* cpu_scores_ptr = cpu_scores;
::std::nth_element(cpu_selected, cpu_selected + m, cpu_selected + n, [cpu_scores_ptr](size_t a, size_t b) {
auto&& x = cpu_scores_ptr[a];
if (unlikely(!::std::isfinite(x)))
return false;
auto&& y = cpu_scores_ptr[b];
if (unlikely(!::std::isfinite(y)))
return true;
return x < y;
});
}
CUDA_ASSERT(cudaMemcpyAsync(selected, cpu_selected, m * sizeof(size_t), cudaMemcpyHostToDevice, stream));
CUDArray<T const*> inputs_array{inputs.data(), inputs.size(), [](::torch::Tensor const& elem) -> T const* { return elem.data_ptr<T>(); }};
selection_average<T><<<nb_blocks(d), nb_threads_per_block, 0, stream>>>(inputs_array.data(), output.data_ptr<T>(), d, reinterpret_cast<size_t*>(selected), m);
CUDA_ASSERT_KERNEL();
CUDA_ASSERT(cudaStreamSynchronize(stream)); // FIXME: Really needed?
}
// Return
return output;
}
}
// -------------------------------------------------------------------------- //
// Rule member function definitions
/** Forward to the specialized aggregation function.
* @param ... Forwarded argument
* @return Forwarded return value
**/
::torch::Tensor Krum::aggregate_gpu_float(::std::vector<::torch::Tensor> const& inputs, size_t f, size_t m) {
return aggregate<float>(inputs, f, m);
}
::torch::Tensor Krum::aggregate_gpu_double(::std::vector<::torch::Tensor> const& inputs, size_t f, size_t m) {
return aggregate<double>(inputs, f, m);
}
|
d2a11430a61894163d4b5fa5b883a07fa2a4f4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "scan.cuh"
#include <iostream>
using namespace std;
int main(int argc, const char *argv[]) {
string N, Threads;
if (argc > 1) {
N = string(argv[1]);
}
unsigned int n = atoi(N.c_str());
float *in = new float[n];
float *out = new float[n];
for (unsigned int i = 0; i < n; i++) {
in[i] = 1;
out[i] = 0;
}
unsigned int threads_per_block = 1024;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
scan(in, out, n, threads_per_block);
hipEventRecord(stop);
hipEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
hipEventElapsedTime(&ms, start, stop);
cout << out[n - 1] << endl;
cout << ms << endl;
delete[] out;
delete[] in;
return 0;
}
| d2a11430a61894163d4b5fa5b883a07fa2a4f4d5.cu | #include "scan.cuh"
#include <iostream>
using namespace std;
int main(int argc, const char *argv[]) {
string N, Threads;
if (argc > 1) {
N = string(argv[1]);
}
unsigned int n = atoi(N.c_str());
float *in = new float[n];
float *out = new float[n];
for (unsigned int i = 0; i < n; i++) {
in[i] = 1;
out[i] = 0;
}
unsigned int threads_per_block = 1024;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
scan(in, out, n, threads_per_block);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
cudaEventElapsedTime(&ms, start, stop);
cout << out[n - 1] << endl;
cout << ms << endl;
delete[] out;
delete[] in;
return 0;
}
|
4a9d59930d7909d617af150ab3d745bbd0ef514d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sqr_norm_kernel(int n, float *out, float *data, float rows, float cols)
{
extern __shared__ float sdata[];
int i = blockDim.x * threadIdx.y + threadIdx.x;
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
sdata[i] = 0;
sdata[i] = data[threadId] * data[threadId] + data[threadId + 1] * data[threadId + 1];
__syncthreads();
for (unsigned int s = (blockDim.x * blockDim.y + 1) / 2, old_s = blockDim.x * blockDim.y; s > 0; s >>= 1) {
if (old_s & 1) s += 1;
if (i < s && i + s < old_s) {
sdata[i] += sdata[i + s];
}
old_s = s;
__syncthreads();
}
if (i == 0) {
atomicAdd(&out[blockId / n], sdata[0] / (rows * cols));
}
} | 4a9d59930d7909d617af150ab3d745bbd0ef514d.cu | #include "includes.h"
__global__ void sqr_norm_kernel(int n, float *out, float *data, float rows, float cols)
{
extern __shared__ float sdata[];
int i = blockDim.x * threadIdx.y + threadIdx.x;
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
sdata[i] = 0;
sdata[i] = data[threadId] * data[threadId] + data[threadId + 1] * data[threadId + 1];
__syncthreads();
for (unsigned int s = (blockDim.x * blockDim.y + 1) / 2, old_s = blockDim.x * blockDim.y; s > 0; s >>= 1) {
if (old_s & 1) s += 1;
if (i < s && i + s < old_s) {
sdata[i] += sdata[i + s];
}
old_s = s;
__syncthreads();
}
if (i == 0) {
atomicAdd(&out[blockId / n], sdata[0] / (rows * cols));
}
} |
c42442817cada181048794df61e1c9634ae0b769.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <unordered_map>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cublas.hpp>
#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
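/* Worked example: IDX2C addresses a column-major matrix with leading dimension ld,
   so IDX2C(2,3,6) = 3*6 + 2 = 20, i.e. row 2 of column 3 when M = 6. */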
static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
hipblasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
hipblasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
for(int itr=0; itr<5; itr++) {
std::cout << "iteration " << itr << '\n';
hipError_t cudaStat;
hipblasStatus_t stat;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * M + j + 1);
}
}
cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != hipSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
auto beg = std::chrono::steady_clock::now();
auto handle = tf::cublas_per_thread_handle(0);
auto end = std::chrono::steady_clock::now();
std::cout << "create handle: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
//int version;
//cublasGetVersion(handle, &version);
//std::cout << "version is " << version << '\n';
beg = std::chrono::steady_clock::now();
stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree (devPtrA);
//hipblasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "set matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
end = std::chrono::steady_clock::now();
std::cout << "modify matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree (devPtrA);
//hipblasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "get matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
hipFree (devPtrA);
//beg = std::chrono::steady_clock::now();
//hipblasDestroy(handle);
//end = std::chrono::steady_clock::now();
//std::cout << "destroy handle: "
// << std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
// << " us\n";
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
}
return EXIT_SUCCESS;
}
| c42442817cada181048794df61e1c9634ae0b769.cu | #include <chrono>
#include <unordered_map>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cublas.hpp>
#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
cublasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
cublasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
for(int itr=0; itr<5; itr++) {
std::cout << "iteration " << itr << '\n';
cudaError_t cudaStat;
cublasStatus_t stat;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * M + j + 1);
}
}
cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != cudaSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
auto beg = std::chrono::steady_clock::now();
auto handle = tf::cublas_per_thread_handle(0);
auto end = std::chrono::steady_clock::now();
std::cout << "create handle: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
//int version;
//cublasGetVersion(handle, &version);
//std::cout << "version is " << version << '\n';
beg = std::chrono::steady_clock::now();
stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree (devPtrA);
//cublasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "set matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
end = std::chrono::steady_clock::now();
std::cout << "modify matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree (devPtrA);
//cublasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "get matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
cudaFree (devPtrA);
//beg = std::chrono::steady_clock::now();
//cublasDestroy(handle);
//end = std::chrono::steady_clock::now();
//std::cout << "destroy handle: "
// << std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
// << " us\n";
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
}
return EXIT_SUCCESS;
}
|
96f26318e768b0d45f6f73d64f657488bb5d2acc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudabridge.h"
__global__ void keyFinderKernel(int points, int compression, int searchMode);
__global__ void keyFinderKernelWithDouble(int points, int compression, int searchMode);
void callKeyFinderKernel(int blocks, int threads, int points, bool useDouble, int compression, int searchMode)
{
if (useDouble) {
keyFinderKernelWithDouble << <blocks, threads >> > (points, compression, searchMode);
}
else {
keyFinderKernel << <blocks, threads >> > (points, compression, searchMode);
}
waitForKernel();
}
void waitForKernel()
{
// Check for kernel launch error
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
throw cuda::CudaException(err);
}
// Wait for kernel to complete
err = hipDeviceSynchronize();
fflush(stdout);
if (err != hipSuccess) {
throw cuda::CudaException(err);
}
} | 96f26318e768b0d45f6f73d64f657488bb5d2acc.cu | #include "cudabridge.h"
__global__ void keyFinderKernel(int points, int compression, int searchMode);
__global__ void keyFinderKernelWithDouble(int points, int compression, int searchMode);
void callKeyFinderKernel(int blocks, int threads, int points, bool useDouble, int compression, int searchMode)
{
if (useDouble) {
keyFinderKernelWithDouble << <blocks, threads >> > (points, compression, searchMode);
}
else {
keyFinderKernel << <blocks, threads >> > (points, compression, searchMode);
}
waitForKernel();
}
void waitForKernel()
{
// Check for kernel launch error
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
throw cuda::CudaException(err);
}
// Wait for kernel to complete
err = cudaDeviceSynchronize();
fflush(stdout);
if (err != cudaSuccess) {
throw cuda::CudaException(err);
}
} |
d6dcc42a6800b04fbba52fe53b9ce9e372fb98da.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * Université Pierre et Marie Curie
 * Calcul de transport de neutrons
 * Version séquentielle
*/
// nvcc -I/usr/mpi/gcc/openmpi-1.4.6/include -L/usr/mpi/gcc/openmpi-1.4.6/lib64 -lmpi neutron-mpi.cu -o neutron-mpi
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include<hiprand/hiprand_kernel.h>
#include <thrust/remove.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define THREADS_PER_BLOCK 1024
#define OUTPUT_FILE "/tmp/mpi-absorbed.dat"
char info[] = "\
Usage:\n\
neutron-mpi-cuda H Nb C_c C_s\n\
\n\
    H : épaisseur de la plaque\n\
    Nb : nombre d'échantillons\n\
C_c: composante absorbante\n\
C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
neutron-mpi 1.0 500000000 0.5 0.5\n\
";
/*
 * générateur uniforme de nombres aléatoires dans l'intervalle [0,1)
*/
struct drand48_data alea_buffer;
struct is_not_zero
{
__host__ __device__
bool operator()(float x)
{
return x == 0;
}
};
struct is_even
{
__host__ __device__
bool operator()(const int x)
{
return (x % 2) == 0;
}
};
void init_uniform_random_number() {
srand48_r(0, &alea_buffer);
}
float uniform_random_number() {
double res = 0.0;
drand48_r(&alea_buffer,&res);
return res;
}
/*
* notre gettimeofday()
*/
double my_gettimeofday(){
struct timeval tmp_time;
gettimeofday(&tmp_time, NULL);
return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
/*
* main()
*/
__global__ void neutron_gpu(int n,int* r,int* t,int* b, float* absorbed,float c, float c_c, float c_s, float h, int my_rank, int nb_proc){
// distance parcourue par le neutron avant la collision
float L;
// direction du neutron (0 <= d <= PI)
float d;
  // variable aléatoire uniforme
float u;
// position de la particule (0 <= x <= h)
float x;
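  // Note: with u uniform in (0,1], L = -(1/c)*log(u) samples an exponential free path of
  // mean 1/c, and the neutron advances by its projection L*cos(d) along the slab thickness.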
//(n,r,t,b,absorbed,c,c_c,c_s,L,h,d,x,u)
int j, old;
unsigned int seed;
hiprandState_t state;
j = threadIdx.x+blockIdx.x*blockDim.x;
seed = j;
hiprand_init(seed, 0, 0, &state);
/*if (j == 0)
printf(" j=%d r=%d t=%d b=%d\n",j,*r, *t, *b);*/
if(j<n/nb_proc){
d = 0.0;
x = 0.0;
while (1) {
u = hiprand_uniform(&state);
L = -(1 / c) * log(u);
x = x + L * cos(d);
if (x < 0) {
atomicAdd(r, 1);
break;
} else if (x >= h) {
atomicAdd(t, 1);
break;
}
else if ((u = hiprand_uniform(&state)) < c_c / c) {
old = atomicAdd(b, 1);
absorbed[old] = x;
/* if(absorbed[*b]==0){
printf("x=%f et *b=%d\n",x,*b);
} */
break;
} else {
u = hiprand_uniform(&state);
d = u * M_PI;
}
}
}
}
int main(int argc, char *argv[]) {
// La distance moyenne entre les interactions neutron/atome est 1/c.
// c_c et c_s sont les composantes absorbantes et diffusantes de c.
float c, c_c, c_s;
  // épaisseur de la plaque
  float h;
  // nombre d'échantillons
  int n;
  // nombre de neutrons refléchis, absorbés et transmis
int r, b, t,global_b,global_r,global_t;
// chronometrage
double start, finish;
//int i, j = 0; // compteurs
float* g_absorbed;
int *gpu_r, *gpu_t, *gpu_b;
int my_rank, nb_proc;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nb_proc);
MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
if( argc == 1)
fprintf( stderr, "%s\n", info);
// valeurs par defaut
h = 1.0;
n = 500000000;//500000000
c_c = 0.5;
c_s = 0.5;
// recuperation des parametres
if (argc > 1)
h = atof(argv[1]);
if (argc > 2)
n = atoi(argv[2]);
if (argc > 3)
c_c = atof(argv[3]);
if (argc > 4)
c_s = atof(argv[4]);
r = b = t = 0;
c = c_c + c_s;
// affichage des parametres pour verificatrion
  printf("Épaisseur de la plaque : %4.g\n", h);
  printf("Nombre d'échantillons : %d\n", n);
printf("C_c : %g\n", c_c);
printf("C_s : %g\n", c_s);
float* sub_absorbed = (float *)calloc(n/nb_proc, sizeof(float));
int NB_BLOCK=(n+THREADS_PER_BLOCK*nb_proc-1)/(THREADS_PER_BLOCK*nb_proc);
  //Barriere pour décompter temps
MPI_Barrier(MPI_COMM_WORLD);
start = my_gettimeofday();
//MPI Scatter pour diviser le tableau sur nb_proc mais trop couteux
//MPI_Scatter(absorbed,n/nb_proc, MPI_FLOAT, sub_absorbed,n/nb_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
//ALLOCATION GPU
hipMalloc((void**)&g_absorbed, (n/nb_proc)*sizeof(float));
hipMalloc((void**)&gpu_b, sizeof(int));
hipMalloc((void**)&gpu_r, sizeof(int));
hipMalloc((void**)&gpu_t, sizeof(int));
//COPIE CPU -> GPU
hipMemcpy(gpu_r, &r, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_t, &t, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, &b, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(g_absorbed, sub_absorbed,(n/nb_proc)*sizeof(float), hipMemcpyHostToDevice);
//APPEL AU KERNEL
hipLaunchKernelGGL(( neutron_gpu), dim3(NB_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, n, gpu_r, gpu_t, gpu_b, g_absorbed, c, c_c, c_s, h, my_rank, nb_proc);
hipDeviceSynchronize();
// fin du chronometrage
//COPIE GPU -> CPU
hipMemcpy(&b, gpu_b, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_absorbed, g_absorbed,b*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&r, gpu_r, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&t, gpu_t, sizeof(int), hipMemcpyDeviceToHost);
//MPI_Gather pour rassembler le tableau et ecrire le tableau dans le fichier plus facilement mais trop couteux aussi
//MPI_Gather(sub_absorbed,n/nb_proc, MPI_FLOAT, absorbed,n/nb_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
//MPI_Reduce pour somme de r b et t
MPI_Reduce(&b, &global_b, 1, MPI_INT, MPI_SUM, 0,MPI_COMM_WORLD);
MPI_Reduce(&t, &global_t, 1, MPI_INT, MPI_SUM, 0,MPI_COMM_WORLD);
MPI_Reduce(&r, &global_r, 1, MPI_INT, MPI_SUM, 0,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
  //Barriere pour décompter temps
finish = my_gettimeofday();
if(my_rank==0){
    printf("Nombre neutrons refléchis : %d\n",global_r);
    printf("Nombre neutrons absorbés : %d\n",global_b);
    printf("Nombre neutrons transmis : %d\n",global_t);
    printf("\nPourcentage des neutrons refléchis : %4.2g\n", (float) global_r / (float) n);
    printf("Pourcentage des neutrons absorbés : %4.2g\n", (float) global_b / (float) n);
printf("Pourcentage des neutrons transmis : %4.2g\n", (float) global_t / (float) n);
printf("\nTemps total de calcul: %.8g sec\n", finish - start);
printf("Millions de neutrons /s: %.2g\n", (double) n / ((finish - start)*1e6));
}
  // ouverture du fichier pour ecrire les positions des neutrons absorbés
/*
MPI_Status status;
MPI_File fh;
MPI_File_open(MPI_COMM_SELF, OUTPUT_FILE,MPI_MODE_CREATE | MPI_MODE_WRONLY,MPI_INFO_NULL,&fh);
MPI_Offset displace=my_rank*sizeof(char)*42*b*sizeof("\n");
MPI_File_set_view (fh, displace,MPI_FLOAT,MPI_FLOAT, "native" ,MPI_INFO_NULL);
for (int i=0; i < b; i++){
char buf[42];
//fprintf(f,"%d \n",i);
snprintf(buf,42,"%f \n",sub_absorbed[i]);
MPI_File_write(fh,buf,strlen(buf), MPI_CHAR,&status);
}
MPI_File_close(&fh);
printf("Result written in " OUTPUT_FILE "\n");
*/
hipFree(g_absorbed);
hipFree(gpu_r);
hipFree(gpu_t);
hipFree(gpu_b);
MPI_Finalize();
return EXIT_SUCCESS;
}
| d6dcc42a6800b04fbba52fe53b9ce9e372fb98da.cu | /*
* Université Pierre et Marie Curie
* Calcul de transport de neutrons
* Version séquentielle
*/
// nvcc -I/usr/mpi/gcc/openmpi-1.4.6/include -L/usr/mpi/gcc/openmpi-1.4.6/lib64 -lmpi neutron-mpi.cu -o neutron-mpi
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include<curand_kernel.h>
#include <thrust/remove.h>
#include <mpi.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define THREADS_PER_BLOCK 1024
#define OUTPUT_FILE "/tmp/mpi-absorbed.dat"
char info[] = "\
Usage:\n\
neutron-mpi-cuda H Nb C_c C_s\n\
\n\
H : épaisseur de la plaque\n\
Nb : nombre d'échantillons\n\
C_c: composante absorbante\n\
C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
neutron-mpi 1.0 500000000 0.5 0.5\n\
";
/*
* générateur uniforme de nombres aléatoires dans l'intervalle [0,1)
*/
struct drand48_data alea_buffer;
struct is_not_zero
{
__host__ __device__
bool operator()(float x)
{
return x == 0;
}
};
struct is_even
{
__host__ __device__
bool operator()(const int x)
{
return (x % 2) == 0;
}
};
void init_uniform_random_number() {
srand48_r(0, &alea_buffer);
}
float uniform_random_number() {
double res = 0.0;
drand48_r(&alea_buffer,&res);
return res;
}
/*
* notre gettimeofday()
*/
double my_gettimeofday(){
struct timeval tmp_time;
gettimeofday(&tmp_time, NULL);
return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
/*
* main()
*/
__global__ void neutron_gpu(int n,int* r,int* t,int* b, float* absorbed,float c, float c_c, float c_s, float h, int my_rank, int nb_proc){
// distance parcourue par le neutron avant la collision
float L;
// direction du neutron (0 <= d <= PI)
float d;
// variable aléatoire uniforme
float u;
// position de la particule (0 <= x <= h)
float x;
//(n,r,t,b,absorbed,c,c_c,c_s,L,h,d,x,u)
int j, old;
unsigned int seed;
curandState state;
j = threadIdx.x+blockIdx.x*blockDim.x;
seed = j;
curand_init(seed, 0, 0, &state);
/*if (j == 0)
printf(" j=%d r=%d t=%d b=%d\n",j,*r, *t, *b);*/
if(j<n/nb_proc){
d = 0.0;
x = 0.0;
while (1) {
u = curand_uniform(&state);
L = -(1 / c) * log(u);
x = x + L * cos(d);
if (x < 0) {
atomicAdd(r, 1);
break;
} else if (x >= h) {
atomicAdd(t, 1);
break;
}
else if ((u = curand_uniform(&state)) < c_c / c) {
old = atomicAdd(b, 1);
absorbed[old] = x;
/* if(absorbed[*b]==0){
printf("x=%f et *b=%d\n",x,*b);
} */
break;
} else {
u = curand_uniform(&state);
d = u * M_PI;
}
}
}
}
int main(int argc, char *argv[]) {
// La distance moyenne entre les interactions neutron/atome est 1/c.
// c_c et c_s sont les composantes absorbantes et diffusantes de c.
float c, c_c, c_s;
// épaisseur de la plaque
float h;
// nombre d'échantillons
int n;
// nombre de neutrons refléchis, absorbés et transmis
int r, b, t,global_b,global_r,global_t;
// chronometrage
double start, finish;
//int i, j = 0; // compteurs
float* g_absorbed;
int *gpu_r, *gpu_t, *gpu_b;
int my_rank, nb_proc;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nb_proc);
MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
if( argc == 1)
fprintf( stderr, "%s\n", info);
// valeurs par defaut
h = 1.0;
n = 500000000;//500000000
c_c = 0.5;
c_s = 0.5;
// recuperation des parametres
if (argc > 1)
h = atof(argv[1]);
if (argc > 2)
n = atoi(argv[2]);
if (argc > 3)
c_c = atof(argv[3]);
if (argc > 4)
c_s = atof(argv[4]);
r = b = t = 0;
c = c_c + c_s;
// affichage des parametres pour verification
printf("Épaisseur de la plaque : %4.g\n", h);
printf("Nombre d'échantillons : %d\n", n);
printf("C_c : %g\n", c_c);
printf("C_s : %g\n", c_s);
float* sub_absorbed = (float *)calloc(n/nb_proc, sizeof(float));
int NB_BLOCK=(n+THREADS_PER_BLOCK*nb_proc-1)/(THREADS_PER_BLOCK*nb_proc);
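// ceil division: nb_proc ranks * NB_BLOCK blocks * THREADS_PER_BLOCK threads cover all n samples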
//Barriere pour décompter temps
MPI_Barrier(MPI_COMM_WORLD);
start = my_gettimeofday();
//MPI Scatter pour diviser le tableau sur nb_proc mais trop couteux
//MPI_Scatter(absorbed,n/nb_proc, MPI_FLOAT, sub_absorbed,n/nb_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
//ALLOCATION GPU
cudaMalloc((void**)&g_absorbed, (n/nb_proc)*sizeof(float));
cudaMalloc((void**)&gpu_b, sizeof(int));
cudaMalloc((void**)&gpu_r, sizeof(int));
cudaMalloc((void**)&gpu_t, sizeof(int));
//COPIE CPU -> GPU
cudaMemcpy(gpu_r, &r, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_t, &t, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, &b, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(g_absorbed, sub_absorbed,(n/nb_proc)*sizeof(float), cudaMemcpyHostToDevice);
//APPEL AU KERNEL
neutron_gpu<<<NB_BLOCK,THREADS_PER_BLOCK>>>(n, gpu_r, gpu_t, gpu_b, g_absorbed, c, c_c, c_s, h, my_rank, nb_proc);
cudaDeviceSynchronize();
// fin du chronometrage
//COPIE GPU -> CPU
cudaMemcpy(&b, gpu_b, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_absorbed, g_absorbed,b*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&r, gpu_r, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&t, gpu_t, sizeof(int), cudaMemcpyDeviceToHost);
//MPI_Gather pour rassembler le tableau et ecrire le tableau dans le fichier plus facilement mais trop couteux aussi
//MPI_Gather(sub_absorbed,n/nb_proc, MPI_FLOAT, absorbed,n/nb_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
//MPI_Reduce pour somme de r b et t
MPI_Reduce(&b, &global_b, 1, MPI_INT, MPI_SUM, 0,MPI_COMM_WORLD);
MPI_Reduce(&t, &global_t, 1, MPI_INT, MPI_SUM, 0,MPI_COMM_WORLD);
MPI_Reduce(&r, &global_r, 1, MPI_INT, MPI_SUM, 0,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
//Barriere pour décompter temps
finish = my_gettimeofday();
if(my_rank==0){
printf("Nombre neutrons refléchis : %d\n",global_r);
printf("Nombre neutrons absorbés : %d\n",global_b);
printf("Nombre neutrons transmis : %d\n",global_t);
printf("\nPourcentage des neutrons refléchis : %4.2g\n", (float) global_r / (float) n);
printf("Pourcentage des neutrons absorbés : %4.2g\n", (float) global_b / (float) n);
printf("Pourcentage des neutrons transmis : %4.2g\n", (float) global_t / (float) n);
printf("\nTemps total de calcul: %.8g sec\n", finish - start);
printf("Millions de neutrons /s: %.2g\n", (double) n / ((finish - start)*1e6));
}
// ouverture du fichier pour ecrire les positions des neutrons absorbés
/*
MPI_Status status;
MPI_File fh;
MPI_File_open(MPI_COMM_SELF, OUTPUT_FILE,MPI_MODE_CREATE | MPI_MODE_WRONLY,MPI_INFO_NULL,&fh);
MPI_Offset displace=my_rank*sizeof(char)*42*b*sizeof("\n");
MPI_File_set_view (fh, displace,MPI_FLOAT,MPI_FLOAT, "native" ,MPI_INFO_NULL);
for (int i=0; i < b; i++){
char buf[42];
//fprintf(f,"%d \n",i);
snprintf(buf,42,"%f \n",sub_absorbed[i]);
MPI_File_write(fh,buf,strlen(buf), MPI_CHAR,&status);
}
MPI_File_close(&fh);
printf("Result written in " OUTPUT_FILE "\n");
*/
cudaFree(g_absorbed);
cudaFree(gpu_r);
cudaFree(gpu_t);
cudaFree(gpu_b);
MPI_Finalize();
return EXIT_SUCCESS;
}
|
45e5784f404ba56c0ce8dc835a3f210b45740e69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 John Biddiscombe
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hpx/local/future.hpp>
#include <hpx/async_cuda/cuda_executor.hpp>
#include <hpx/async_cuda/custom_gpu_api.hpp>
#include <cstddef>
__global__ void saxpy(int n, float a, float* x, float* y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
void launch_saxpy_kernel(hpx::cuda::experimental::cuda_executor& cudaexec,
unsigned int& blocks, unsigned int& threads, void** args)
{
// Invoking hpx::apply with cudaLaunchKernel<void> directly results in an
// error for NVCC with gcc configuration
#ifdef HPX_HAVE_HIP
auto launch_kernel = cudaLaunchKernel;
#else
auto launch_kernel = cudaLaunchKernel<void>;
#endif
hpx::apply(cudaexec, launch_kernel, reinterpret_cast<const void*>(&saxpy),
dim3(blocks), dim3(threads), args, std::size_t(0));
}
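// Minimal usage sketch (not part of this file; executor construction, sizes and the
// argument array are illustrative assumptions only):
//   hpx::cuda::experimental::cuda_executor exec(0, true); // device 0, event polling assumed
//   int n = 1 << 20; float a = 2.0f;
//   float *d_x, *d_y;                                      // device buffers allocated elsewhere
//   void* args[] = { &n, &a, &d_x, &d_y };
//   unsigned int blocks = (n + 255) / 256, threads = 256;
//   launch_saxpy_kernel(exec, blocks, threads, args);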
| 45e5784f404ba56c0ce8dc835a3f210b45740e69.cu | // Copyright (c) 2018 John Biddiscombe
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hpx/local/future.hpp>
#include <hpx/async_cuda/cuda_executor.hpp>
#include <hpx/async_cuda/custom_gpu_api.hpp>
#include <cstddef>
__global__ void saxpy(int n, float a, float* x, float* y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
void launch_saxpy_kernel(hpx::cuda::experimental::cuda_executor& cudaexec,
unsigned int& blocks, unsigned int& threads, void** args)
{
// Invoking hpx::apply with cudaLaunchKernel<void> directly results in an
// error for NVCC with gcc configuration
#ifdef HPX_HAVE_HIP
auto launch_kernel = cudaLaunchKernel;
#else
auto launch_kernel = cudaLaunchKernel<void>;
#endif
hpx::apply(cudaexec, launch_kernel, reinterpret_cast<const void*>(&saxpy),
dim3(blocks), dim3(threads), args, std::size_t(0));
}
|
e39cb30ff72154763ac324e549989ec0ef69eef5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include "histogram1024.cuh"
#include <gloop/statistics.h>
///////////////////////////////////////////////////////////////////////////////
// This is nvidias histogram256 SDK example modded to do a 1024 point
// histogram
///////////////////////////////////////////////////////////////////////////////
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
typedef gloop::Global LoopType;
GLOOP_VISIBILITY_HIDDEN
static __device__ void addData1024(volatile unsigned int* s_WarpHist, unsigned int data, unsigned int threadTag)
{
unsigned int count;
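// software atomic increment: the upper WARP_LOG_SIZE bits of a bin carry the tag of
// the thread that last wrote it, so when several lanes of a warp hit the same bin
// only the winning lane reads back its own tag and exits; the others retry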
do {
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
}
GLOOP_VISIBILITY_HIDDEN
static __device__ void performHistogram(gloop::DeviceLoop<LoopType>* loop, unsigned int* d_Result, float* d_Data, float minimum, float maximum, int dataN, int cursor)
{
//Current global thread index
const int globalTid = IMUL(loop->logicalBlockIdx().x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, loop->logicalGridDim().x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
extern volatile __shared__ unsigned int s_Hist[];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for (int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
for (int pos = globalTid + cursor * numThreads;; pos += numThreads, ++cursor) {
int result = pos < dataN;
if (result) {
unsigned int data4 = ((d_Data[pos] - minimum) / (maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
if (__syncthreads_and(!result)) {
break;
}
if (gloop::loop::postTaskIfNecessary(loop,
[=] (gloop::DeviceLoop<LoopType>* loop) {
performHistogram(loop, d_Result, d_Data, minimum, maximum, dataN, cursor + 1);
})) {
//Merge per-warp histograms into per-block and write to global memory
for (int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x) {
unsigned int sum = 0;
for (int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
return;
}
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for (int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x) {
unsigned int sum = 0;
for (int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
static unsigned int* d_Result1024;
//Internal memory allocation
void initHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(hipMalloc((void**)&d_Result1024, HISTOGRAM_SIZE));
}
//Internal memory deallocation
void closeHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(hipFree(d_Result1024));
}
//histogram1024 CPU front-end
void histogram1024GPU(
gloop::HostLoop& hostLoop,
gloop::HostContext& hostContext,
unsigned int* h_Result,
float* d_Data,
float minimum,
float maximum,
int dataN)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop.kernelLock());
checkCudaErrors(hipMemset(d_Result1024, 0, HISTOGRAM_SIZE));
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hostLoop.launchWithSharedMemory<LoopType>(hostContext, dim3(BLOCK_N), dim3(BLOCK_N), dim3(THREAD_N), sizeof(unsigned int) * BLOCK_MEMORY, [] __device__(
gloop::DeviceLoop<LoopType>* loop,
unsigned int* d_Result,
float* d_Data,
float minimum,
float maximum,
int dataN
) {
performHistogram(loop, d_Result, d_Data, minimum, maximum, dataN, 0);
}, d_Result1024, d_Data, minimum, maximum, dataN);
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop.kernelLock());
checkCudaErrors(hipMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, hipMemcpyDeviceToHost));
}
}
| e39cb30ff72154763ac324e549989ec0ef69eef5.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include "histogram1024.cuh"
#include <gloop/statistics.h>
///////////////////////////////////////////////////////////////////////////////
// This is nvidias histogram256 SDK example modded to do a 1024 point
// histogram
///////////////////////////////////////////////////////////////////////////////
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
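// 3 warp-private sub-histograms of 1024 bins = 3072 uints, i.e. 12 KB of dynamic shared memory per block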
#define IMUL(a, b) __mul24(a, b)
typedef gloop::Global LoopType;
GLOOP_VISIBILITY_HIDDEN
static __device__ void addData1024(volatile unsigned int* s_WarpHist, unsigned int data, unsigned int threadTag)
{
unsigned int count;
do {
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
}
GLOOP_VISIBILITY_HIDDEN
static __device__ void performHistogram(gloop::DeviceLoop<LoopType>* loop, unsigned int* d_Result, float* d_Data, float minimum, float maximum, int dataN, int cursor)
{
//Current global thread index
const int globalTid = IMUL(loop->logicalBlockIdx().x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, loop->logicalGridDim().x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
extern volatile __shared__ unsigned int s_Hist[];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for (int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
for (int pos = globalTid + cursor * numThreads;; pos += numThreads, ++cursor) {
int result = pos < dataN;
if (result) {
unsigned int data4 = ((d_Data[pos] - minimum) / (maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
if (__syncthreads_and(!result)) {
break;
}
if (gloop::loop::postTaskIfNecessary(loop,
[=] (gloop::DeviceLoop<LoopType>* loop) {
performHistogram(loop, d_Result, d_Data, minimum, maximum, dataN, cursor + 1);
})) {
//Merge per-warp histograms into per-block and write to global memory
for (int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x) {
unsigned int sum = 0;
for (int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
return;
}
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for (int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x) {
unsigned int sum = 0;
for (int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
static unsigned int* d_Result1024;
//Internal memory allocation
void initHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(cudaMalloc((void**)&d_Result1024, HISTOGRAM_SIZE));
}
//Internal memory deallocation
void closeHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(cudaFree(d_Result1024));
}
//histogram1024 CPU front-end
void histogram1024GPU(
gloop::HostLoop& hostLoop,
gloop::HostContext& hostContext,
unsigned int* h_Result,
float* d_Data,
float minimum,
float maximum,
int dataN)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop.kernelLock());
checkCudaErrors(cudaMemset(d_Result1024, 0, HISTOGRAM_SIZE));
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hostLoop.launchWithSharedMemory<LoopType>(hostContext, dim3(BLOCK_N), dim3(BLOCK_N), dim3(THREAD_N), sizeof(unsigned int) * BLOCK_MEMORY, [] __device__(
gloop::DeviceLoop<LoopType>* loop,
unsigned int* d_Result,
float* d_Data,
float minimum,
float maximum,
int dataN
) {
performHistogram(loop, d_Result, d_Data, minimum, maximum, dataN, 0);
}, d_Result1024, d_Data, minimum, maximum, dataN);
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop.kernelLock());
checkCudaErrors(cudaMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, cudaMemcpyDeviceToHost));
}
}
|
84acb98e0602a0d9f56cfce7b3c94d7f39a4c38c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} | 84acb98e0602a0d9f56cfce7b3c94d7f39a4c38c.cu | #include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} |
8f6ae64ed216d3d34a39f15427f341bcc9b72d40.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaLBFGS/lbfgs.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
using namespace std;
class cpu_rosenbrock_nd : public cpu_cost_function
{
public:
cpu_rosenbrock_nd(size_t n)
: cpu_cost_function(n) {
if (n % 2 != 0)
{
std::cerr << "Generalized Rosenbrock is only defined for even number of unknowns." << std::endl;
std::exit(-1);
}
}
void cpu_f(const floatdouble *h_x, floatdouble *h_y)
{
*h_y = 0.0f;
for (size_t i = 0; i < m_numDimensions / 2; ++i)
{
const floatdouble x0 = h_x[2*i+0];
const floatdouble x1 = h_x[2*i+1];
// f = (1-x0)^2 + 100 (x1-x0^2)^2
const floatdouble a = (1.0 - x0);
const floatdouble b = (x1 - x0 * x0) ;
*h_y += (a*a) + 100.0f * (b*b);
}
}
void cpu_gradf(const floatdouble *h_x, floatdouble *h_grad)
{
for (size_t i = 0; i < m_numDimensions / 2; ++i)
{
const floatdouble x0 = h_x[2*i+0];
const floatdouble x1 = h_x[2*i+1];
// df/dx0 = -2 (1-x0) - 400 (x1-x0^2) x0
// df/dx1 = 200 (x1 - x0^2)
h_grad[2*i+0] = -2.0f * (1.0f - x0) - 400.0f * x0 * (x1 - x0*x0);
h_grad[2*i+1] = 200.0f * (x1 - x0*x0);
}
}
void cpu_f_gradf(const floatdouble *h_x, floatdouble *h_f, floatdouble *h_gradf)
{
cpu_f(h_x, h_f);
cpu_gradf(h_x, h_gradf);
}
};
namespace gpu_rosenbrock_d
{
__device__ static void myAtomicAdd(float *address, float value)
{
#if __CUDA_ARCH__ >= 200
atomicAdd(address, value);
#else
// cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks
int oldval, newval, readback;
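// emulate atomicAdd(float*) on pre-Fermi hardware: reinterpret the float bits as an
// int and retry the compare-and-swap until no other thread has modified *address
// between the read and the CAS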
oldval = __float_as_int(*address);
newval = __float_as_int(__int_as_float(oldval) + value);
while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval)
{
oldval = readback;
newval = __float_as_int(__int_as_float(oldval) + value);
}
#endif
}
__global__ void kernelF(const float *d_x, float *d_y, float *d_grad, size_t len)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= len)
return;
float res = 0.0f;
//for (size_t i = 0; i < batch; i += 2)
{
const float x0 = d_x[2*index+0];
const float x1 = d_x[2*index+1];
// f = (1-x0)^2 + 100 (x1-x0^2)^2
const float a = (1.0 - x0);
const float b = (x1 - x0 * x0);
res += (a*a) + 100.0f * (b*b);
d_grad[2*index+0] = -2.0f * (1.0f - x0) - 400.0f * x0 * (x1 - x0*x0);
d_grad[2*index+1] = 200.0f * (x1 - x0*x0);
}
__shared__ float s_sum; // block local aggregate
s_sum = 0.0f;
__syncthreads(); // wait for all to initialize
myAtomicAdd(&s_sum, res);
__syncthreads();
if (threadIdx.x == 0)
myAtomicAdd(d_y, s_sum);
}
}
class gpu_rosenbrock_nd : public cost_function
{
public:
gpu_rosenbrock_nd(size_t n)
: cost_function(n) {
if (n % 2 != 0)
{
std::cerr << "Generalized Rosenbrock is only defined for even number of unknowns." << std::endl;
std::exit(-1);
}
m_numBatch = 4;
while (n % m_numBatch != 0)
m_numBatch >>= 1;
}
void f_gradf(const float *d_x, float *d_f, float *d_grad)
{
size_t launches = m_numDimensions / 2;
dim3 blockDim(512);
dim3 gridDim((launches % blockDim.x) == 0 ? (launches / blockDim.x)
: (launches / blockDim.x) + 1);
const float zero = 0.0f;
CudaSafeCall( hipMemcpy(d_f, &zero, sizeof(float), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( gpu_rosenbrock_d::kernelF), dim3(gridDim), dim3(blockDim), 0, 0, d_x, d_f, d_grad, launches);
hipDeviceSynchronize();
}
private:
size_t m_numBatch;
};
int main(int argc, char **argv)
{
if (argc < 2)
{
std::cerr << "Usage: " << *argv << " number-of-dimensions gradient-epsilon" << std::endl;
return 1;
}
// CPU
const size_t NX = atoi(argv[1]);
cpu_rosenbrock_nd rb1(NX);
lbfgs minimizer1(rb1);
minimizer1.setGradientEpsilon(atof(argv[2]));
lbfgs::status stat;
float *x = new float[NX];
for (size_t i = 0; i < NX; ++i)
{
x[i] = i % 2 == 1 ? -1 : 2;
}
stat = minimizer1.minimize_with_host_x(x);
cout << "CPU Rosenbrock: ";
for (size_t i = 0; i < NX-1; ++i)
{
cout << x[i] << ", ";
}
cout << x[NX-1] << endl;
cout << minimizer1.statusToString(stat).c_str() << endl;
// GPU
gpu_rosenbrock_nd rb2(NX);
lbfgs minimizer2(rb2);
minimizer2.setGradientEpsilon(atof(argv[2]));
for (size_t i = 0; i < NX; ++i)
{
x[i] = i % 2 == 1 ? -1 : 2;
}
float *d_x;
CudaSafeCall( hipMalloc(&d_x, NX * sizeof(float)) );
CudaSafeCall( hipMemcpy(d_x, x, NX * sizeof(float), hipMemcpyHostToDevice) );
stat = minimizer2.minimize(d_x);
CudaSafeCall( hipMemcpy(x, d_x, NX * sizeof(float), hipMemcpyDeviceToHost) );
CudaSafeCall( hipFree(d_x) );
cout << "GPU Rosenbrock: ";
for (size_t i = 0; i < NX-1; ++i)
{
cout << x[i] << ", ";
}
cout << x[NX-1] << endl;
cout << minimizer2.statusToString(stat).c_str() << endl;
delete [] x;
return 0;
}
| 8f6ae64ed216d3d34a39f15427f341bcc9b72d40.cu | #include "CudaLBFGS/lbfgs.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
using namespace std;
class cpu_rosenbrock_nd : public cpu_cost_function
{
public:
cpu_rosenbrock_nd(size_t n)
: cpu_cost_function(n) {
if (n % 2 != 0)
{
std::cerr << "Generalized Rosenbrock is only defined for even number of unknowns." << std::endl;
std::exit(-1);
}
}
void cpu_f(const floatdouble *h_x, floatdouble *h_y)
{
*h_y = 0.0f;
for (size_t i = 0; i < m_numDimensions / 2; ++i)
{
const floatdouble x0 = h_x[2*i+0];
const floatdouble x1 = h_x[2*i+1];
// f = (1-x0)^2 + 100 (x1-x0^2)^2
const floatdouble a = (1.0 - x0);
const floatdouble b = (x1 - x0 * x0) ;
*h_y += (a*a) + 100.0f * (b*b);
}
}
void cpu_gradf(const floatdouble *h_x, floatdouble *h_grad)
{
for (size_t i = 0; i < m_numDimensions / 2; ++i)
{
const floatdouble x0 = h_x[2*i+0];
const floatdouble x1 = h_x[2*i+1];
// df/dx0 = -2 (1-x0) - 400 (x1-x0^2) x0
// df/dx1 = 200 (x1 - x0^2)
h_grad[2*i+0] = -2.0f * (1.0f - x0) - 400.0f * x0 * (x1 - x0*x0);
h_grad[2*i+1] = 200.0f * (x1 - x0*x0);
}
}
void cpu_f_gradf(const floatdouble *h_x, floatdouble *h_f, floatdouble *h_gradf)
{
cpu_f(h_x, h_f);
cpu_gradf(h_x, h_gradf);
}
};
namespace gpu_rosenbrock_d
{
__device__ static void myAtomicAdd(float *address, float value)
{
#if __CUDA_ARCH__ >= 200
atomicAdd(address, value);
#else
// cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks
int oldval, newval, readback;
oldval = __float_as_int(*address);
newval = __float_as_int(__int_as_float(oldval) + value);
while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval)
{
oldval = readback;
newval = __float_as_int(__int_as_float(oldval) + value);
}
#endif
}
__global__ void kernelF(const float *d_x, float *d_y, float *d_grad, size_t len)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= len)
return;
float res = 0.0f;
//for (size_t i = 0; i < batch; i += 2)
{
const float x0 = d_x[2*index+0];
const float x1 = d_x[2*index+1];
// f = (1-x0)^2 + 100 (x1-x0^2)^2
const float a = (1.0 - x0);
const float b = (x1 - x0 * x0);
res += (a*a) + 100.0f * (b*b);
d_grad[2*index+0] = -2.0f * (1.0f - x0) - 400.0f * x0 * (x1 - x0*x0);
d_grad[2*index+1] = 200.0f * (x1 - x0*x0);
}
__shared__ float s_sum; // block local aggregate
s_sum = 0.0f;
__syncthreads(); // wait for all to initialize
myAtomicAdd(&s_sum, res);
__syncthreads();
if (threadIdx.x == 0)
myAtomicAdd(d_y, s_sum);
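// a single global atomic per block instead of one per thread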
}
}
class gpu_rosenbrock_nd : public cost_function
{
public:
gpu_rosenbrock_nd(size_t n)
: cost_function(n) {
if (n % 2 != 0)
{
std::cerr << "Generalized Rosenbrock is only defined for even number of unknowns." << std::endl;
std::exit(-1);
}
m_numBatch = 4;
while (n % m_numBatch != 0)
m_numBatch >>= 1;
}
void f_gradf(const float *d_x, float *d_f, float *d_grad)
{
size_t launches = m_numDimensions / 2;
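// one CUDA thread per (x0, x1) pair of the generalized Rosenbrock sum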
dim3 blockDim(512);
dim3 gridDim((launches % blockDim.x) == 0 ? (launches / blockDim.x)
: (launches / blockDim.x) + 1);
const float zero = 0.0f;
CudaSafeCall( cudaMemcpy(d_f, &zero, sizeof(float), cudaMemcpyHostToDevice) );
gpu_rosenbrock_d::kernelF<<<gridDim, blockDim>>>(d_x, d_f, d_grad, launches);
cudaDeviceSynchronize();
}
private:
size_t m_numBatch;
};
int main(int argc, char **argv)
{
if (argc < 2)
{
std::cerr << "Usage: " << *argv << " number-of-dimensions gradient-epsilon" << std::endl;
return 1;
}
// CPU
const size_t NX = atoi(argv[1]);
cpu_rosenbrock_nd rb1(NX);
lbfgs minimizer1(rb1);
minimizer1.setGradientEpsilon(atof(argv[2]));
lbfgs::status stat;
float *x = new float[NX];
for (size_t i = 0; i < NX; ++i)
{
x[i] = i % 2 == 1 ? -1 : 2;
}
stat = minimizer1.minimize_with_host_x(x);
cout << "CPU Rosenbrock: ";
for (size_t i = 0; i < NX-1; ++i)
{
cout << x[i] << ", ";
}
cout << x[NX-1] << endl;
cout << minimizer1.statusToString(stat).c_str() << endl;
// GPU
gpu_rosenbrock_nd rb2(NX);
lbfgs minimizer2(rb2);
minimizer2.setGradientEpsilon(atof(argv[2]));
for (size_t i = 0; i < NX; ++i)
{
x[i] = i % 2 == 1 ? -1 : 2;
}
float *d_x;
CudaSafeCall( cudaMalloc(&d_x, NX * sizeof(float)) );
CudaSafeCall( cudaMemcpy(d_x, x, NX * sizeof(float), cudaMemcpyHostToDevice) );
stat = minimizer2.minimize(d_x);
CudaSafeCall( cudaMemcpy(x, d_x, NX * sizeof(float), cudaMemcpyDeviceToHost) );
CudaSafeCall( cudaFree(d_x) );
cout << "GPU Rosenbrock: ";
for (size_t i = 0; i < NX-1; ++i)
{
cout << x[i] << ", ";
}
cout << x[NX-1] << endl;
cout << minimizer2.statusToString(stat).c_str() << endl;
delete [] x;
return 0;
}
|
f8b6304b86577b4ad38c29d641e2a7c4be02d4a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.hpp"
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 16);
size_t n = 1 << pow;
auto size_in_bytes = n * sizeof(double);
std::cout << "memcopy and daxpy test of length n = " << n
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< std::endl;
hipInit(0);
// initialize cublas
auto cublas_handle = get_cublas_handle();
double* x_device = malloc_device<double>(n);
double* y_device = malloc_device<double>(n);
double* x_host = malloc_host<double>(n, 1.5);
double* y_host = malloc_host<double>(n, 3.0);
double* y = malloc_host<double>(n, 0.0);
// start the nvprof profiling
hipProfilerStart();
// copy memory to device
auto start = get_time();
// Memcpy is blocking, commands on GPU are in a stream, are in sequential order
copy_to_device<double>(x_host, x_device, n);
copy_to_device<double>(y_host, y_device, n);
// y = y + 2 * x
double alpha = 2.0;
auto cublas_status =
hipblasDaxpy(cublas_handle, n, &alpha, x_device, 1, y_device, 1);
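// note: hipblasDaxpy is asynchronous, so this interval covers the two blocking
// copies plus the kernel launch; a device synchronization before get_time()
// would be needed to time the daxpy itself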
auto time_taken = get_time() - start;
std::cout << "time : " << time_taken << "s\n";
// copy result back to host
copy_to_host<double>(y_device, y, n);
// check for errors
int errors = 0;
#pragma omp parallel for reduction(+:errors)
for(auto i=0; i<n; ++i) {
if(::fabs(6.-y[i])>1e-15) {
errors++;
}
}
// stop the profiling session
hipProfilerStop();
std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n";
hipFree(x_device);
hipFree(y_device);
free(x_host);
free(y_host);
free(y);
return 0;
}
| f8b6304b86577b4ad38c29d641e2a7c4be02d4a2.cu | #include <iostream>
#include <cuda.h>
#include "util.hpp"
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 16);
size_t n = 1 << pow;
auto size_in_bytes = n * sizeof(double);
std::cout << "memcopy and daxpy test of length n = " << n
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< std::endl;
cuInit(0);
// initialize cublas
auto cublas_handle = get_cublas_handle();
double* x_device = malloc_device<double>(n);
double* y_device = malloc_device<double>(n);
double* x_host = malloc_host<double>(n, 1.5);
double* y_host = malloc_host<double>(n, 3.0);
double* y = malloc_host<double>(n, 0.0);
// start the nvprof profiling
cudaProfilerStart();
// copy memory to device
auto start = get_time();
// Memcpy is blocking, commands on GPU are in a stream, are in sequential order
copy_to_device<double>(x_host, x_device, n);
copy_to_device<double>(y_host, y_device, n);
// y = y + 2 * x
double alpha = 2.0;
auto cublas_status =
cublasDaxpy(cublas_handle, n, &alpha, x_device, 1, y_device, 1);
auto time_taken = get_time() - start;
std::cout << "time : " << time_taken << "s\n";
// copy result back to host
copy_to_host<double>(y_device, y, n);
// check for errors
int errors = 0;
#pragma omp parallel for reduction(+:errors)
for(auto i=0; i<n; ++i) {
if(std::fabs(6.-y[i])>1e-15) {
errors++;
}
}
// stop the profiling session
cudaProfilerStop();
std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n";
cudaFree(x_device);
cudaFree(y_device);
free(x_host);
free(y_host);
free(y);
return 0;
}
|
4444a5e97649308dfd4b405ade0a9e2b070fef42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_spp.h"
#include "saber/core/tensor_op.h"
#include "hip/hip_fp16.h"
namespace anakin {
namespace saber {
template <typename Dtype>
__global__ void ker_concat_fwd(Dtype* out_data, const Dtype* in_data,
const int n,
const int w,
const int n_stride, const int nthreads) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n_id = index / w;
const int w_id = index % w;
const int out_index = n_id * n_stride + w_id;
out_data[out_index] = in_data[index];
}
}
template <DataType OpDtype>
SaberStatus SaberSpp<NV, OpDtype>::dispatch(\
const std::vector<DataTensor_in *>& inputs, \
std::vector<DataTensor_out *>& outputs, \
SPPParam<NV>& param) {
const InDataType* in_data = (const InDataType*)inputs[0]->data();
OutDataType* out_data = (OutDataType*)outputs[0]->mutable_data();
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
int count = outputs[0]->valid_size();
int out_n = outputs[0]->num();
int out_c = outputs[0]->channel();
int out_h = outputs[0]->height();
int out_w = outputs[0]->width();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
std::vector<OpTensor*> pool_outputs;
pool_outputs.resize(1);
for (int i = 0; i < param.pyramid_height; i++) {
pool_outputs[0] = _pooling_output[i];
(*_pooling[i])(inputs, pool_outputs, _pooling_param[i], *(this->_ctx));
int valid_size = pool_outputs[0]->valid_size();
int offset = (pow(4, i) - 1) / 3;
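// the write offset assumes pyramid level i yields 2^i x 2^i = 4^i pooled values per
// (n, c) row, so earlier levels occupy sum_{k<i} 4^k = (4^i - 1) / 3 columns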
hipLaunchKernelGGL(( ker_concat_fwd<InDataType>), dim3(CUDA_GET_BLOCKS(valid_size)),dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data + offset,
(InDataType*) pool_outputs[0]->data(),
pool_outputs[0]->num() * pool_outputs[0]->channel(),
pool_outputs[0]->height() * pool_outputs[0]->width(),
outputs[0]->width(),
valid_size);
}
}
return SaberSuccess;
}
} //namespace saber
} //namespace anakin
| 4444a5e97649308dfd4b405ade0a9e2b070fef42.cu | #include "saber/funcs/impl/cuda/saber_spp.h"
#include "saber/core/tensor_op.h"
#include "cuda_fp16.h"
namespace anakin {
namespace saber {
template <typename Dtype>
__global__ void ker_concat_fwd(Dtype* out_data, const Dtype* in_data,
const int n,
const int w,
const int n_stride, const int nthreads) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n_id = index / w;
const int w_id = index % w;
const int out_index = n_id * n_stride + w_id;
out_data[out_index] = in_data[index];
}
}
template <DataType OpDtype>
SaberStatus SaberSpp<NV, OpDtype>::dispatch(\
const std::vector<DataTensor_in *>& inputs, \
std::vector<DataTensor_out *>& outputs, \
SPPParam<NV>& param) {
const InDataType* in_data = (const InDataType*)inputs[0]->data();
OutDataType* out_data = (OutDataType*)outputs[0]->mutable_data();
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
int count = outputs[0]->valid_size();
int out_n = outputs[0]->num();
int out_c = outputs[0]->channel();
int out_h = outputs[0]->height();
int out_w = outputs[0]->width();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
std::vector<OpTensor*> pool_outputs;
pool_outputs.resize(1);
for (int i = 0; i < param.pyramid_height; i++) {
pool_outputs[0] = _pooling_output[i];
(*_pooling[i])(inputs, pool_outputs, _pooling_param[i], *(this->_ctx));
int valid_size = pool_outputs[0]->valid_size();
int offset = (pow(4, i) - 1) / 3;
ker_concat_fwd<InDataType><<<CUDA_GET_BLOCKS(valid_size),CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data + offset,
(InDataType*) pool_outputs[0]->data(),
pool_outputs[0]->num() * pool_outputs[0]->channel(),
pool_outputs[0]->height() * pool_outputs[0]->width(),
outputs[0]->width(),
valid_size);
}
}
return SaberSuccess;
}
} //namespace saber
} //namespace anakin
|
cc7fe2cc6aa3bd94f708bb3e88971a0b6bf37871.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_fp16.h"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l.nweights);
cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(hipMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
//l.weights_gpu = (float*)halfWeights;
//cuda_free(l.weights_gpu);
//DecGenerateMemory(l.nweights * sizeof(float));
//l.weights_gpu = (float *)halfWeights;
check_error(hipMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), hipMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
publicMemory[1]);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
cuda_convert_f16_to_f32(publicMemory[1], l.outputs, l.output_gpu);
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} | cc7fe2cc6aa3bd94f708bb3e88971a0b6bf37871.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_fp16.h"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l.nweights);
cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(cudaMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
//l.weights_gpu = (float*)halfWeights;
//cuda_free(l.weights_gpu);
//DecGenerateMemory(l.nweights * sizeof(float));
//l.weights_gpu = (float *)halfWeights;
check_error(cudaMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), cudaMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
publicMemory[1]);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
cuda_convert_f16_to_f32(publicMemory[1], l.outputs, l.output_gpu);
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} |
825446442a475a4762edf461d8eaefa91be30f0d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "prefix_sum_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "../prefix_sum_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void prefix_sum_kernel(
float * __restrict output,
const float * __restrict input,
int feature_map_segment_length,
int neuron_count_per_entry,
int neuron_count_per_feature_map,
float clamp_min,
float clamp_max,
int iteration_count)
{
int threadblock_size = blockDim.x;
int thread_id = threadIdx.x;
int neuron_id = blockIdx.x;
int feature_map_segment_id = blockIdx.y;
int entry_id = blockIdx.z;
int current_feature_map_local_id = thread_id;
int offset = entry_id * neuron_count_per_entry + (feature_map_segment_id * feature_map_segment_length + current_feature_map_local_id) * neuron_count_per_feature_map + neuron_id;
float running_sum = 0.0F;
for(int i = 0; i < iteration_count; ++i, current_feature_map_local_id += threadblock_size)
{
float val = 0.0F;
if (current_feature_map_local_id < feature_map_segment_length)
val = input[offset];
if (thread_id == 0)
val += running_sum;
arr_sh[thread_id] = val;
__syncthreads();
for(int d = 1; d < threadblock_size; d = d << 1)
{
if (thread_id >= d)
val += arr_sh[thread_id - d];
__syncthreads();
if (thread_id >= d)
arr_sh[thread_id] = val;
__syncthreads();
}
if (thread_id == 0)
running_sum = arr_sh[threadblock_size - 1];
__syncthreads();
if (current_feature_map_local_id < feature_map_segment_length)
output[offset] = min(max(val, clamp_min), clamp_max);
offset += threadblock_size * neuron_count_per_feature_map;
}
}
prefix_sum_layer_tester_cuda::prefix_sum_layer_tester_cuda()
{
}
prefix_sum_layer_tester_cuda::~prefix_sum_layer_tester_cuda()
{
}
void prefix_sum_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(feature_map_segment_length);
int smem_size = threadblock_size * sizeof(float);
int feature_map_segment_count = output_configuration_specific.feature_map_count / feature_map_segment_length;
int iteration_count = (feature_map_segment_length + threadblock_size - 1) / threadblock_size;
hipLaunchKernelGGL(( prefix_sum_kernel), dim3(dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count)), dim3(threadblock_size), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
feature_map_segment_length,
output_elem_count_per_entry,
output_elem_count_per_feature_map,
clamp_min,
clamp_max,
iteration_count);
}
void prefix_sum_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const prefix_sum_layer> layer_derived = nnforge_dynamic_pointer_cast<const prefix_sum_layer>(layer_schema);
feature_map_segment_length = layer_derived->feature_map_segment_length;
clamp_min = layer_derived->clamp_min;
clamp_max = layer_derived->clamp_max;
}
int prefix_sum_layer_tester_cuda::get_threadblock_size(int feature_map_segment_length)
{
int threadblock_size;
if (feature_map_segment_length < 256)
{
threadblock_size = (feature_map_segment_length + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (feature_map_segment_length + 256 - 1) / 256;
threadblock_size = (feature_map_segment_length + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
int prefix_sum_layer_tester_cuda::get_input_index_layer_can_write() const
{
return 0;
}
}
}
| 825446442a475a4762edf461d8eaefa91be30f0d.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "prefix_sum_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "../prefix_sum_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void prefix_sum_kernel(
float * __restrict output,
const float * __restrict input,
int feature_map_segment_length,
int neuron_count_per_entry,
int neuron_count_per_feature_map,
float clamp_min,
float clamp_max,
int iteration_count)
{
int threadblock_size = blockDim.x;
int thread_id = threadIdx.x;
int neuron_id = blockIdx.x;
int feature_map_segment_id = blockIdx.y;
int entry_id = blockIdx.z;
int current_feature_map_local_id = thread_id;
int offset = entry_id * neuron_count_per_entry + (feature_map_segment_id * feature_map_segment_length + current_feature_map_local_id) * neuron_count_per_feature_map + neuron_id;
float running_sum = 0.0F;
for(int i = 0; i < iteration_count; ++i, current_feature_map_local_id += threadblock_size)
{
float val = 0.0F;
if (current_feature_map_local_id < feature_map_segment_length)
val = input[offset];
if (thread_id == 0)
val += running_sum;
arr_sh[thread_id] = val;
__syncthreads();
for(int d = 1; d < threadblock_size; d = d << 1)
{
if (thread_id >= d)
val += arr_sh[thread_id - d];
__syncthreads();
if (thread_id >= d)
arr_sh[thread_id] = val;
__syncthreads();
}
if (thread_id == 0)
running_sum = arr_sh[threadblock_size - 1];
__syncthreads();
if (current_feature_map_local_id < feature_map_segment_length)
output[offset] = min(max(val, clamp_min), clamp_max);
offset += threadblock_size * neuron_count_per_feature_map;
}
}
prefix_sum_layer_tester_cuda::prefix_sum_layer_tester_cuda()
{
}
prefix_sum_layer_tester_cuda::~prefix_sum_layer_tester_cuda()
{
}
void prefix_sum_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(feature_map_segment_length);
int smem_size = threadblock_size * sizeof(float);
int feature_map_segment_count = output_configuration_specific.feature_map_count / feature_map_segment_length;
int iteration_count = (feature_map_segment_length + threadblock_size - 1) / threadblock_size;
prefix_sum_kernel<<<dim3(output_elem_count_per_feature_map, feature_map_segment_count, entry_count), threadblock_size, smem_size, stream_id>>>(
*output_buffer,
*input_buffers[0],
feature_map_segment_length,
output_elem_count_per_entry,
output_elem_count_per_feature_map,
clamp_min,
clamp_max,
iteration_count);
}
void prefix_sum_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const prefix_sum_layer> layer_derived = nnforge_dynamic_pointer_cast<const prefix_sum_layer>(layer_schema);
feature_map_segment_length = layer_derived->feature_map_segment_length;
clamp_min = layer_derived->clamp_min;
clamp_max = layer_derived->clamp_max;
}
int prefix_sum_layer_tester_cuda::get_threadblock_size(int feature_map_segment_length)
{
int threadblock_size;
if (feature_map_segment_length < 256)
{
threadblock_size = (feature_map_segment_length + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (feature_map_segment_length + 256 - 1) / 256;
threadblock_size = (feature_map_segment_length + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
int prefix_sum_layer_tester_cuda::get_input_index_layer_can_write() const
{
return 0;
}
}
}
|
bf768b9884e60e23ad930507be438debce705644.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelReadMotionEnergyAsync.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gpuConvBufferl1 = NULL;
hipMalloc(&gpuConvBufferl1, XSIZE*YSIZE);
float *gpuConvBufferl2 = NULL;
hipMalloc(&gpuConvBufferl2, XSIZE*YSIZE);
int ringBufferIdx = 1;
int bsx = 1;
int bsy = 1;
int n = XSIZE*YSIZE;
float *gpuEnergyBuffer = NULL;
hipMalloc(&gpuEnergyBuffer, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( kernelReadMotionEnergyAsync), dim3(gridBlock),dim3(threadBlock), 0, 0, gpuConvBufferl1,gpuConvBufferl2,ringBufferIdx,bsx,bsy,n,gpuEnergyBuffer);
hipDeviceSynchronize();
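// Warm-up launches before the timed runs.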
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( kernelReadMotionEnergyAsync), dim3(gridBlock),dim3(threadBlock), 0, 0, gpuConvBufferl1,gpuConvBufferl2,ringBufferIdx,bsx,bsy,n,gpuEnergyBuffer);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( kernelReadMotionEnergyAsync), dim3(gridBlock),dim3(threadBlock), 0, 0, gpuConvBufferl1,gpuConvBufferl2,ringBufferIdx,bsx,bsy,n,gpuEnergyBuffer);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bf768b9884e60e23ad930507be438debce705644.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelReadMotionEnergyAsync.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gpuConvBufferl1 = NULL;
cudaMalloc(&gpuConvBufferl1, XSIZE*YSIZE);
float *gpuConvBufferl2 = NULL;
cudaMalloc(&gpuConvBufferl2, XSIZE*YSIZE);
int ringBufferIdx = 1;
int bsx = 1;
int bsy = 1;
int n = XSIZE*YSIZE;
float *gpuEnergyBuffer = NULL;
cudaMalloc(&gpuEnergyBuffer, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelReadMotionEnergyAsync<<<gridBlock,threadBlock>>>(gpuConvBufferl1,gpuConvBufferl2,ringBufferIdx,bsx,bsy,n,gpuEnergyBuffer);
cudaDeviceSynchronize();
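// Warm-up launches before the timed runs.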
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelReadMotionEnergyAsync<<<gridBlock,threadBlock>>>(gpuConvBufferl1,gpuConvBufferl2,ringBufferIdx,bsx,bsy,n,gpuEnergyBuffer);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelReadMotionEnergyAsync<<<gridBlock,threadBlock>>>(gpuConvBufferl1,gpuConvBufferl2,ringBufferIdx,bsx,bsy,n,gpuEnergyBuffer);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7058a8a2c855b2b8a327a7ca16fcfb6e9bedd84b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
#include <math.h>
const int N = 16;
const int blocksize = 16;
__global__
void simple(float *n, float *c)
{
c[threadIdx.x] = sqrt(n[threadIdx.x]);
}
/*__global__
void simple(float *c)
{
c[threadIdx.x] = threadIdx.x;
}*/
int main()
{
float *c = new float[N];
float *cd;
const int size = N*sizeof(float);
float *src = new float[N];
float *dst;
for(int i=0; i<N; i++)
src[i]=i*i*i*i;
hipMalloc( (void**)&cd, size );
hipMalloc( (void**)&dst, size );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipMemcpy( dst, src, size, hipMemcpyHostToDevice );
hipLaunchKernelGGL(( simple), dim3(dimGrid), dim3(dimBlock), 0, 0, dst,cd);
//simple<<<dimGrid, dimBlock>>>(cd);
hipDeviceSynchronize();
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
hipFree( cd );
hipFree( dst);
for (int i = 0; i < N; i++)
printf("%f\n ", c[i]);
printf("\n");
delete[] c;
delete[] src;
printf("done\n");
return EXIT_SUCCESS;
}
| 7058a8a2c855b2b8a327a7ca16fcfb6e9bedd84b.cu | // Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
#include <math.h>
const int N = 16;
const int blocksize = 16;
__global__
void simple(float *n, float *c)
{
c[threadIdx.x] = sqrt(n[threadIdx.x]);
}
/*__global__
void simple(float *c)
{
c[threadIdx.x] = threadIdx.x;
}*/
int main()
{
float *c = new float[N];
float *cd;
const int size = N*sizeof(float);
float *src = new float[N];
float *dst;
for(int i=0; i<N; i++)
src[i]=i*i*i*i;
cudaMalloc( (void**)&cd, size );
cudaMalloc( (void**)&dst, size );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
cudaMemcpy( dst, src, size, cudaMemcpyHostToDevice );
simple<<<dimGrid, dimBlock>>>(dst,cd);
//simple<<<dimGrid, dimBlock>>>(cd);
cudaThreadSynchronize();
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
cudaFree( cd );
cudaFree( dst);
for (int i = 0; i < N; i++)
printf("%f\n ", c[i]);
printf("\n");
delete[] c;
delete[] src;
printf("done\n");
return EXIT_SUCCESS;
}
|
b0611c1db0f7b4f09f84790e2046facc3cf039c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Update.h"
#include "parameters.h"
__global__ void Update_A(double f[], const double fluxes_x[], const double fluxes_y[])
{
int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
if (tidx >= 0 && tidx <= lf) // Skip boundaries!
{
if (tidy >= 0 && tidy <= nf) {
{
for (int i_cons = i_rho; i_cons <= i_ener; i_cons++) {
f[ij_sol(tidy, tidx, i_cons)] += -fluxes_x[ij_sol(tidy, tidx, i_cons)] - fluxes_y[ij_sol(tidy, tidx, i_cons)];
}
}
}
}
}
__global__ void Update_B(double f[], const double fluxes_x[], const double fluxes_y[])
{
int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
if (tidx >= 0 && tidx <= lf) // Skip boundaries!
{
if (tidy >= 0 && tidy <= nf) {
{
for (int i_cons = i_rho; i_cons <= i_ener; i_cons++) {
f[ij_sol(tidy + 1, tidx, i_cons)] += +fluxes_y[ij_sol(tidy, tidx, i_cons)];
}
}
}
}
}
__global__ void Update_C(double f[], const double fluxes_x[], const double fluxes_y[])
{
int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
if (tidx >= 0 && tidx <= lf) // Skip boundaries!
{
if (tidy >= 0 && tidy <= nf) {
{
for (int i_cons = i_rho; i_cons <= i_ener; i_cons++) {
f[ij_sol(tidy, tidx + 1, i_cons)] += +fluxes_x[ij_sol(tidy, tidx, i_cons)];
}
}
}
}
}
| b0611c1db0f7b4f09f84790e2046facc3cf039c9.cu | #include "Update.h"
#include "parameters.h"
__global__ void Update_A(double f[], const double fluxes_x[], const double fluxes_y[])
{
int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
if (tidx >= 0 && tidx <= lf) // Skip boundaries!
{
if (tidy >= 0 && tidy <= nf) {
{
for (int i_cons = i_rho; i_cons <= i_ener; i_cons++) {
f[ij_sol(tidy, tidx, i_cons)] += -fluxes_x[ij_sol(tidy, tidx, i_cons)] - fluxes_y[ij_sol(tidy, tidx, i_cons)];
}
}
}
}
}
__global__ void Update_B(double f[], const double fluxes_x[], const double fluxes_y[])
{
int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
if (tidx >= 0 && tidx <= lf) // Skip boundaries!
{
if (tidy >= 0 && tidy <= nf) {
{
for (int i_cons = i_rho; i_cons <= i_ener; i_cons++) {
f[ij_sol(tidy + 1, tidx, i_cons)] += +fluxes_y[ij_sol(tidy, tidx, i_cons)];
}
}
}
}
}
__global__ void Update_C(double f[], const double fluxes_x[], const double fluxes_y[])
{
int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
if (tidx >= 0 && tidx <= lf) // Skip boundaries!
{
if (tidy >= 0 && tidy <= nf) {
{
for (int i_cons = i_rho; i_cons <= i_ener; i_cons++) {
f[ij_sol(tidy, tidx + 1, i_cons)] += +fluxes_x[ij_sol(tidy, tidx, i_cons)];
}
}
}
}
}
|
92912fad4b4fddf29fdcb457812b9c82aea07c0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_kernel;
int xdim0_update_kernel_h = -1;
int ydim0_update_kernel_h = -1;
__constant__ int xdim1_update_kernel;
int xdim1_update_kernel_h = -1;
int ydim1_update_kernel_h = -1;
__constant__ int xdim2_update_kernel;
int xdim2_update_kernel_h = -1;
int ydim2_update_kernel_h = -1;
__constant__ int xdim3_update_kernel;
int xdim3_update_kernel_h = -1;
int ydim3_update_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC_MD3(d, x) ((x)*3 + (d))
// user function
__device__
void
update_kernel(double *rho_new, double *rhou_new, double *rhoE_new,
const double *s) {
rho_new[OPS_ACC0(0)] = rho_new[OPS_ACC0(0)] + s[OPS_ACC_MD3(0, 0)];
rhou_new[OPS_ACC1(0)] = rhou_new[OPS_ACC1(0)] + s[OPS_ACC_MD3(1, 0)];
rhoE_new[OPS_ACC2(0)] = rhoE_new[OPS_ACC2(0)] + s[OPS_ACC_MD3(2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD3
__global__ void ops_update_kernel(double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
const double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 3;
if (idx_x < size0) {
update_kernel(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_update_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 13))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(13, "update_kernel");
OPS_kernels[13].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_update_kernel_h || xdim1 != xdim1_update_kernel_h ||
xdim2 != xdim2_update_kernel_h || xdim3 != xdim3_update_kernel_h) {
hipMemcpyToSymbol(xdim0_update_kernel, &xdim0, sizeof(int));
xdim0_update_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_update_kernel, &xdim1, sizeof(int));
xdim1_update_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_update_kernel, &xdim2, sizeof(int));
xdim2_update_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_update_kernel, &xdim3, sizeof(int));
xdim3_update_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[13].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[13].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[13].mpi_time += t2 - t1;
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| 92912fad4b4fddf29fdcb457812b9c82aea07c0b.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_kernel;
int xdim0_update_kernel_h = -1;
int ydim0_update_kernel_h = -1;
__constant__ int xdim1_update_kernel;
int xdim1_update_kernel_h = -1;
int ydim1_update_kernel_h = -1;
__constant__ int xdim2_update_kernel;
int xdim2_update_kernel_h = -1;
int ydim2_update_kernel_h = -1;
__constant__ int xdim3_update_kernel;
int xdim3_update_kernel_h = -1;
int ydim3_update_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC_MD3(d, x) ((x)*3 + (d))
// user function
__device__
void
update_kernel(double *rho_new, double *rhou_new, double *rhoE_new,
const double *s) {
rho_new[OPS_ACC0(0)] = rho_new[OPS_ACC0(0)] + s[OPS_ACC_MD3(0, 0)];
rhou_new[OPS_ACC1(0)] = rhou_new[OPS_ACC1(0)] + s[OPS_ACC_MD3(1, 0)];
rhoE_new[OPS_ACC2(0)] = rhoE_new[OPS_ACC2(0)] + s[OPS_ACC_MD3(2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD3
__global__ void ops_update_kernel(double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
const double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 3;
if (idx_x < size0) {
update_kernel(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_update_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 13))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(13, "update_kernel");
OPS_kernels[13].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_update_kernel_h || xdim1 != xdim1_update_kernel_h ||
xdim2 != xdim2_update_kernel_h || xdim3 != xdim3_update_kernel_h) {
cudaMemcpyToSymbol(xdim0_update_kernel, &xdim0, sizeof(int));
xdim0_update_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_update_kernel, &xdim1, sizeof(int));
xdim1_update_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_update_kernel, &xdim2, sizeof(int));
xdim2_update_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_update_kernel, &xdim3, sizeof(int));
xdim3_update_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[13].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[13].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[13].mpi_time += t2 - t1;
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
75406e6a3045eecb74c4c068654b0f012948a078.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "flashlight/lib/sequence/criterion/cuda/CriterionUtils.cuh"
#include <algorithm>
namespace {
using fl::lib::seq::CriterionScaleMode;
using namespace fl::lib::seq;
/*
* B thread blocks
* 32 threads/block (ideally)
*/
__global__ void
batchTargetSizeKernel(int L, int maxSize, const int* _target, int* targetSize) {
int b = blockIdx.x;
auto target = _target + b * L;
__shared__ int idx;
if (threadIdx.x == 0) {
idx = 0;
}
__syncthreads();
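  // Threads scan the padded target from the end in strides of blockDim.x and atomicMax
  // the (index + 1) of the first non-negative label each finds, so idx ends up as the
  // unpadded target length.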
for (int i = L - 1 - threadIdx.x; i >= 0; i -= blockDim.x) {
if (target[i] >= 0) {
atomicMax(&idx, i + 1);
break;
}
}
__syncthreads();
if (threadIdx.x == 0) {
targetSize[b] = idx < maxSize ? idx : maxSize;
}
}
/*
* 1 thread block
* B threads/block (ideally)
*/
template <class Float>
__global__ void computeScaleKernel(
int B,
int T,
int /* N */,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale) {
for (int b = threadIdx.x; b < B; b += blockDim.x) {
switch (scaleMode) {
case CriterionScaleMode::NONE:
scale[b] = 1.0;
break;
case CriterionScaleMode::INPUT_SZ:
scale[b] = T > 0 ? 1.0 / T : 1.0;
break;
case CriterionScaleMode::INPUT_SZ_SQRT:
scale[b] = T > 0 ? std::sqrt(1.0 / T) : 1.0;
break;
case CriterionScaleMode::TARGET_SZ:
scale[b] = targetSize[b] > 0 ? 1.0 / targetSize[b] : 1.0;
break;
case CriterionScaleMode::TARGET_SZ_SQRT:
scale[b] = targetSize[b] > 0 ? std::sqrt(1.0 / targetSize[b]) : 1.0;
break;
default:
break;
}
}
}
} // namespace
namespace fl {
namespace lib {
namespace cuda {
template <class Float>
void CriterionUtils<Float>::batchTargetSize(
int B,
int L,
int maxSize,
const int* target,
int* targetSize,
hipStream_t stream) {
hipLaunchKernelGGL(( batchTargetSizeKernel), dim3(B), dim3(32), 0, stream, L, maxSize, target, targetSize);
}
template <class Float>
void CriterionUtils<Float>::computeScale(
int B,
int T,
int N,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale,
hipStream_t stream) {
int blockSize = ::min(256, (B + 31) / 32 * 32);
hipLaunchKernelGGL(( computeScaleKernel), dim3(1), dim3(blockSize), 0, stream,
B, T, N, scaleMode, targetSize, scale);
}
template struct CriterionUtils<float>;
template struct CriterionUtils<double>;
} // namespace cuda
} // namespace lib
} // namespace fl
| 75406e6a3045eecb74c4c068654b0f012948a078.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "flashlight/lib/sequence/criterion/cuda/CriterionUtils.cuh"
#include <algorithm>
namespace {
using fl::lib::seq::CriterionScaleMode;
using namespace fl::lib::seq;
/*
* B thread blocks
* 32 threads/block (ideally)
*/
__global__ void
batchTargetSizeKernel(int L, int maxSize, const int* _target, int* targetSize) {
int b = blockIdx.x;
auto target = _target + b * L;
__shared__ int idx;
if (threadIdx.x == 0) {
idx = 0;
}
__syncthreads();
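  // Threads scan the padded target from the end in strides of blockDim.x and atomicMax
  // the (index + 1) of the first non-negative label each finds, so idx ends up as the
  // unpadded target length.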
for (int i = L - 1 - threadIdx.x; i >= 0; i -= blockDim.x) {
if (target[i] >= 0) {
atomicMax(&idx, i + 1);
break;
}
}
__syncthreads();
if (threadIdx.x == 0) {
targetSize[b] = idx < maxSize ? idx : maxSize;
}
}
/*
* 1 thread block
* B threads/block (ideally)
*/
template <class Float>
__global__ void computeScaleKernel(
int B,
int T,
int /* N */,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale) {
for (int b = threadIdx.x; b < B; b += blockDim.x) {
switch (scaleMode) {
case CriterionScaleMode::NONE:
scale[b] = 1.0;
break;
case CriterionScaleMode::INPUT_SZ:
scale[b] = T > 0 ? 1.0 / T : 1.0;
break;
case CriterionScaleMode::INPUT_SZ_SQRT:
scale[b] = T > 0 ? std::sqrt(1.0 / T) : 1.0;
break;
case CriterionScaleMode::TARGET_SZ:
scale[b] = targetSize[b] > 0 ? 1.0 / targetSize[b] : 1.0;
break;
case CriterionScaleMode::TARGET_SZ_SQRT:
scale[b] = targetSize[b] > 0 ? std::sqrt(1.0 / targetSize[b]) : 1.0;
break;
default:
break;
}
}
}
} // namespace
namespace fl {
namespace lib {
namespace cuda {
template <class Float>
void CriterionUtils<Float>::batchTargetSize(
int B,
int L,
int maxSize,
const int* target,
int* targetSize,
cudaStream_t stream) {
batchTargetSizeKernel<<<B, 32, 0, stream>>>(L, maxSize, target, targetSize);
}
template <class Float>
void CriterionUtils<Float>::computeScale(
int B,
int T,
int N,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale,
cudaStream_t stream) {
int blockSize = std::min(256, (B + 31) / 32 * 32);
computeScaleKernel<<<1, blockSize, 0, stream>>>(
B, T, N, scaleMode, targetSize, scale);
}
template struct CriterionUtils<float>;
template struct CriterionUtils<double>;
} // namespace cuda
} // namespace lib
} // namespace fl
|
e46b62df27785c29821485b318560e8aca35de4e.hip | // !!! This is a file automatically generated by hipify!!!
/* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include <hip/hip_runtime.h>
#include <limits>
#include "data/types.h"
#include "tensors/tensor.h"
#include "translator/helpers.h"
namespace marian {
namespace gpu {
__global__ void gSetColumn(float* d_in,
size_t n_columns,
size_t n_rows,
size_t noColumn,
float value) {
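  // One thread per row: writes value into column noColumn of the row-major matrix d_in.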
size_t rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
size_t index = noColumn + rowNumber * n_columns;
if(index < n_columns * n_rows) {
d_in[index] = value;
}
}
void SetColumn(Tensor in_, size_t col, float value) {
int nRows = in_->shape().elements() / in_->shape()[-1];
int nColumns = in_->shape()[-1];
int nBlocks = nRows / 512 + ((nRows % 512 == 0) ? 0 : 1);
int nThreads = ::min(512, nRows);
hipLaunchKernelGGL(( gSetColumn), dim3(nBlocks), dim3(nThreads), 0, 0, in_->data(), nColumns, nRows, col, value);
}
void suppressUnk(Expr probs) {
SetColumn(probs->val(), UNK_ID, std::numeric_limits<float>::lowest());
}
void suppressWord(Expr probs, Word id) {
SetColumn(probs->val(), id, std::numeric_limits<float>::lowest());
}
}
}
| e46b62df27785c29821485b318560e8aca35de4e.cu | /* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include <cuda.h>
#include <limits>
#include "data/types.h"
#include "tensors/tensor.h"
#include "translator/helpers.h"
namespace marian {
namespace gpu {
__global__ void gSetColumn(float* d_in,
size_t n_columns,
size_t n_rows,
size_t noColumn,
float value) {
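  // One thread per row: writes value into column noColumn of the row-major matrix d_in.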
size_t rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
size_t index = noColumn + rowNumber * n_columns;
if(index < n_columns * n_rows) {
d_in[index] = value;
}
}
void SetColumn(Tensor in_, size_t col, float value) {
int nRows = in_->shape().elements() / in_->shape()[-1];
int nColumns = in_->shape()[-1];
int nBlocks = nRows / 512 + ((nRows % 512 == 0) ? 0 : 1);
int nThreads = std::min(512, nRows);
gSetColumn<<<nBlocks, nThreads>>>(in_->data(), nColumns, nRows, col, value);
}
void suppressUnk(Expr probs) {
SetColumn(probs->val(), UNK_ID, std::numeric_limits<float>::lowest());
}
void suppressWord(Expr probs, Word id) {
SetColumn(probs->val(), id, std::numeric_limits<float>::lowest());
}
}
}
|
8c32028d34feb81ffafb9644fea9cb02a5fc113a.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMasked.cuh>
#include "THHTensor.hpp"
#include "../generic/THCTensorMasked.cu"
#include <THH/THHGenerateHalfType.h>
| 8c32028d34feb81ffafb9644fea9cb02a5fc113a.cu | #include <THC/THCTensorMasked.cuh>
#include "THCTensor.hpp"
#include "../generic/THCTensorMasked.cu"
#include <THC/THCGenerateHalfType.h>
|
6063a42607422263660f42408d50fd1f8e3b5531.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_minus_4_right;
int xdim0_update_halo_kernel2_xvel_minus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_minus_4_right;
int ydim0_update_halo_kernel2_xvel_minus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_minus_4_right;
int xdim1_update_halo_kernel2_xvel_minus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_minus_4_right;
int ydim1_update_halo_kernel2_xvel_minus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_xvel_minus_4_right*(y)+xdim0_update_halo_kernel2_xvel_minus_4_right*ydim0_update_halo_kernel2_xvel_minus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_xvel_minus_4_right*(y)+xdim1_update_halo_kernel2_xvel_minus_4_right*ydim1_update_halo_kernel2_xvel_minus_4_right*(z))
//user function
__device__
inline void update_halo_kernel2_xvel_minus_4_right_gpu(double *xvel0, double *xvel1, const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0,0,0)] = -xvel0[OPS_ACC0(-4,0,0)];
if(fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0,0,0)] = -xvel1[OPS_ACC1(-4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_minus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_xvel_minus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel2_xvel_minus_4_right * ydim0_update_halo_kernel2_xvel_minus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_xvel_minus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel2_xvel_minus_4_right * ydim1_update_halo_kernel2_xvel_minus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_minus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_minus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,30)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(30,"update_halo_kernel2_xvel_minus_4_right");
OPS_kernels[30].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_4_right_h || ydim0 != ydim0_update_halo_kernel2_xvel_minus_4_right_h || xdim1 != xdim1_update_halo_kernel2_xvel_minus_4_right_h || ydim1 != ydim1_update_halo_kernel2_xvel_minus_4_right_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_xvel_minus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_xvel_minus_4_right_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_xvel_minus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_xvel_minus_4_right_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_xvel_minus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_xvel_minus_4_right_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_xvel_minus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_xvel_minus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[30].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_minus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[30].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[30].mpi_time += t2-t1;
OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 30;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 30;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_minus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(30,"update_halo_kernel2_xvel_minus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 6063a42607422263660f42408d50fd1f8e3b5531.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_minus_4_right;
int xdim0_update_halo_kernel2_xvel_minus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_minus_4_right;
int ydim0_update_halo_kernel2_xvel_minus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_minus_4_right;
int xdim1_update_halo_kernel2_xvel_minus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_minus_4_right;
int ydim1_update_halo_kernel2_xvel_minus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_xvel_minus_4_right*(y)+xdim0_update_halo_kernel2_xvel_minus_4_right*ydim0_update_halo_kernel2_xvel_minus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_xvel_minus_4_right*(y)+xdim1_update_halo_kernel2_xvel_minus_4_right*ydim1_update_halo_kernel2_xvel_minus_4_right*(z))
//user function
__device__
inline void update_halo_kernel2_xvel_minus_4_right_gpu(double *xvel0, double *xvel1, const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0,0,0)] = -xvel0[OPS_ACC0(-4,0,0)];
if(fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0,0,0)] = -xvel1[OPS_ACC1(-4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_minus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_xvel_minus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel2_xvel_minus_4_right * ydim0_update_halo_kernel2_xvel_minus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_xvel_minus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel2_xvel_minus_4_right * ydim1_update_halo_kernel2_xvel_minus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_minus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_minus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,30)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(30,"update_halo_kernel2_xvel_minus_4_right");
OPS_kernels[30].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_4_right_h || ydim0 != ydim0_update_halo_kernel2_xvel_minus_4_right_h || xdim1 != xdim1_update_halo_kernel2_xvel_minus_4_right_h || ydim1 != ydim1_update_halo_kernel2_xvel_minus_4_right_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_xvel_minus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_xvel_minus_4_right_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_xvel_minus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_xvel_minus_4_right_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_xvel_minus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_xvel_minus_4_right_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_xvel_minus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_xvel_minus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[30].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_xvel_minus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[30].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[30].mpi_time += t2-t1;
OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 30;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 30;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_minus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(30,"update_halo_kernel2_xvel_minus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
fc2a0b314fe2587ce56c681e83b394a5c24208b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------
*
* vectorAdd.cu
*
 * This is the source file of a vector-addition kernel.
 *
 * The kernel is adapted from the CUDA samples vectorAdd.cu.
*
* streamsOptBenchmark/vectorAdd.cu
*
* By Hao Li
*
*------------
*/
#include <stdio.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
// __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
__global__ void vectorAdd(const float *A, float *C, int numElements)
{
for(int l = 0; l < 1000000; l++)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
// C[i] = A[i] + B[i];
C[i] = A[i] + A[numElements + i];
}
}
}
// int
// main(void)
// {
// // Error code to check return values for CUDA calls
// hipError_t err = hipSuccess;
// // Print the vector length to be used, and compute its size
// int numElements = 50000;
// size_t size = numElements * sizeof(float);
// printf("[Vector addition of %d elements]\n", numElements);
// // Allocate the host input vector A
// float *h_A = (float *)malloc(size);
// // Allocate the host input vector B
// float *h_B = (float *)malloc(size);
// // Allocate the host output vector C
// float *h_C = (float *)malloc(size);
// // Verify that allocations succeeded
// if (h_A == NULL || h_B == NULL || h_C == NULL)
// {
// fprintf(stderr, "Failed to allocate host vectors!\n");
// exit(EXIT_FAILURE);
// }
// // Initialize the host input vectors
// for (int i = 0; i < numElements; ++i)
// {
// h_A[i] = rand()/(float)RAND_MAX;
// h_B[i] = rand()/(float)RAND_MAX;
// }
// // Allocate the device input vector A
// float *d_A = NULL;
// err = hipMalloc((void **)&d_A, size);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Allocate the device input vector B
// float *d_B = NULL;
// err = hipMalloc((void **)&d_B, size);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Allocate the device output vector C
// float *d_C = NULL;
// err = hipMalloc((void **)&d_C, size);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Copy the host input vectors A and B in host memory to the device input vectors in
// // device memory
// printf("Copy input data from the host memory to the CUDA device\n");
// err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Launch the Vector Add CUDA Kernel
// int threadsPerBlock = 256;
// int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
// printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
// vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
// err = hipGetLastError();
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Copy the device result vector in device memory to the host result vector
// // in host memory.
// printf("Copy output data from the CUDA device to the host memory\n");
// err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Verify that the result vector is correct
// for (int i = 0; i < numElements; ++i)
// {
// if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
// {
// fprintf(stderr, "Result verification failed at element %d!\n", i);
// exit(EXIT_FAILURE);
// }
// }
// printf("Test PASSED\n");
// // Free device global memory
// err = hipFree(d_A);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = hipFree(d_B);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = hipFree(d_C);
// if (err != hipSuccess)
// {
// fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Free host memory
// free(h_A);
// free(h_B);
// free(h_C);
// // Reset the device and exit
// // hipDeviceReset causes the driver to clean up all state. While
// // not mandatory in normal operation, it is good practice. It is also
// // needed to ensure correct operation when the application is being
// // profiled. Calling hipDeviceReset causes all profile data to be
// // flushed before the application exits
// err = hipDeviceReset();
// printf("Done\n");
// return 0;
// }
| fc2a0b314fe2587ce56c681e83b394a5c24208b4.cu | /*-----------
*
* vectorAdd.cu
*
 * This is the source file of a vector-addition kernel.
 *
 * The kernel is adapted from the CUDA samples vectorAdd.cu.
*
* streamsOptBenchmark/vectorAdd.cu
*
* By Hao Li
*
*------------
*/
#include <stdio.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
// __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
__global__ void vectorAdd(const float *A, float *C, int numElements)
{
for(int l = 0; l < 1000000; l++)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
// C[i] = A[i] + B[i];
C[i] = A[i] + A[numElements + i];
}
}
}
// int
// main(void)
// {
// // Error code to check return values for CUDA calls
// cudaError_t err = cudaSuccess;
// // Print the vector length to be used, and compute its size
// int numElements = 50000;
// size_t size = numElements * sizeof(float);
// printf("[Vector addition of %d elements]\n", numElements);
// // Allocate the host input vector A
// float *h_A = (float *)malloc(size);
// // Allocate the host input vector B
// float *h_B = (float *)malloc(size);
// // Allocate the host output vector C
// float *h_C = (float *)malloc(size);
// // Verify that allocations succeeded
// if (h_A == NULL || h_B == NULL || h_C == NULL)
// {
// fprintf(stderr, "Failed to allocate host vectors!\n");
// exit(EXIT_FAILURE);
// }
// // Initialize the host input vectors
// for (int i = 0; i < numElements; ++i)
// {
// h_A[i] = rand()/(float)RAND_MAX;
// h_B[i] = rand()/(float)RAND_MAX;
// }
// // Allocate the device input vector A
// float *d_A = NULL;
// err = cudaMalloc((void **)&d_A, size);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Allocate the device input vector B
// float *d_B = NULL;
// err = cudaMalloc((void **)&d_B, size);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Allocate the device output vector C
// float *d_C = NULL;
// err = cudaMalloc((void **)&d_C, size);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Copy the host input vectors A and B in host memory to the device input vectors in
// // device memory
// printf("Copy input data from the host memory to the CUDA device\n");
// err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Launch the Vector Add CUDA Kernel
// int threadsPerBlock = 256;
// int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
// printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
// vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
// err = cudaGetLastError();
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Copy the device result vector in device memory to the host result vector
// // in host memory.
// printf("Copy output data from the CUDA device to the host memory\n");
// err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Verify that the result vector is correct
// for (int i = 0; i < numElements; ++i)
// {
// if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
// {
// fprintf(stderr, "Result verification failed at element %d!\n", i);
// exit(EXIT_FAILURE);
// }
// }
// printf("Test PASSED\n");
// // Free device global memory
// err = cudaFree(d_A);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = cudaFree(d_B);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = cudaFree(d_C);
// if (err != cudaSuccess)
// {
// fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// // Free host memory
// free(h_A);
// free(h_B);
// free(h_C);
// // Reset the device and exit
// // cudaDeviceReset causes the driver to clean up all state. While
// // not mandatory in normal operation, it is good practice. It is also
// // needed to ensure correct operation when the application is being
// // profiled. Calling cudaDeviceReset causes all profile data to be
// // flushed before the application exits
// err = cudaDeviceReset();
// printf("Done\n");
// return 0;
// }
|
ed129d89e5201817ad8f2ac2ce85891ea62fd238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_2_back;
int xdim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_2_back;
int ydim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_2_back;
int xdim1_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_2_back;
int ydim1_update_halo_kernel5_minus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_minus_2_back*(y)+xdim0_update_halo_kernel5_minus_2_back*ydim0_update_halo_kernel5_minus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_minus_2_back*(y)+xdim1_update_halo_kernel5_minus_2_back*ydim1_update_halo_kernel5_minus_2_back*(z))
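// The OPS_ACC macros flatten a relative (x,y,z) stencil offset into a 1-D
// index using each dataset's x and y extents.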
//user function
__device__
inline void update_halo_kernel5_minus_2_back_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = -vol_flux_z[OPS_ACC0(0,0,2)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = -mass_flux_z[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
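  // Advance each argument pointer to this thread's element: unit stride in x,
  // row stride xdim in y, plane stride xdim*ydim in z.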
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_minus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel5_minus_2_back * ydim0_update_halo_kernel5_minus_2_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_minus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel5_minus_2_back * ydim1_update_halo_kernel5_minus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_minus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,92)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(92,"update_halo_kernel5_minus_2_back");
OPS_kernels[92].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_2_back_h || ydim0 != ydim0_update_halo_kernel5_minus_2_back_h || xdim1 != xdim1_update_halo_kernel5_minus_2_back_h || ydim1 != ydim1_update_halo_kernel5_minus_2_back_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_minus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_minus_2_back_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_minus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_minus_2_back_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_minus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_minus_2_back_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_minus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_minus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
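  // Stage the small `fields` flag array (arg2) through OPS's reusable
  // host/device constants buffers so the kernel can read it from device memory.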
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[92].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[92].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[92].mpi_time += t2-t1;
OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 92;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 92;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_minus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(92,"update_halo_kernel5_minus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
| ed129d89e5201817ad8f2ac2ce85891ea62fd238.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_2_back;
int xdim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_2_back;
int ydim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_2_back;
int xdim1_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_2_back;
int ydim1_update_halo_kernel5_minus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_minus_2_back*(y)+xdim0_update_halo_kernel5_minus_2_back*ydim0_update_halo_kernel5_minus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_minus_2_back*(y)+xdim1_update_halo_kernel5_minus_2_back*ydim1_update_halo_kernel5_minus_2_back*(z))
//user function
__device__
inline void update_halo_kernel5_minus_2_back_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = -vol_flux_z[OPS_ACC0(0,0,2)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = -mass_flux_z[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_minus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel5_minus_2_back * ydim0_update_halo_kernel5_minus_2_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_minus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel5_minus_2_back * ydim1_update_halo_kernel5_minus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_minus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,92)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(92,"update_halo_kernel5_minus_2_back");
OPS_kernels[92].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_2_back_h || ydim0 != ydim0_update_halo_kernel5_minus_2_back_h || xdim1 != xdim1_update_halo_kernel5_minus_2_back_h || ydim1 != ydim1_update_halo_kernel5_minus_2_back_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_minus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_minus_2_back_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_minus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_minus_2_back_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_minus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_minus_2_back_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_minus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_minus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[92].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_minus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[92].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[92].mpi_time += t2-t1;
OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 92;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 92;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_minus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(92,"update_halo_kernel5_minus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
ebd752076c0b2b2b2285ea9d33f4ade4c20b0723.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include "new_kern.hip"
extern "C"
{
void comp_phash(float* dpos, uint* d_pHash, uint* d_pIndex, uint* d_CellHash, uint numParticles, uint numGridCells)
{
uint numThreads = 256;
uint numBlocks = iDivUp(numParticles, numThreads);
hipLaunchKernelGGL(( comp_phashK), dim3(numBlocks), dim3(numThreads), 0, 0, (float4*) dpos, d_pHash, d_pIndex, d_CellHash);
getLastCudaError("in phash computation");
}
void setNParameters(NewParams *hostParams){
hipMemcpyToSymbol(nparams, hostParams, sizeof(NewParams));
}
void find_cellStart(uint* cellStart, uint* cellEnd, uint* phash, uint numParticles, uint numCells)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
uint sMemSize = sizeof(uint)*(numThreads+1);
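	// One extra shared slot (numThreads+1): presumably so each thread can also
	// see the previous particle's hash when detecting cell boundaries, as in the
	// CUDA particles sample.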
hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint));
hipLaunchKernelGGL(( findCellStartK), dim3(numBlocks), dim3(numThreads), sMemSize, 0, cellStart, cellEnd, phash);
}
void reorder(uint* d_pSortedIndex, float* dSortedA, float* dSortedB,
float* oldA, float* oldB, uint numParticles)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
hipLaunchKernelGGL(( reorderK), dim3(numBlocks), dim3(numThreads), 0, 0, d_pSortedIndex, (float4*)dSortedA,
(float4*)dSortedB, (float4*)oldA, (float4*)oldB);
}
uint vertEdge(uint* connections, const uint* nlist, const uint* num_neigh, const float* dPos,
float maxth, float maxdist, uint numParticles)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
hipLaunchKernelGGL(( vertEdgeK), dim3(numBlocks),dim3(numThreads), 0, 0, nlist, num_neigh,(float4*) dPos, connections, maxth, maxdist*maxdist);
thrust::device_ptr<uint> conns(connections);
uint total = thrust::reduce(conns, conns+numParticles, 0,thrust::plus<uint>());
getLastCudaError("vertical connectivity");
return total;
}
void magForces(const float* dSortedPos, const float* dIntPos, float* newPos, float* dForce,
const float* dMom, const uint* nlist, const uint* num_neigh, uint numParticles, float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
hipFuncSetCacheConfig(magForcesK, hipFuncCachePreferL1);
hipBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
hipBindTexture(0, mom_tex, dMom, numParticles*sizeof(float4));
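	// Positions and dipole moments are bound to textures so the kernel reads them
	// through the cached texture path; both are unbound after the launch.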
hipLaunchKernelGGL(( magForcesK), dim3(numBlocks),dim3(numThreads), 0, 0, (float4*)dSortedPos, (float4*) dMom,
(float4*) dIntPos, nlist, num_neigh, (float4*) dForce,
(float4*) newPos, deltaTime);
hipUnbindTexture(pos_tex);
hipUnbindTexture(mom_tex);
getLastCudaError("Magforces error");
}
void finiteDip(const float* dSortedPos, const float* dIntPos, float* newPos, float* dForce,
const uint* nlist, const uint* num_neigh, uint numParticles,
float dipole_d, float F0, float sigma_0, float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
hipFuncSetCacheConfig(finiteDipK, hipFuncCachePreferL1);
hipBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
hipLaunchKernelGGL(( finiteDipK), dim3(numBlocks),dim3(numThreads), 0, 0, (float4*)dSortedPos, (float4*) dIntPos,
nlist, num_neigh, (float4*) dForce, (float4*) newPos,
dipole_d, F0, sigma_0,deltaTime);
hipUnbindTexture(pos_tex);
getLastCudaError("Finite Magforces error");
}
void pointDip(const float* dSortedPos, const float* dIntPos, float* newPos, float* dForce,
const uint* nlist, const uint* num_neigh, uint numParticles,
float forceFactor, float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 192;
uint numBlocks = iDivUp(numParticles, numThreads);
hipFuncSetCacheConfig(pointDipK, hipFuncCachePreferL1);
hipBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
hipLaunchKernelGGL(( pointDipK), dim3(numBlocks),dim3(numThreads), 0, 0, (float4*)dSortedPos, (float4*) dIntPos,
nlist, num_neigh, (float4*) dForce, (float4*) newPos,
forceFactor,deltaTime);
hipUnbindTexture(pos_tex);
getLastCudaError("Point forces error");
}
void magFricForces(const float* dSortedPos, const float* dIntPos, float* newPos,
float* dForceOut, float* dMom, const float* dForceIn, const uint* nlist,
const uint* num_neigh, uint numParticles, float static_fric,float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
	hipFuncSetCacheConfig(magFricForcesK, hipFuncCachePreferL1);
hipBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
hipBindTexture(0, mom_tex, dMom, numParticles*sizeof(float4));
hipLaunchKernelGGL(( magFricForcesK), dim3(numBlocks),dim3(numThreads), 0, 0, (float4*)dSortedPos, (float4*) dMom,
(float4*) dForceIn, (float4*) dIntPos, nlist, num_neigh,
(float4*) dForceOut,(float4*) newPos,static_fric,deltaTime);
hipUnbindTexture(pos_tex);
hipUnbindTexture(mom_tex);
getLastCudaError("Magforces error");
}
void mutualMagn(const float* pos, const float* oldMag, float* newMag,
const uint* nlist, const uint* numNeigh, uint numParticles)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
	hipFuncSetCacheConfig(mutualMagnK, hipFuncCachePreferL1);
hipBindTexture(0, pos_tex, pos, numParticles*sizeof(float4));
hipBindTexture(0, mom_tex, oldMag, numParticles*sizeof(float4));
hipLaunchKernelGGL(( mutualMagnK), dim3(numBlocks), dim3(numThreads), 0, 0, (float4*) pos, (float4*) oldMag,
(float4*) newMag, nlist, numNeigh);
hipUnbindTexture(pos_tex);
hipUnbindTexture(mom_tex);
getLastCudaError("Mutual Magn error");
}
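// The RK4 wrapper below passes the four intermediate positions and force
// evaluations to integrateRK4K (presumably defined in the included new_kern
// source), which combines them to advance the particles by deltaTime.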
void integrateRK4(const float* oldPos, float* PosA, const float* PosB,
const float* PosC, const float* PosD, float* forceA,
const float* forceB, const float* forceC, const float* forceD,
float deltaTime, uint numParticles)
{
uint numThreads = 256;
uint numBlocks = iDivUp(numParticles, numThreads);
hipLaunchKernelGGL(( integrateRK4K), dim3(numBlocks), dim3(numThreads), 0, 0,
(float4*) oldPos,
(float4*) PosA,
(float4*) PosB,
(float4*) PosC,
(float4*) PosD,
(float4*) forceA,
(float4*) forceB,
(float4*) forceC,
(float4*) forceD,
deltaTime,
numParticles);
}
void bogacki_ynp1( const float* d_yn, const float* d_k1, const float* d_k2,
const float* d_k3, float* d_ynp1, float deltaTime, uint numParticles) {
uint numThreads = 256;
uint numBlocks = iDivUp(numParticles, numThreads);
hipLaunchKernelGGL(( bogacki_ynp1k), dim3(numBlocks), dim3(numThreads), 0, 0,
(float4*) d_yn,
(float4*) d_k1,
(float4*) d_k2,
(float4*) d_k3,
(float4*) d_ynp1,
deltaTime,
numParticles);
getLastCudaError("bogacki_ynp1");
}
void collision_new( const float* dSortedPos, const float* dOldVel, const uint* nlist,
const uint* num_neigh, float* dNewVel, float* dNewPos, uint numParticles,
float raxExp, float deltaTime)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
hipFuncSetCacheConfig(collisionK, hipFuncCachePreferL1);
hipBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
hipBindTexture(0, vel_tex, dOldVel, numParticles*sizeof(float4));
hipLaunchKernelGGL(( collisionK), dim3(numBlocks),dim3(numThreads), 0, 0, (float4*)dSortedPos, (float4*) dOldVel, nlist,
num_neigh, (float4*) dNewVel, (float4*) dNewPos, raxExp, deltaTime);
hipUnbindTexture(pos_tex);
hipUnbindTexture(vel_tex);
getLastCudaError("collision");
}
}
| ebd752076c0b2b2b2285ea9d33f4ade4c20b0723.cu | #include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include "new_kern.cu"
extern "C"
{
void comp_phash(float* dpos, uint* d_pHash, uint* d_pIndex, uint* d_CellHash, uint numParticles, uint numGridCells)
{
uint numThreads = 256;
uint numBlocks = iDivUp(numParticles, numThreads);
comp_phashK<<<numBlocks, numThreads>>> ( (float4*) dpos, d_pHash, d_pIndex, d_CellHash);
getLastCudaError("in phash computation");
}
void setNParameters(NewParams *hostParams){
cudaMemcpyToSymbol(nparams, hostParams, sizeof(NewParams));
}
void find_cellStart(uint* cellStart, uint* cellEnd, uint* phash, uint numParticles, uint numCells)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
uint sMemSize = sizeof(uint)*(numThreads+1);
cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint));
findCellStartK<<< numBlocks, numThreads, sMemSize>>>(cellStart, cellEnd, phash);
}
void reorder(uint* d_pSortedIndex, float* dSortedA, float* dSortedB,
float* oldA, float* oldB, uint numParticles)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
reorderK<<<numBlocks, numThreads>>>(d_pSortedIndex, (float4*)dSortedA,
(float4*)dSortedB, (float4*)oldA, (float4*)oldB);
}
uint vertEdge(uint* connections, const uint* nlist, const uint* num_neigh, const float* dPos,
float maxth, float maxdist, uint numParticles)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
vertEdgeK<<<numBlocks,numThreads>>>(nlist, num_neigh,(float4*) dPos, connections, maxth, maxdist*maxdist);
thrust::device_ptr<uint> conns(connections);
uint total = thrust::reduce(conns, conns+numParticles, 0,thrust::plus<uint>());
getLastCudaError("vertical connectivity");
return total;
}
void magForces(const float* dSortedPos, const float* dIntPos, float* newPos, float* dForce,
const float* dMom, const uint* nlist, const uint* num_neigh, uint numParticles, float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
cudaFuncSetCacheConfig(magForcesK, cudaFuncCachePreferL1);
cudaBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
cudaBindTexture(0, mom_tex, dMom, numParticles*sizeof(float4));
magForcesK<<<numBlocks,numThreads>>>( (float4*)dSortedPos, (float4*) dMom,
(float4*) dIntPos, nlist, num_neigh, (float4*) dForce,
(float4*) newPos, deltaTime);
cudaUnbindTexture(pos_tex);
cudaUnbindTexture(mom_tex);
getLastCudaError("Magforces error");
}
void finiteDip(const float* dSortedPos, const float* dIntPos, float* newPos, float* dForce,
const uint* nlist, const uint* num_neigh, uint numParticles,
float dipole_d, float F0, float sigma_0, float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
cudaFuncSetCacheConfig(finiteDipK, cudaFuncCachePreferL1);
cudaBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
finiteDipK<<<numBlocks,numThreads>>>( (float4*)dSortedPos, (float4*) dIntPos,
nlist, num_neigh, (float4*) dForce, (float4*) newPos,
dipole_d, F0, sigma_0,deltaTime);
cudaUnbindTexture(pos_tex);
getLastCudaError("Finite Magforces error");
}
void pointDip(const float* dSortedPos, const float* dIntPos, float* newPos, float* dForce,
const uint* nlist, const uint* num_neigh, uint numParticles,
float forceFactor, float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 192;
uint numBlocks = iDivUp(numParticles, numThreads);
cudaFuncSetCacheConfig(pointDipK, cudaFuncCachePreferL1);
cudaBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
pointDipK<<<numBlocks,numThreads>>>( (float4*)dSortedPos, (float4*) dIntPos,
nlist, num_neigh, (float4*) dForce, (float4*) newPos,
forceFactor,deltaTime);
cudaUnbindTexture(pos_tex);
getLastCudaError("Point forces error");
}
void magFricForces(const float* dSortedPos, const float* dIntPos, float* newPos,
float* dForceOut, float* dMom, const float* dForceIn, const uint* nlist,
const uint* num_neigh, uint numParticles, float static_fric,float deltaTime)
{
assert(newPos != dIntPos);
assert(newPos != dSortedPos);
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
	cudaFuncSetCacheConfig(magFricForcesK, cudaFuncCachePreferL1);
cudaBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
cudaBindTexture(0, mom_tex, dMom, numParticles*sizeof(float4));
magFricForcesK<<<numBlocks,numThreads>>>((float4*)dSortedPos, (float4*) dMom,
(float4*) dForceIn, (float4*) dIntPos, nlist, num_neigh,
(float4*) dForceOut,(float4*) newPos,static_fric,deltaTime);
cudaUnbindTexture(pos_tex);
cudaUnbindTexture(mom_tex);
getLastCudaError("Magforces error");
}
void mutualMagn(const float* pos, const float* oldMag, float* newMag,
const uint* nlist, const uint* numNeigh, uint numParticles)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
	cudaFuncSetCacheConfig(mutualMagnK, cudaFuncCachePreferL1);
cudaBindTexture(0, pos_tex, pos, numParticles*sizeof(float4));
cudaBindTexture(0, mom_tex, oldMag, numParticles*sizeof(float4));
mutualMagnK<<<numBlocks, numThreads>>>( (float4*) pos, (float4*) oldMag,
(float4*) newMag, nlist, numNeigh);
cudaUnbindTexture(pos_tex);
cudaUnbindTexture(mom_tex);
getLastCudaError("Mutual Magn error");
}
void integrateRK4(const float* oldPos, float* PosA, const float* PosB,
const float* PosC, const float* PosD, float* forceA,
const float* forceB, const float* forceC, const float* forceD,
float deltaTime, uint numParticles)
{
uint numThreads = 256;
uint numBlocks = iDivUp(numParticles, numThreads);
integrateRK4K<<<numBlocks, numThreads>>>(
(float4*) oldPos,
(float4*) PosA,
(float4*) PosB,
(float4*) PosC,
(float4*) PosD,
(float4*) forceA,
(float4*) forceB,
(float4*) forceC,
(float4*) forceD,
deltaTime,
numParticles);
}
void bogacki_ynp1( const float* d_yn, const float* d_k1, const float* d_k2,
const float* d_k3, float* d_ynp1, float deltaTime, uint numParticles) {
uint numThreads = 256;
uint numBlocks = iDivUp(numParticles, numThreads);
bogacki_ynp1k<<<numBlocks, numThreads>>>(
(float4*) d_yn,
(float4*) d_k1,
(float4*) d_k2,
(float4*) d_k3,
(float4*) d_ynp1,
deltaTime,
numParticles);
getLastCudaError("bogacki_ynp1");
}
void collision_new( const float* dSortedPos, const float* dOldVel, const uint* nlist,
const uint* num_neigh, float* dNewVel, float* dNewPos, uint numParticles,
float raxExp, float deltaTime)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
cudaFuncSetCacheConfig(collisionK, cudaFuncCachePreferL1);
cudaBindTexture(0, pos_tex, dSortedPos, numParticles*sizeof(float4));
cudaBindTexture(0, vel_tex, dOldVel, numParticles*sizeof(float4));
collisionK<<<numBlocks,numThreads>>>( (float4*)dSortedPos, (float4*) dOldVel, nlist,
num_neigh, (float4*) dNewVel, (float4*) dNewPos, raxExp, deltaTime);
cudaUnbindTexture(pos_tex);
cudaUnbindTexture(vel_tex);
getLastCudaError("collision");
}
}
|
5c2a2f4fedefb64745441d1a605e52ea47c68e0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
double get_time()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (double)tv.tv_sec + (double)1e-6 * tv.tv_usec;
}
__global__ void vec_add(int *x, int *y, int *z, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
z[i] = x[i] + y[i];
}
}
const int n = 1 << 24;
int *h_x, *h_y, *h_z;
int *d_x, *d_y, *d_z;
double write_time, kernel_time, read_time, t;
int main()
{
/* host buffer setup */
h_x = (int*)malloc(sizeof(int) * n);
h_y = (int*)malloc(sizeof(int) * n);
h_z = (int*)malloc(sizeof(int) * n);
for (int i = 0; i < n; ++i) {
h_x[i] = rand() % 100;
h_y[i] = rand() % 100;
}
/* device buffer setup */
hipMalloc(&d_x, sizeof(int) * n);
hipMalloc(&d_y, sizeof(int) * n);
hipMalloc(&d_z, sizeof(int) * n);
/* host to device memory transfer */
t = get_time();
hipMemcpy(d_x, h_x, sizeof(int) * n, hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, sizeof(int) * n, hipMemcpyHostToDevice);
write_time = get_time() - t;
/* kernel execution */
int threads_per_block = 1024;
int num_of_blocks = (n + threads_per_block - 1) / threads_per_block;
t = get_time();
hipLaunchKernelGGL(( vec_add), dim3(num_of_blocks), dim3(threads_per_block), 0, 0, d_x, d_y, d_z, n);
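    /* the launch is asynchronous, so synchronize before stopping the timer
       to capture actual kernel execution time */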
hipDeviceSynchronize();
kernel_time = get_time() - t;
/* device to host memory transfer */
t = get_time();
hipMemcpy(h_z, d_z, sizeof(int) * n, hipMemcpyDeviceToHost);
read_time = get_time() - t;
/* verification */
for (int i = 0; i < n; ++i) {
if (h_x[i] + h_y[i] != h_z[i]) {
printf("Incorrect (i = %d : %d + %d != %d)\n",
i, h_x[i], h_y[i], h_z[i]);
break;
}
}
free(h_x);
free(h_y);
free(h_z);
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
printf("write buffer: %f seconds\n", write_time);
printf("kernel: %f seconds\n", kernel_time);
printf("read buffer: %f seconds\n\n", read_time);
printf("Finished!\n");
return 0;
}
| 5c2a2f4fedefb64745441d1a605e52ea47c68e0a.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
double get_time()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (double)tv.tv_sec + (double)1e-6 * tv.tv_usec;
}
__global__ void vec_add(int *x, int *y, int *z, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
z[i] = x[i] + y[i];
}
}
const int n = 1 << 24;
int *h_x, *h_y, *h_z;
int *d_x, *d_y, *d_z;
double write_time, kernel_time, read_time, t;
int main()
{
/* host buffer setup */
h_x = (int*)malloc(sizeof(int) * n);
h_y = (int*)malloc(sizeof(int) * n);
h_z = (int*)malloc(sizeof(int) * n);
for (int i = 0; i < n; ++i) {
h_x[i] = rand() % 100;
h_y[i] = rand() % 100;
}
/* device buffer setup */
cudaMalloc(&d_x, sizeof(int) * n);
cudaMalloc(&d_y, sizeof(int) * n);
cudaMalloc(&d_z, sizeof(int) * n);
/* host to device memory transfer */
t = get_time();
cudaMemcpy(d_x, h_x, sizeof(int) * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, sizeof(int) * n, cudaMemcpyHostToDevice);
write_time = get_time() - t;
/* kernel execution */
int threads_per_block = 1024;
int num_of_blocks = (n + threads_per_block - 1) / threads_per_block;
t = get_time();
vec_add<<<num_of_blocks, threads_per_block>>>(d_x, d_y, d_z, n);
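    /* kernel launches return immediately; synchronize so kernel_time measures
       execution rather than just launch overhead */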
cudaDeviceSynchronize();
kernel_time = get_time() - t;
/* device to host memory transfer */
t = get_time();
cudaMemcpy(h_z, d_z, sizeof(int) * n, cudaMemcpyDeviceToHost);
read_time = get_time() - t;
/* verification */
for (int i = 0; i < n; ++i) {
if (h_x[i] + h_y[i] != h_z[i]) {
printf("Incorrect (i = %d : %d + %d != %d)\n",
i, h_x[i], h_y[i], h_z[i]);
break;
}
}
free(h_x);
free(h_y);
free(h_z);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
printf("write buffer: %f seconds\n", write_time);
printf("kernel: %f seconds\n", kernel_time);
printf("read buffer: %f seconds\n\n", read_time);
printf("Finished!\n");
return 0;
}
|
78471e9eeadf0f11612e5bff4076da65da6ad03d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void mykernel(void)
{
}
int main(void)
{
hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, );
printf("Hello World!\n");
return 0;
}
| 78471e9eeadf0f11612e5bff4076da65da6ad03d.cu | #include <stdio.h>
__global__ void mykernel(void)
{
}
int main(void)
{
mykernel<<<1,1>>>();
printf("Hello World!\n");
return 0;
}
|
412f5b38cf8ae0e2e7ca95873cc284fd0f994482.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math/elementwise.h"
#include "dragon/utils/math/functional.h"
#include "dragon/utils/math/utils.h"
namespace dragon {
namespace math {
namespace {
/*!
* Unary Functors
*/
#define DEFINE_UNARY_FUNCTOR(name, func) \
template <typename T> \
struct name##Functor { \
inline __device__ T operator()(const T& x) const { \
return func(x); \
} \
}
DEFINE_UNARY_FUNCTOR(Neg, -);
DEFINE_UNARY_FUNCTOR(Ceil, ceil);
DEFINE_UNARY_FUNCTOR(Cos, cos);
DEFINE_UNARY_FUNCTOR(Exp, exp);
DEFINE_UNARY_FUNCTOR(Floor, floor);
DEFINE_UNARY_FUNCTOR(Log, log);
DEFINE_UNARY_FUNCTOR(Round, round);
DEFINE_UNARY_FUNCTOR(Rsqrt, rsqrt);
DEFINE_UNARY_FUNCTOR(Sin, sin);
DEFINE_UNARY_FUNCTOR(Sqrt, sqrt);
#if __CUDA_ARCH__ >= 530
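// Native half/half2 math intrinsics require compute capability 5.3+; older
// architectures use the float-converting fallbacks defined further below.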
DEFINE_UNARY_FUNCTOR(NegHalf, __hneg);
DEFINE_UNARY_FUNCTOR(NegHalf2, __hneg2);
DEFINE_UNARY_FUNCTOR(CeilHalf, hceil);
DEFINE_UNARY_FUNCTOR(CeilHalf2, h2ceil);
DEFINE_UNARY_FUNCTOR(CosHalf, hcos);
DEFINE_UNARY_FUNCTOR(CosHalf2, h2cos);
DEFINE_UNARY_FUNCTOR(ExpHalf, hexp);
DEFINE_UNARY_FUNCTOR(ExpHalf2, h2exp);
DEFINE_UNARY_FUNCTOR(FloorHalf, hfloor);
DEFINE_UNARY_FUNCTOR(FloorHalf2, h2floor);
DEFINE_UNARY_FUNCTOR(InvHalf, hrcp);
DEFINE_UNARY_FUNCTOR(InvHalf2, h2rcp);
DEFINE_UNARY_FUNCTOR(LogHalf, hlog);
DEFINE_UNARY_FUNCTOR(LogHalf2, h2log);
DEFINE_UNARY_FUNCTOR(RoundHalf, hrint);
DEFINE_UNARY_FUNCTOR(RoundHalf2, h2rint);
DEFINE_UNARY_FUNCTOR(RsqrtHalf, hrsqrt);
DEFINE_UNARY_FUNCTOR(RsqrtHalf2, h2rsqrt);
DEFINE_UNARY_FUNCTOR(SinHalf, hsin);
DEFINE_UNARY_FUNCTOR(SinHalf2, h2sin);
DEFINE_UNARY_FUNCTOR(SqrtHalf, hsqrt);
DEFINE_UNARY_FUNCTOR(SqrtHalf2, h2sqrt);
#endif
#undef DEFINE_UNARY_FUNCTOR
#define DEFINE_UNARY_FUNCTOR(name, func) \
template <typename T> \
struct name##Functor { \
inline __device__ T operator()(const T& x) const { \
return __float2half(func(__half2float(x))); \
} \
}
#if __CUDA_ARCH__ < 530
DEFINE_UNARY_FUNCTOR(NegHalf, -);
DEFINE_UNARY_FUNCTOR(CeilHalf, ceil);
DEFINE_UNARY_FUNCTOR(CosHalf, cos);
DEFINE_UNARY_FUNCTOR(ExpHalf, exp);
DEFINE_UNARY_FUNCTOR(FloorHalf, floor);
DEFINE_UNARY_FUNCTOR(InvHalf, __frcp_rn);
DEFINE_UNARY_FUNCTOR(LogHalf, log);
DEFINE_UNARY_FUNCTOR(RoundHalf, round);
DEFINE_UNARY_FUNCTOR(RsqrtHalf, rsqrt);
DEFINE_UNARY_FUNCTOR(SinHalf, sin);
DEFINE_UNARY_FUNCTOR(SqrtHalf, sqrt);
#endif
#undef DEFINE_UNARY_FUNCTOR
#define DEFINE_UNARY_FUNCTOR(name, func) \
template <typename T> \
struct name##Functor { \
inline __device__ T operator()(const T& x) const { \
const float2 val = __half22float2(x); \
return __floats2half2_rn(func(val.x), func(val.y)); \
} \
}
#if __CUDA_ARCH__ < 530
DEFINE_UNARY_FUNCTOR(NegHalf2, -);
DEFINE_UNARY_FUNCTOR(CeilHalf2, ceil);
DEFINE_UNARY_FUNCTOR(CosHalf2, cos);
DEFINE_UNARY_FUNCTOR(ExpHalf2, exp);
DEFINE_UNARY_FUNCTOR(FloorHalf2, floor);
DEFINE_UNARY_FUNCTOR(InvHalf2, __frcp_rn);
DEFINE_UNARY_FUNCTOR(LogHalf2, log);
DEFINE_UNARY_FUNCTOR(RoundHalf2, round);
DEFINE_UNARY_FUNCTOR(RsqrtHalf2, rsqrt);
DEFINE_UNARY_FUNCTOR(SinHalf2, sin);
DEFINE_UNARY_FUNCTOR(SqrtHalf2, sqrt);
#endif
#undef DEFINE_UNARY_FUNCTOR
/*!
* Unary Function Kernels
*/
template <typename InputT, typename OutputT, class Functor>
__global__ void
_SimpleUnaryFunc(const int N, const Functor op, const InputT* x, OutputT* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = op(x[i]);
}
}
template <typename T>
__global__ void _Abs(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const T val = x[i];
y[i] = val > 0 ? val : -val;
}
}
template <>
__global__ void _Abs<half>(const int N, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float val = __half2float(x[i]);
y[i] = __float2half(val > 0 ? val : -val);
}
}
template <>
__global__ void _Abs<half2>(const int N, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] = __floats2half2_rn(
val.x > 0.f ? val.x : -val.x, val.y > 0.f ? val.y : -val.y);
}
}
__global__ void _Inv(const int N, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __frcp_rn(x[i]);
}
}
__global__ void _Inv(const int N, const double* x, double* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __drcp_rn(x[i]);
}
}
template <typename T>
__global__ void _InvStd(const int N, const T eps, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = rsqrt(x[i] + eps);
}
}
__global__ void _InvStd(const int N, const float eps, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(rsqrt(__half2float(x[i]) + eps));
}
}
__global__ void
_InvStd(const int N, const float eps, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] = __floats2half2_rn(rsqrt(val.x + eps), rsqrt(val.y + eps));
}
}
template <typename T>
__global__ void _Powx(const int N, const T exponent, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = pow(x[i], exponent);
}
}
__global__ void
_Powx(const int N, const float exponent, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(pow(__half2float(x[i]), exponent));
}
}
__global__ void
_Powx(const int N, const float exponent, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] = __floats2half2_rn(pow(val.x, exponent), pow(val.y, exponent));
}
}
template <typename T>
__global__ void _Set(const int N, const T alpha, T* x) {
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = alpha;
}
}
template <typename T>
__global__ void _Sign(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::Sign(x[i]);
}
}
template <>
__global__ void _Sign<uint8_t>(const int N, const uint8_t* x, uint8_t* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i] > 0 ? uint8_t(1) : uint8_t(0);
}
}
template <>
__global__ void _Sign<half>(const int N, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float val = __half2float(x[i]);
y[i] = __float2half(math::utils::Sign(val));
}
}
template <>
__global__ void _Sign<half2>(const int N, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] =
__floats2half2_rn(math::utils::Sign(val.x), math::utils::Sign(val.y));
}
}
template <typename T>
__global__ void _Square(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::Square(x[i]);
}
}
template <typename T>
__global__ void _NotZero(const int N, const T* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i] != T(0) ? true : false;
}
}
template <>
__global__ void _NotZero<half>(const int N, const half* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __half2float(x[i]) != 0.f ? true : false;
}
}
template <typename T>
__global__ void _IsInf(const int N, const T* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsInf(x[i]);
}
}
template <>
__global__ void _IsInf<half>(const int N, const half* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsInf(x[i]);
}
}
template <typename T>
__global__ void _IsNaN(const int N, const T* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(x[i]);
}
}
template <>
__global__ void _IsNaN<half>(const int N, const half* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(x[i]);
}
}
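// _ReplaceNaN loads x through __ldg so the reads go through the read-only
// data cache.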
template <typename T>
__global__ void _ReplaceNaN(const int N, const T value, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(__ldg(x + i)) ? value : __ldg(x + i);
}
}
template <>
__global__ void
_ReplaceNaN<half>(const int N, const half value, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(__ldg(x + i)) ? value : __ldg(x + i);
}
}
template <typename T, class Functor>
__global__ void
_Bias(const int N, const T beta, const Functor op, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = op(x[i], beta);
}
}
template <typename T>
__global__ void
_ApplyMask(const int N, const T alpha, const uint8_t* mask, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i] * T(mask[i]) * alpha;
}
}
__global__ void _ApplyMask(
const int N,
const float alpha,
const uint8_t* mask,
const half* x,
half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(__half2float(x[i]) * (alpha * float(mask[i])));
}
}
/*!
* Binary Function Kernels
*/
template <typename InputT, typename OutputT, class Functor>
__global__ void _SimpleBinaryFunc(
const int N,
const Functor op,
const InputT* a,
const InputT* b,
OutputT* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = op(a[i], b[i]);
}
}
template <typename T>
__global__ void
_Where(const int N, const T* a, const T* b, const bool* c, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = c[i] ? a[i] : b[i];
}
}
} // namespace
#define DEFINE_UNARY_FUNC(name, InputT, OutputT, Functor) \
template <> \
DRAGON_API void name<InputT, CUDAContext>( \
const int N, const InputT* x, OutputT* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _SimpleUnaryFunc), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, Functor<InputT>(), x, y); \
}
DEFINE_UNARY_FUNC(BitwiseNot, bool, bool, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, uint8_t, uint8_t, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, int8_t, int8_t, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, int, int, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, int64_t, int64_t, math::BitNotFunctor);
DEFINE_UNARY_FUNC(Not, bool, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, uint8_t, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, int8_t, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, int, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, int64_t, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, float16, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, float, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, double, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Neg, int8_t, int8_t, NegFunctor);
DEFINE_UNARY_FUNC(Neg, int, int, NegFunctor);
DEFINE_UNARY_FUNC(Neg, int64_t, int64_t, NegFunctor);
DEFINE_UNARY_FUNC(Neg, float, float, NegFunctor);
DEFINE_UNARY_FUNC(Neg, double, double, NegFunctor);
DEFINE_UNARY_FUNC(Ceil, float, float, CeilFunctor);
DEFINE_UNARY_FUNC(Ceil, double, double, CeilFunctor);
DEFINE_UNARY_FUNC(Cos, float, float, CosFunctor);
DEFINE_UNARY_FUNC(Cos, double, double, CosFunctor);
DEFINE_UNARY_FUNC(Exp, float, float, ExpFunctor);
DEFINE_UNARY_FUNC(Exp, double, double, ExpFunctor);
DEFINE_UNARY_FUNC(Floor, float, float, FloorFunctor);
DEFINE_UNARY_FUNC(Floor, double, double, FloorFunctor);
DEFINE_UNARY_FUNC(Log, float, float, LogFunctor);
DEFINE_UNARY_FUNC(Log, double, double, LogFunctor);
DEFINE_UNARY_FUNC(Round, float, float, RoundFunctor);
DEFINE_UNARY_FUNC(Round, double, double, RoundFunctor);
DEFINE_UNARY_FUNC(Rsqrt, float, float, RsqrtFunctor);
DEFINE_UNARY_FUNC(Rsqrt, double, double, RsqrtFunctor);
DEFINE_UNARY_FUNC(Sin, float, float, SinFunctor);
DEFINE_UNARY_FUNC(Sin, double, double, SinFunctor);
DEFINE_UNARY_FUNC(Sqrt, float, float, SqrtFunctor);
DEFINE_UNARY_FUNC(Sqrt, double, double, SqrtFunctor);
#undef DEFINE_UNARY_FUNC
#define DEFINE_UNARY_FUNC(name, T) \
template <> \
DRAGON_API void name<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), N, x, y); \
}
DEFINE_UNARY_FUNC(Abs, uint8_t);
DEFINE_UNARY_FUNC(Abs, int8_t);
DEFINE_UNARY_FUNC(Abs, int);
DEFINE_UNARY_FUNC(Abs, int64_t);
DEFINE_UNARY_FUNC(Abs, float);
DEFINE_UNARY_FUNC(Abs, double);
DEFINE_UNARY_FUNC(Inv, float);
DEFINE_UNARY_FUNC(Inv, double);
DEFINE_UNARY_FUNC(Sign, uint8_t);
DEFINE_UNARY_FUNC(Sign, int8_t);
DEFINE_UNARY_FUNC(Sign, int);
DEFINE_UNARY_FUNC(Sign, int64_t);
DEFINE_UNARY_FUNC(Sign, float);
DEFINE_UNARY_FUNC(Sign, double);
DEFINE_UNARY_FUNC(Square, uint8_t);
DEFINE_UNARY_FUNC(Square, int8_t);
DEFINE_UNARY_FUNC(Square, int);
DEFINE_UNARY_FUNC(Square, int64_t);
DEFINE_UNARY_FUNC(Square, float);
DEFINE_UNARY_FUNC(Square, double);
#undef DEFINE_UNARY_FUNC
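// float16 specializations: when N is even, elements are processed as packed
// half2 pairs for better throughput; odd N falls back to scalar half.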
#define DEFINE_UNARY_FUNC(name, HalfFunctor, Half2Functor) \
template <> \
DRAGON_API void name<float16, CUDAContext>( \
const int N, const float16* x, float16* y, CUDAContext* ctx) { \
if ((N & 1) == 0) { \
hipLaunchKernelGGL(( _SimpleUnaryFunc), \
CUDA_BLOCKS(N >> 1), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
N >> 1, \
Half2Functor<half2>(), \
reinterpret_cast<const half2*>(x), \
reinterpret_cast<half2*>(y)); \
} else { \
hipLaunchKernelGGL(( _SimpleUnaryFunc), \
CUDA_BLOCKS(N), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
N, \
HalfFunctor<half>(), \
reinterpret_cast<const half*>(x), \
reinterpret_cast<half*>(y)); \
} \
}
DEFINE_UNARY_FUNC(Neg, NegHalfFunctor, NegHalf2Functor);
DEFINE_UNARY_FUNC(Ceil, CeilHalfFunctor, CeilHalf2Functor);
DEFINE_UNARY_FUNC(Cos, CosHalfFunctor, CosHalf2Functor);
DEFINE_UNARY_FUNC(Exp, ExpHalfFunctor, ExpHalf2Functor);
DEFINE_UNARY_FUNC(Floor, FloorHalfFunctor, FloorHalf2Functor);
DEFINE_UNARY_FUNC(Log, LogHalfFunctor, LogHalf2Functor);
DEFINE_UNARY_FUNC(Inv, InvHalfFunctor, InvHalf2Functor);
DEFINE_UNARY_FUNC(Round, RoundHalfFunctor, RoundHalf2Functor);
DEFINE_UNARY_FUNC(Rsqrt, RsqrtHalfFunctor, RsqrtHalf2Functor);
DEFINE_UNARY_FUNC(Sin, SinHalfFunctor, SinHalf2Functor);
DEFINE_UNARY_FUNC(Sqrt, SqrtHalfFunctor, SqrtHalf2Functor);
#undef DEFINE_UNARY_FUNC
#define DEFINE_UNARY_FUNC(name) \
template <> \
DRAGON_API void name<float16, CUDAContext>( \
const int N, const float16* x, float16* y, CUDAContext* ctx) { \
if ((N & 1) == 0) { \
hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N >> 1, \
reinterpret_cast<const half2*>(x), \
reinterpret_cast<half2*>(y)); \
} else { \
hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); \
} \
}
DEFINE_UNARY_FUNC(Abs);
DEFINE_UNARY_FUNC(Sign);
DEFINE_UNARY_FUNC(Square);
#undef DEFINE_UNARY_FUNC
#define DEFINE_SET_FUNC(T) \
template <> \
DRAGON_API void Set<T, CUDAContext>( \
const int N, const T value, T* y, CUDAContext* ctx) { \
if (value == T(0)) { \
CUDA_CHECK(hipMemsetAsync(y, 0, sizeof(T) * N, ctx->cuda_stream())); \
} else { \
hipLaunchKernelGGL(( _Set), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, value, y); \
} \
}
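// Set takes the hipMemsetAsync fast path when the fill value is zero; the
// float16 overload below additionally packs even-length fills into half2.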
template <>
DRAGON_API void Set<float16, CUDAContext>(
const int N,
const float16 value,
float16* y,
CUDAContext* ctx) {
if (value.x == (unsigned short)0) {
CUDA_CHECK(hipMemsetAsync(y, 0, sizeof(float16) * N, ctx->cuda_stream()));
return;
}
if ((N & 1) == 0) {
hipLaunchKernelGGL(( _Set), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N >> 1, convert::To<half2>(value), reinterpret_cast<half2*>(y));
} else {
hipLaunchKernelGGL(( _Set), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), N, value, y);
}
}
DEFINE_SET_FUNC(bool);
DEFINE_SET_FUNC(uint8_t);
DEFINE_SET_FUNC(int8_t);
DEFINE_SET_FUNC(int);
DEFINE_SET_FUNC(int64_t);
DEFINE_SET_FUNC(float);
DEFINE_SET_FUNC(double);
#undef DEFINE_SET_FUNC
#define DEFINE_INVSTD_FUNC(T) \
template <> \
DRAGON_API void InvStd<T, CUDAContext>( \
const int N, const float eps, const T* x, T* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _InvStd), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, (T)eps, x, y); \
}
template <>
DRAGON_API void InvStd<float16, CUDAContext>(
const int N,
const float eps,
const float16* x,
float16* y,
CUDAContext* ctx) {
if ((N & 1) == 0) {
hipLaunchKernelGGL(( _InvStd), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N >> 1,
eps,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
} else {
hipLaunchKernelGGL(( _InvStd), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, eps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y));
}
}
DEFINE_INVSTD_FUNC(float);
DEFINE_INVSTD_FUNC(double);
#undef DEFINE_INVSTD_FUNC
#define DEFINE_POWX_FUNC(T) \
template <> \
DRAGON_API void Powx<T, CUDAContext>( \
const int N, const float exponent, const T* x, T* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _Powx), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, (T)exponent, x, y); \
}
template <>
DRAGON_API void Powx<float16, CUDAContext>(
const int N,
const float exponent,
const float16* x,
float16* y,
CUDAContext* ctx) {
if ((N & 1) == 0) {
hipLaunchKernelGGL(( _Powx), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N >> 1,
exponent,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
} else {
hipLaunchKernelGGL(( _Powx), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N,
exponent,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
}
DEFINE_POWX_FUNC(float);
DEFINE_POWX_FUNC(double);
#undef DEFINE_POWX_FUNC
#define DEFINE_NOT_ZERO_FUNC(T) \
template <> \
DRAGON_API void NotZero<T, CUDAContext>( \
const int N, const T* x, bool* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _NotZero), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, x, y); \
}
template <>
DRAGON_API void NotZero<float16, CUDAContext>(
const int N,
const float16* x,
bool* y,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _NotZero), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, reinterpret_cast<const half*>(x), y);
}
DEFINE_NOT_ZERO_FUNC(bool);
DEFINE_NOT_ZERO_FUNC(uint8_t);
DEFINE_NOT_ZERO_FUNC(int8_t);
DEFINE_NOT_ZERO_FUNC(int);
DEFINE_NOT_ZERO_FUNC(int64_t);
DEFINE_NOT_ZERO_FUNC(float);
DEFINE_NOT_ZERO_FUNC(double);
#undef DEFINE_NOT_ZERO_FUNC
#define DEFINE_IS_INF_FUNC(T) \
template <> \
DRAGON_API void IsInf<T, CUDAContext>( \
const int N, const T* x, bool* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _IsInf), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), N, x, y); \
}
template <>
DRAGON_API void IsInf<float16, CUDAContext>(
const int N,
const float16* x,
bool* y,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _IsInf), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, reinterpret_cast<const half*>(x), y);
}
DEFINE_IS_INF_FUNC(float);
DEFINE_IS_INF_FUNC(double);
#undef DEFINE_IS_INF_FUNC
#define DEFINE_IS_NAN_FUNC(T) \
template <> \
DRAGON_API void IsNaN<T, CUDAContext>( \
const int N, const T* x, bool* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _IsNaN), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), N, x, y); \
}
template <>
DRAGON_API void IsNaN<float16, CUDAContext>(
const int N,
const float16* x,
bool* y,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _IsNaN), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, reinterpret_cast<const half*>(x), y);
}
DEFINE_IS_NAN_FUNC(float);
DEFINE_IS_NAN_FUNC(double);
#undef DEFINE_IS_NAN_FUNC
#define DEFINE_REPLACE_NAN_FUNC(T) \
template <> \
DRAGON_API void ReplaceNaN<T, CUDAContext>( \
const int N, const T value, const T* x, T* y, CUDAContext* ctx) { \
hipLaunchKernelGGL(( _ReplaceNaN), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, value, x, y); \
}
template <>
DRAGON_API void ReplaceNaN<float16, CUDAContext>(
const int N,
const float16 value,
const float16* x,
float16* y,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _ReplaceNaN), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N,
convert::To<half>(value),
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
DEFINE_REPLACE_NAN_FUNC(float);
DEFINE_REPLACE_NAN_FUNC(double);
#undef DEFINE_REPLACE_NAN_FUNC
#define DEFINE_BIAS_FUNC(T) \
template <> \
DRAGON_API void Bias<T, CUDAContext>( \
const int N, const float beta, const T* x, T* y, CUDAContext* ctx) { \
if (beta == 0.f) return; \
hipLaunchKernelGGL(( _Bias), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, (T)beta, math::PlusFunctor<T>(), x, y); \
}
template <>
DRAGON_API void Bias<float16, CUDAContext>(
const int N,
const float beta,
const float16* x,
float16* y,
CUDAContext* ctx) {
if (beta == 0.f) return;
if ((N & 1) == 0) {
hipLaunchKernelGGL(( _Bias), dim3(CUDA_BLOCKS(N >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N >> 1,
convert::To<half2>(beta),
math::PlusFunctor<half2>(),
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
} else {
hipLaunchKernelGGL(( _Bias), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N,
convert::To<half>(beta),
math::PlusFunctor<half>(),
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
}
DEFINE_BIAS_FUNC(uint8_t);
DEFINE_BIAS_FUNC(int8_t);
DEFINE_BIAS_FUNC(int);
DEFINE_BIAS_FUNC(int64_t);
DEFINE_BIAS_FUNC(float);
DEFINE_BIAS_FUNC(double);
#undef DEFINE_BIAS_FUNC
#define DEFINE_APPLY_MASK_FUNC(T, AccT) \
template <> \
DRAGON_API void ApplyMask<T, CUDAContext>( \
const int N, \
const float alpha, \
const uint8_t* mask, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _ApplyMask), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, \
convert::To<AccT>(alpha), \
mask, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
reinterpret_cast<math::ScalarType<T>::type*>(y)); \
}
DEFINE_APPLY_MASK_FUNC(uint8_t, uint8_t);
DEFINE_APPLY_MASK_FUNC(int8_t, int8_t);
DEFINE_APPLY_MASK_FUNC(int, int);
DEFINE_APPLY_MASK_FUNC(int64_t, int64_t);
DEFINE_APPLY_MASK_FUNC(float16, float);
DEFINE_APPLY_MASK_FUNC(float, float);
DEFINE_APPLY_MASK_FUNC(double, double);
#undef DEFINE_APPLY_MASK_FUNC
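// The binary functions below dispatch through math::ScalarType<InputT>::type, which maps
// the storage type to its device scalar (e.g. float16 buffers are reinterpreted as half),
// so a single functor template covers every instantiation.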
#define DEFINE_BINARY_FUNC(name, InputT, OutputT, Functor) \
template <> \
DRAGON_API void name<InputT, CUDAContext>( \
const int N, \
const InputT* a, \
const InputT* b, \
OutputT* y, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _SimpleBinaryFunc), \
CUDA_BLOCKS(N), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
N, \
Functor<math::ScalarType<InputT>::type>(), \
reinterpret_cast<const math::ScalarType<InputT>::type*>(a), \
reinterpret_cast<const math::ScalarType<InputT>::type*>(b), \
reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \
}
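// Instantiations: arithmetic and bitwise ops keep the input type as the output type, while
// the logical and comparison ops further below emit bool outputs.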
DEFINE_BINARY_FUNC(Add, uint8_t, uint8_t, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, int8_t, int8_t, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, int, int, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, int64_t, int64_t, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, float16, float16, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, float, float, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, double, double, math::PlusFunctor);
DEFINE_BINARY_FUNC(Sub, uint8_t, uint8_t, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, int8_t, int8_t, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, int, int, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, int64_t, int64_t, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, float16, float16, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, float, float, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, double, double, math::MinusFunctor);
DEFINE_BINARY_FUNC(Mul, uint8_t, uint8_t, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, int8_t, int8_t, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, int, int, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, int64_t, int64_t, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, float16, float16, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, float, float, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, double, double, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Div, uint8_t, uint8_t, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, int8_t, int8_t, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, int, int, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, int64_t, int64_t, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, float16, float16, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, float, float, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, double, double, math::DividesFunctor);
DEFINE_BINARY_FUNC(Pow, float16, float16, math::PowFunctor);
DEFINE_BINARY_FUNC(Pow, float, float, math::PowFunctor);
DEFINE_BINARY_FUNC(Pow, double, double, math::PowFunctor);
DEFINE_BINARY_FUNC(Minimum, uint8_t, uint8_t, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, int8_t, int8_t, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, int, int, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, int64_t, int64_t, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, float16, float16, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, float, float, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, double, double, math::MinFunctor);
DEFINE_BINARY_FUNC(Maximum, uint8_t, uint8_t, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, int8_t, int8_t, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, int, int, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, int64_t, int64_t, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, float16, float16, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, float, float, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, double, double, math::MaxFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, bool, bool, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, uint8_t, uint8_t, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, int8_t, int8_t, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, int, int, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, int64_t, int64_t, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, bool, bool, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, uint8_t, uint8_t, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, int8_t, int8_t, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, int, int, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, int64_t, int64_t, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, bool, bool, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, uint8_t, uint8_t, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, int8_t, int8_t, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, int, int, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, int64_t, int64_t, math::BitXorFunctor);
DEFINE_BINARY_FUNC(And, bool, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, uint8_t, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, int8_t, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, int, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, int64_t, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, float16, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, float, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, double, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(Or, bool, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, uint8_t, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, int8_t, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, int, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, int64_t, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, float16, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, float, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, double, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Xor, bool, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, uint8_t, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, int8_t, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, int, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, int64_t, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, float16, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, float, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, double, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Equal, bool, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, uint8_t, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, int8_t, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, int, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, int64_t, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, float16, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, float, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, double, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, bool, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, uint8_t, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, int8_t, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, int, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, int64_t, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, float16, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, float, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, double, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(Less, bool, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, uint8_t, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, int8_t, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, int, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, int64_t, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, float16, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, float, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, double, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(LessEqual, bool, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, uint8_t, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, int8_t, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, int, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, int64_t, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, float16, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, float, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, double, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(Greater, bool, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, uint8_t, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, int8_t, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, int, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, int64_t, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, float16, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, float, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, double, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, bool, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, uint8_t, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, int8_t, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, int, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, int64_t, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, float16, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, float, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, double, bool, math::GreaterEqualFunctor);
#undef DEFINE_BINARY_FUNC
#define DEFINE_WHERE_FUNC(T) \
template <> \
DRAGON_API void Where<T, CUDAContext>( \
const int N, \
const T* a, \
const T* b, \
const bool* c, \
T* y, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _Where), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, a, b, c, y); \
}
DEFINE_WHERE_FUNC(uint8_t);
DEFINE_WHERE_FUNC(bool);
DEFINE_WHERE_FUNC(int8_t);
DEFINE_WHERE_FUNC(int);
DEFINE_WHERE_FUNC(int64_t);
DEFINE_WHERE_FUNC(float16);
DEFINE_WHERE_FUNC(float);
DEFINE_WHERE_FUNC(double);
#undef DEFINE_WHERE_FUNC
} // namespace math
} // namespace dragon
#endif // USE_ROCM
| 412f5b38cf8ae0e2e7ca95873cc284fd0f994482.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math/elementwise.h"
#include "dragon/utils/math/functional.h"
#include "dragon/utils/math/utils.h"
namespace dragon {
namespace math {
namespace {
/*!
* Unary Functors
*/
#define DEFINE_UNARY_FUNCTOR(name, func) \
template <typename T> \
struct name##Functor { \
inline __device__ T operator()(const T& x) const { \
return func(x); \
} \
}
DEFINE_UNARY_FUNCTOR(Neg, -);
DEFINE_UNARY_FUNCTOR(Ceil, ceil);
DEFINE_UNARY_FUNCTOR(Cos, cos);
DEFINE_UNARY_FUNCTOR(Exp, exp);
DEFINE_UNARY_FUNCTOR(Floor, floor);
DEFINE_UNARY_FUNCTOR(Log, log);
DEFINE_UNARY_FUNCTOR(Round, round);
DEFINE_UNARY_FUNCTOR(Rsqrt, rsqrt);
DEFINE_UNARY_FUNCTOR(Sin, sin);
DEFINE_UNARY_FUNCTOR(Sqrt, sqrt);
#if __CUDA_ARCH__ >= 530
DEFINE_UNARY_FUNCTOR(NegHalf, __hneg);
DEFINE_UNARY_FUNCTOR(NegHalf2, __hneg2);
DEFINE_UNARY_FUNCTOR(CeilHalf, hceil);
DEFINE_UNARY_FUNCTOR(CeilHalf2, h2ceil);
DEFINE_UNARY_FUNCTOR(CosHalf, hcos);
DEFINE_UNARY_FUNCTOR(CosHalf2, h2cos);
DEFINE_UNARY_FUNCTOR(ExpHalf, hexp);
DEFINE_UNARY_FUNCTOR(ExpHalf2, h2exp);
DEFINE_UNARY_FUNCTOR(FloorHalf, hfloor);
DEFINE_UNARY_FUNCTOR(FloorHalf2, h2floor);
DEFINE_UNARY_FUNCTOR(InvHalf, hrcp);
DEFINE_UNARY_FUNCTOR(InvHalf2, h2rcp);
DEFINE_UNARY_FUNCTOR(LogHalf, hlog);
DEFINE_UNARY_FUNCTOR(LogHalf2, h2log);
DEFINE_UNARY_FUNCTOR(RoundHalf, hrint);
DEFINE_UNARY_FUNCTOR(RoundHalf2, h2rint);
DEFINE_UNARY_FUNCTOR(RsqrtHalf, hrsqrt);
DEFINE_UNARY_FUNCTOR(RsqrtHalf2, h2rsqrt);
DEFINE_UNARY_FUNCTOR(SinHalf, hsin);
DEFINE_UNARY_FUNCTOR(SinHalf2, h2sin);
DEFINE_UNARY_FUNCTOR(SqrtHalf, hsqrt);
DEFINE_UNARY_FUNCTOR(SqrtHalf2, h2sqrt);
#endif
#undef DEFINE_UNARY_FUNCTOR
#define DEFINE_UNARY_FUNCTOR(name, func) \
template <typename T> \
struct name##Functor { \
inline __device__ T operator()(const T& x) const { \
return __float2half(func(__half2float(x))); \
} \
}
#if __CUDA_ARCH__ < 530
DEFINE_UNARY_FUNCTOR(NegHalf, -);
DEFINE_UNARY_FUNCTOR(CeilHalf, ceil);
DEFINE_UNARY_FUNCTOR(CosHalf, cos);
DEFINE_UNARY_FUNCTOR(ExpHalf, exp);
DEFINE_UNARY_FUNCTOR(FloorHalf, floor);
DEFINE_UNARY_FUNCTOR(InvHalf, __frcp_rn);
DEFINE_UNARY_FUNCTOR(LogHalf, log);
DEFINE_UNARY_FUNCTOR(RoundHalf, round);
DEFINE_UNARY_FUNCTOR(RsqrtHalf, rsqrt);
DEFINE_UNARY_FUNCTOR(SinHalf, sin);
DEFINE_UNARY_FUNCTOR(SqrtHalf, sqrt);
#endif
#undef DEFINE_UNARY_FUNCTOR
#define DEFINE_UNARY_FUNCTOR(name, func) \
template <typename T> \
struct name##Functor { \
inline __device__ T operator()(const T& x) const { \
const float2 val = __half22float2(x); \
return __floats2half2_rn(func(val.x), func(val.y)); \
} \
}
#if __CUDA_ARCH__ < 530
DEFINE_UNARY_FUNCTOR(NegHalf2, -);
DEFINE_UNARY_FUNCTOR(CeilHalf2, ceil);
DEFINE_UNARY_FUNCTOR(CosHalf2, cos);
DEFINE_UNARY_FUNCTOR(ExpHalf2, exp);
DEFINE_UNARY_FUNCTOR(FloorHalf2, floor);
DEFINE_UNARY_FUNCTOR(InvHalf2, __frcp_rn);
DEFINE_UNARY_FUNCTOR(LogHalf2, log);
DEFINE_UNARY_FUNCTOR(RoundHalf2, round);
DEFINE_UNARY_FUNCTOR(RsqrtHalf2, rsqrt);
DEFINE_UNARY_FUNCTOR(SinHalf2, sin);
DEFINE_UNARY_FUNCTOR(SqrtHalf2, sqrt);
#endif
#undef DEFINE_UNARY_FUNCTOR
/*!
* Unary Function Kernels
*/
template <typename InputT, typename OutputT, class Functor>
__global__ void
_SimpleUnaryFunc(const int N, const Functor op, const InputT* x, OutputT* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = op(x[i]);
}
}
template <typename T>
__global__ void _Abs(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const T val = x[i];
y[i] = val > 0 ? val : -val;
}
}
template <>
__global__ void _Abs<half>(const int N, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float val = __half2float(x[i]);
y[i] = __float2half(val > 0 ? val : -val);
}
}
template <>
__global__ void _Abs<half2>(const int N, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] = __floats2half2_rn(
val.x > 0.f ? val.x : -val.x, val.y > 0.f ? val.y : -val.y);
}
}
__global__ void _Inv(const int N, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __frcp_rn(x[i]);
}
}
__global__ void _Inv(const int N, const double* x, double* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __drcp_rn(x[i]);
}
}
template <typename T>
__global__ void _InvStd(const int N, const T eps, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = rsqrt(x[i] + eps);
}
}
__global__ void _InvStd(const int N, const float eps, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(rsqrt(__half2float(x[i]) + eps));
}
}
__global__ void
_InvStd(const int N, const float eps, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] = __floats2half2_rn(rsqrt(val.x + eps), rsqrt(val.y + eps));
}
}
template <typename T>
__global__ void _Powx(const int N, const T exponent, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = pow(x[i], exponent);
}
}
__global__ void
_Powx(const int N, const float exponent, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(pow(__half2float(x[i]), exponent));
}
}
__global__ void
_Powx(const int N, const float exponent, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] = __floats2half2_rn(pow(val.x, exponent), pow(val.y, exponent));
}
}
template <typename T>
__global__ void _Set(const int N, const T alpha, T* x) {
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = alpha;
}
}
template <typename T>
__global__ void _Sign(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::Sign(x[i]);
}
}
template <>
__global__ void _Sign<uint8_t>(const int N, const uint8_t* x, uint8_t* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i] > 0 ? uint8_t(1) : uint8_t(0);
}
}
template <>
__global__ void _Sign<half>(const int N, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float val = __half2float(x[i]);
y[i] = __float2half(math::utils::Sign(val));
}
}
template <>
__global__ void _Sign<half2>(const int N, const half2* x, half2* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float2 val = __half22float2(x[i]);
y[i] =
__floats2half2_rn(math::utils::Sign(val.x), math::utils::Sign(val.y));
}
}
template <typename T>
__global__ void _Square(const int N, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::Square(x[i]);
}
}
template <typename T>
__global__ void _NotZero(const int N, const T* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i] != T(0) ? true : false;
}
}
template <>
__global__ void _NotZero<half>(const int N, const half* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __half2float(x[i]) != 0.f ? true : false;
}
}
template <typename T>
__global__ void _IsInf(const int N, const T* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsInf(x[i]);
}
}
template <>
__global__ void _IsInf<half>(const int N, const half* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsInf(x[i]);
}
}
template <typename T>
__global__ void _IsNaN(const int N, const T* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(x[i]);
}
}
template <>
__global__ void _IsNaN<half>(const int N, const half* x, bool* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(x[i]);
}
}
template <typename T>
__global__ void _ReplaceNaN(const int N, const T value, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(__ldg(x + i)) ? value : __ldg(x + i);
}
}
template <>
__global__ void
_ReplaceNaN<half>(const int N, const half value, const half* x, half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = math::utils::IsNaN(__ldg(x + i)) ? value : __ldg(x + i);
}
}
template <typename T, class Functor>
__global__ void
_Bias(const int N, const T beta, const Functor op, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = op(x[i], beta);
}
}
template <typename T>
__global__ void
_ApplyMask(const int N, const T alpha, const uint8_t* mask, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i] * T(mask[i]) * alpha;
}
}
__global__ void _ApplyMask(
const int N,
const float alpha,
const uint8_t* mask,
const half* x,
half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = __float2half(__half2float(x[i]) * (alpha * float(mask[i])));
}
}
/*!
* Binary Function Kernels
*/
template <typename InputT, typename OutputT, class Functor>
__global__ void _SimpleBinaryFunc(
const int N,
const Functor op,
const InputT* a,
const InputT* b,
OutputT* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = op(a[i], b[i]);
}
}
template <typename T>
__global__ void
_Where(const int N, const T* a, const T* b, const bool* c, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = c[i] ? a[i] : b[i];
}
}
} // namespace
#define DEFINE_UNARY_FUNC(name, InputT, OutputT, Functor) \
template <> \
DRAGON_API void name<InputT, CUDAContext>( \
const int N, const InputT* x, OutputT* y, CUDAContext* ctx) { \
_SimpleUnaryFunc<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, Functor<InputT>(), x, y); \
}
DEFINE_UNARY_FUNC(BitwiseNot, bool, bool, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, uint8_t, uint8_t, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, int8_t, int8_t, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, int, int, math::BitNotFunctor);
DEFINE_UNARY_FUNC(BitwiseNot, int64_t, int64_t, math::BitNotFunctor);
DEFINE_UNARY_FUNC(Not, bool, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, uint8_t, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, int8_t, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, int, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, int64_t, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, float16, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, float, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Not, double, bool, math::NotFunctor);
DEFINE_UNARY_FUNC(Neg, int8_t, int8_t, NegFunctor);
DEFINE_UNARY_FUNC(Neg, int, int, NegFunctor);
DEFINE_UNARY_FUNC(Neg, int64_t, int64_t, NegFunctor);
DEFINE_UNARY_FUNC(Neg, float, float, NegFunctor);
DEFINE_UNARY_FUNC(Neg, double, double, NegFunctor);
DEFINE_UNARY_FUNC(Ceil, float, float, CeilFunctor);
DEFINE_UNARY_FUNC(Ceil, double, double, CeilFunctor);
DEFINE_UNARY_FUNC(Cos, float, float, CosFunctor);
DEFINE_UNARY_FUNC(Cos, double, double, CosFunctor);
DEFINE_UNARY_FUNC(Exp, float, float, ExpFunctor);
DEFINE_UNARY_FUNC(Exp, double, double, ExpFunctor);
DEFINE_UNARY_FUNC(Floor, float, float, FloorFunctor);
DEFINE_UNARY_FUNC(Floor, double, double, FloorFunctor);
DEFINE_UNARY_FUNC(Log, float, float, LogFunctor);
DEFINE_UNARY_FUNC(Log, double, double, LogFunctor);
DEFINE_UNARY_FUNC(Round, float, float, RoundFunctor);
DEFINE_UNARY_FUNC(Round, double, double, RoundFunctor);
DEFINE_UNARY_FUNC(Rsqrt, float, float, RsqrtFunctor);
DEFINE_UNARY_FUNC(Rsqrt, double, double, RsqrtFunctor);
DEFINE_UNARY_FUNC(Sin, float, float, SinFunctor);
DEFINE_UNARY_FUNC(Sin, double, double, SinFunctor);
DEFINE_UNARY_FUNC(Sqrt, float, float, SqrtFunctor);
DEFINE_UNARY_FUNC(Sqrt, double, double, SqrtFunctor);
#undef DEFINE_UNARY_FUNC
#define DEFINE_UNARY_FUNC(name, T) \
template <> \
DRAGON_API void name<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* ctx) { \
_##name<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(N, x, y); \
}
DEFINE_UNARY_FUNC(Abs, uint8_t);
DEFINE_UNARY_FUNC(Abs, int8_t);
DEFINE_UNARY_FUNC(Abs, int);
DEFINE_UNARY_FUNC(Abs, int64_t);
DEFINE_UNARY_FUNC(Abs, float);
DEFINE_UNARY_FUNC(Abs, double);
DEFINE_UNARY_FUNC(Inv, float);
DEFINE_UNARY_FUNC(Inv, double);
DEFINE_UNARY_FUNC(Sign, uint8_t);
DEFINE_UNARY_FUNC(Sign, int8_t);
DEFINE_UNARY_FUNC(Sign, int);
DEFINE_UNARY_FUNC(Sign, int64_t);
DEFINE_UNARY_FUNC(Sign, float);
DEFINE_UNARY_FUNC(Sign, double);
DEFINE_UNARY_FUNC(Square, uint8_t);
DEFINE_UNARY_FUNC(Square, int8_t);
DEFINE_UNARY_FUNC(Square, int);
DEFINE_UNARY_FUNC(Square, int64_t);
DEFINE_UNARY_FUNC(Square, float);
DEFINE_UNARY_FUNC(Square, double);
#undef DEFINE_UNARY_FUNC
#define DEFINE_UNARY_FUNC(name, HalfFunctor, Half2Functor) \
template <> \
DRAGON_API void name<float16, CUDAContext>( \
const int N, const float16* x, float16* y, CUDAContext* ctx) { \
if ((N & 1) == 0) { \
_SimpleUnaryFunc<<< \
CUDA_BLOCKS(N >> 1), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
N >> 1, \
Half2Functor<half2>(), \
reinterpret_cast<const half2*>(x), \
reinterpret_cast<half2*>(y)); \
} else { \
_SimpleUnaryFunc<<< \
CUDA_BLOCKS(N), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
N, \
HalfFunctor<half>(), \
reinterpret_cast<const half*>(x), \
reinterpret_cast<half*>(y)); \
} \
}
DEFINE_UNARY_FUNC(Neg, NegHalfFunctor, NegHalf2Functor);
DEFINE_UNARY_FUNC(Ceil, CeilHalfFunctor, CeilHalf2Functor);
DEFINE_UNARY_FUNC(Cos, CosHalfFunctor, CosHalf2Functor);
DEFINE_UNARY_FUNC(Exp, ExpHalfFunctor, ExpHalf2Functor);
DEFINE_UNARY_FUNC(Floor, FloorHalfFunctor, FloorHalf2Functor);
DEFINE_UNARY_FUNC(Log, LogHalfFunctor, LogHalf2Functor);
DEFINE_UNARY_FUNC(Inv, InvHalfFunctor, InvHalf2Functor);
DEFINE_UNARY_FUNC(Round, RoundHalfFunctor, RoundHalf2Functor);
DEFINE_UNARY_FUNC(Rsqrt, RsqrtHalfFunctor, RsqrtHalf2Functor);
DEFINE_UNARY_FUNC(Sin, SinHalfFunctor, SinHalf2Functor);
DEFINE_UNARY_FUNC(Sqrt, SqrtHalfFunctor, SqrtHalf2Functor);
#undef DEFINE_UNARY_FUNC
#define DEFINE_UNARY_FUNC(name) \
template <> \
DRAGON_API void name<float16, CUDAContext>( \
const int N, const float16* x, float16* y, CUDAContext* ctx) { \
if ((N & 1) == 0) { \
_##name<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N >> 1, \
reinterpret_cast<const half2*>(x), \
reinterpret_cast<half2*>(y)); \
} else { \
_##name<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); \
} \
}
DEFINE_UNARY_FUNC(Abs);
DEFINE_UNARY_FUNC(Sign);
DEFINE_UNARY_FUNC(Square);
#undef DEFINE_UNARY_FUNC
#define DEFINE_SET_FUNC(T) \
template <> \
DRAGON_API void Set<T, CUDAContext>( \
const int N, const T value, T* y, CUDAContext* ctx) { \
if (value == T(0)) { \
CUDA_CHECK(cudaMemsetAsync(y, 0, sizeof(T) * N, ctx->cuda_stream())); \
} else { \
_Set<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, value, y); \
} \
}
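// The float16 specialization below short-circuits to cudaMemsetAsync when the fill value is
// bitwise zero, and otherwise packs pairs into half2 when N is even.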
template <>
DRAGON_API void Set<float16, CUDAContext>(
const int N,
const float16 value,
float16* y,
CUDAContext* ctx) {
if (value.x == (unsigned short)0) {
CUDA_CHECK(cudaMemsetAsync(y, 0, sizeof(float16) * N, ctx->cuda_stream()));
return;
}
if ((N & 1) == 0) {
_Set<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N >> 1, convert::To<half2>(value), reinterpret_cast<half2*>(y));
} else {
_Set<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(N, value, y);
}
}
DEFINE_SET_FUNC(bool);
DEFINE_SET_FUNC(uint8_t);
DEFINE_SET_FUNC(int8_t);
DEFINE_SET_FUNC(int);
DEFINE_SET_FUNC(int64_t);
DEFINE_SET_FUNC(float);
DEFINE_SET_FUNC(double);
#undef DEFINE_SET_FUNC
#define DEFINE_INVSTD_FUNC(T) \
template <> \
DRAGON_API void InvStd<T, CUDAContext>( \
const int N, const float eps, const T* x, T* y, CUDAContext* ctx) { \
_InvStd<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, (T)eps, x, y); \
}
template <>
DRAGON_API void InvStd<float16, CUDAContext>(
const int N,
const float eps,
const float16* x,
float16* y,
CUDAContext* ctx) {
if ((N & 1) == 0) {
_InvStd<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N >> 1,
eps,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
} else {
_InvStd<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, eps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y));
}
}
DEFINE_INVSTD_FUNC(float);
DEFINE_INVSTD_FUNC(double);
#undef DEFINE_INVSTD_FUNC
#define DEFINE_POWX_FUNC(T) \
template <> \
DRAGON_API void Powx<T, CUDAContext>( \
const int N, const float exponent, const T* x, T* y, CUDAContext* ctx) { \
_Powx<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, (T)exponent, x, y); \
}
template <>
DRAGON_API void Powx<float16, CUDAContext>(
const int N,
const float exponent,
const float16* x,
float16* y,
CUDAContext* ctx) {
if ((N & 1) == 0) {
_Powx<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N >> 1,
exponent,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
} else {
_Powx<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N,
exponent,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
}
DEFINE_POWX_FUNC(float);
DEFINE_POWX_FUNC(double);
#undef DEFINE_POWX_FUNC
#define DEFINE_NOT_ZERO_FUNC(T) \
template <> \
DRAGON_API void NotZero<T, CUDAContext>( \
const int N, const T* x, bool* y, CUDAContext* ctx) { \
_NotZero<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, x, y); \
}
template <>
DRAGON_API void NotZero<float16, CUDAContext>(
const int N,
const float16* x,
bool* y,
CUDAContext* ctx) {
_NotZero<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, reinterpret_cast<const half*>(x), y);
}
DEFINE_NOT_ZERO_FUNC(bool);
DEFINE_NOT_ZERO_FUNC(uint8_t);
DEFINE_NOT_ZERO_FUNC(int8_t);
DEFINE_NOT_ZERO_FUNC(int);
DEFINE_NOT_ZERO_FUNC(int64_t);
DEFINE_NOT_ZERO_FUNC(float);
DEFINE_NOT_ZERO_FUNC(double);
#undef DEFINE_NOT_ZERO_FUNC
#define DEFINE_IS_INF_FUNC(T) \
template <> \
DRAGON_API void IsInf<T, CUDAContext>( \
const int N, const T* x, bool* y, CUDAContext* ctx) { \
_IsInf<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(N, x, y); \
}
template <>
DRAGON_API void IsInf<float16, CUDAContext>(
const int N,
const float16* x,
bool* y,
CUDAContext* ctx) {
_IsInf<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, reinterpret_cast<const half*>(x), y);
}
DEFINE_IS_INF_FUNC(float);
DEFINE_IS_INF_FUNC(double);
#undef DEFINE_IS_INF_FUNC
#define DEFINE_IS_NAN_FUNC(T) \
template <> \
DRAGON_API void IsNaN<T, CUDAContext>( \
const int N, const T* x, bool* y, CUDAContext* ctx) { \
_IsNaN<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(N, x, y); \
}
template <>
DRAGON_API void IsNaN<float16, CUDAContext>(
const int N,
const float16* x,
bool* y,
CUDAContext* ctx) {
_IsNaN<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, reinterpret_cast<const half*>(x), y);
}
DEFINE_IS_NAN_FUNC(float);
DEFINE_IS_NAN_FUNC(double);
#undef DEFINE_IS_NAN_FUNC
#define DEFINE_REPLACE_NAN_FUNC(T) \
template <> \
DRAGON_API void ReplaceNaN<T, CUDAContext>( \
const int N, const T value, const T* x, T* y, CUDAContext* ctx) { \
_ReplaceNaN<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, value, x, y); \
}
template <>
DRAGON_API void ReplaceNaN<float16, CUDAContext>(
const int N,
const float16 value,
const float16* x,
float16* y,
CUDAContext* ctx) {
_ReplaceNaN<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N,
convert::To<half>(value),
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
DEFINE_REPLACE_NAN_FUNC(float);
DEFINE_REPLACE_NAN_FUNC(double);
#undef DEFINE_REPLACE_NAN_FUNC
#define DEFINE_BIAS_FUNC(T) \
template <> \
DRAGON_API void Bias<T, CUDAContext>( \
const int N, const float beta, const T* x, T* y, CUDAContext* ctx) { \
if (beta == 0.f) return; \
_Bias<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, (T)beta, math::PlusFunctor<T>(), x, y); \
}
template <>
DRAGON_API void Bias<float16, CUDAContext>(
const int N,
const float beta,
const float16* x,
float16* y,
CUDAContext* ctx) {
if (beta == 0.f) return;
if ((N & 1) == 0) {
_Bias<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N >> 1,
convert::To<half2>(beta),
math::PlusFunctor<half2>(),
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
} else {
_Bias<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N,
convert::To<half>(beta),
math::PlusFunctor<half>(),
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
}
DEFINE_BIAS_FUNC(uint8_t);
DEFINE_BIAS_FUNC(int8_t);
DEFINE_BIAS_FUNC(int);
DEFINE_BIAS_FUNC(int64_t);
DEFINE_BIAS_FUNC(float);
DEFINE_BIAS_FUNC(double);
#undef DEFINE_BIAS_FUNC
#define DEFINE_APPLY_MASK_FUNC(T, AccT) \
template <> \
DRAGON_API void ApplyMask<T, CUDAContext>( \
const int N, \
const float alpha, \
const uint8_t* mask, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
_ApplyMask<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, \
convert::To<AccT>(alpha), \
mask, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
reinterpret_cast<math::ScalarType<T>::type*>(y)); \
}
DEFINE_APPLY_MASK_FUNC(uint8_t, uint8_t);
DEFINE_APPLY_MASK_FUNC(int8_t, int8_t);
DEFINE_APPLY_MASK_FUNC(int, int);
DEFINE_APPLY_MASK_FUNC(int64_t, int64_t);
DEFINE_APPLY_MASK_FUNC(float16, float);
DEFINE_APPLY_MASK_FUNC(float, float);
DEFINE_APPLY_MASK_FUNC(double, double);
#undef DEFINE_APPLY_MASK_FUNC
#define DEFINE_BINARY_FUNC(name, InputT, OutputT, Functor) \
template <> \
DRAGON_API void name<InputT, CUDAContext>( \
const int N, \
const InputT* a, \
const InputT* b, \
OutputT* y, \
CUDAContext* ctx) { \
_SimpleBinaryFunc<<< \
CUDA_BLOCKS(N), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
N, \
Functor<math::ScalarType<InputT>::type>(), \
reinterpret_cast<const math::ScalarType<InputT>::type*>(a), \
reinterpret_cast<const math::ScalarType<InputT>::type*>(b), \
reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \
}
DEFINE_BINARY_FUNC(Add, uint8_t, uint8_t, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, int8_t, int8_t, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, int, int, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, int64_t, int64_t, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, float16, float16, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, float, float, math::PlusFunctor);
DEFINE_BINARY_FUNC(Add, double, double, math::PlusFunctor);
DEFINE_BINARY_FUNC(Sub, uint8_t, uint8_t, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, int8_t, int8_t, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, int, int, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, int64_t, int64_t, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, float16, float16, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, float, float, math::MinusFunctor);
DEFINE_BINARY_FUNC(Sub, double, double, math::MinusFunctor);
DEFINE_BINARY_FUNC(Mul, uint8_t, uint8_t, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, int8_t, int8_t, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, int, int, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, int64_t, int64_t, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, float16, float16, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, float, float, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Mul, double, double, math::MultipliesFunctor);
DEFINE_BINARY_FUNC(Div, uint8_t, uint8_t, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, int8_t, int8_t, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, int, int, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, int64_t, int64_t, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, float16, float16, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, float, float, math::DividesFunctor);
DEFINE_BINARY_FUNC(Div, double, double, math::DividesFunctor);
DEFINE_BINARY_FUNC(Pow, float16, float16, math::PowFunctor);
DEFINE_BINARY_FUNC(Pow, float, float, math::PowFunctor);
DEFINE_BINARY_FUNC(Pow, double, double, math::PowFunctor);
DEFINE_BINARY_FUNC(Minimum, uint8_t, uint8_t, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, int8_t, int8_t, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, int, int, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, int64_t, int64_t, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, float16, float16, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, float, float, math::MinFunctor);
DEFINE_BINARY_FUNC(Minimum, double, double, math::MinFunctor);
DEFINE_BINARY_FUNC(Maximum, uint8_t, uint8_t, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, int8_t, int8_t, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, int, int, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, int64_t, int64_t, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, float16, float16, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, float, float, math::MaxFunctor);
DEFINE_BINARY_FUNC(Maximum, double, double, math::MaxFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, bool, bool, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, uint8_t, uint8_t, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, int8_t, int8_t, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, int, int, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseAnd, int64_t, int64_t, math::BitAndFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, bool, bool, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, uint8_t, uint8_t, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, int8_t, int8_t, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, int, int, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseOr, int64_t, int64_t, math::BitOrFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, bool, bool, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, uint8_t, uint8_t, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, int8_t, int8_t, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, int, int, math::BitXorFunctor);
DEFINE_BINARY_FUNC(BitwiseXor, int64_t, int64_t, math::BitXorFunctor);
DEFINE_BINARY_FUNC(And, bool, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, uint8_t, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, int8_t, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, int, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, int64_t, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, float16, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, float, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(And, double, bool, math::AndFunctor);
DEFINE_BINARY_FUNC(Or, bool, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, uint8_t, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, int8_t, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, int, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, int64_t, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, float16, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, float, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Or, double, bool, math::OrFunctor);
DEFINE_BINARY_FUNC(Xor, bool, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, uint8_t, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, int8_t, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, int, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, int64_t, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, float16, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, float, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Xor, double, bool, math::XorFunctor);
DEFINE_BINARY_FUNC(Equal, bool, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, uint8_t, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, int8_t, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, int, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, int64_t, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, float16, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, float, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(Equal, double, bool, math::EqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, bool, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, uint8_t, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, int8_t, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, int, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, int64_t, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, float16, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, float, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(NotEqual, double, bool, math::NotEqualFunctor);
DEFINE_BINARY_FUNC(Less, bool, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, uint8_t, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, int8_t, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, int, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, int64_t, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, float16, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, float, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(Less, double, bool, math::LessFunctor);
DEFINE_BINARY_FUNC(LessEqual, bool, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, uint8_t, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, int8_t, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, int, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, int64_t, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, float16, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, float, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(LessEqual, double, bool, math::LessEqualFunctor);
DEFINE_BINARY_FUNC(Greater, bool, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, uint8_t, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, int8_t, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, int, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, int64_t, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, float16, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, float, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(Greater, double, bool, math::GreaterFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, bool, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, uint8_t, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, int8_t, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, int, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, int64_t, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, float16, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, float, bool, math::GreaterEqualFunctor);
DEFINE_BINARY_FUNC(GreaterEqual, double, bool, math::GreaterEqualFunctor);
#undef DEFINE_BINARY_FUNC
#define DEFINE_WHERE_FUNC(T) \
template <> \
DRAGON_API void Where<T, CUDAContext>( \
const int N, \
const T* a, \
const T* b, \
const bool* c, \
T* y, \
CUDAContext* ctx) { \
_Where<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, a, b, c, y); \
}
DEFINE_WHERE_FUNC(uint8_t);
DEFINE_WHERE_FUNC(bool);
DEFINE_WHERE_FUNC(int8_t);
DEFINE_WHERE_FUNC(int);
DEFINE_WHERE_FUNC(int64_t);
DEFINE_WHERE_FUNC(float16);
DEFINE_WHERE_FUNC(float);
DEFINE_WHERE_FUNC(double);
#undef DEFINE_WHERE_FUNC
} // namespace math
} // namespace dragon
#endif // USE_CUDA
|
6b493092ee5e2284d9bbd4e6e47ab2780c2fe72f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <hip/hip_complex.h>
__global__ void rsub_double(int n, double *a, double *b, double *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = b[i] - a[i];
}
} | 6b493092ee5e2284d9bbd4e6e47ab2780c2fe72f.cu | extern "C"
#include <cuComplex.h>
__global__ void rsub_double(int n, double *a, double *b, double *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = b[i] - a[i];
}
} |
3fb4bf2272f84bddae2febac29cae37947b36243.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #define DEBUG
// #define ONLY_NORM2
#ifdef DEBUG
#define FOURMB (2 * 1024 * 1024)
#define BYTES (FOURMB * sizeof(int))
#define NTHREADS 64
#define INITN 128
#else
#define FOURMB (2 * 1024 * 1024)
// #define FOURM
#define BYTES (FOURMB * sizeof(int))
#define NTHREADS 128
#define INITN 1024
#endif
// homework1 program
// TODO: GPU version that computes the 2-norm of the difference of two vectors
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
#define CUDA_CALL(x) \
{ \
const hipError_t a = (x); \
if (a != hipSuccess) \
{ \
printf("\nCUDA Error: %s (err_num = %d)\n", hipGetErrorString(a), a); \
hipDeviceReset(); \
assert(0); \
} \
}
// TODO: define the GPU kernel functions and call them from rbfComputeGPU
#ifndef ONLY_NORM2
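// Final stage of the block reduction: the last 64 partial sums are folded by a single warp.
// The volatile qualifier forces the shared-memory accesses, and no __syncthreads() is used
// because implicit warp-synchronous execution is assumed for threads 0-31.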
__device__ void wrapReduce(volatile int *sdata, int tid)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
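// Shared-memory tree reduction: each block loads and pre-adds two elements per thread
// (2 * blockDim.x inputs per block), halves the number of active threads each step, and
// hands the last 64 partial sums to wrapReduce.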
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
sdata[tid] += (i + blockDim.x < n) ? g_idata[i + blockDim.x] : 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid < 32)
wrapReduce(sdata, tid);
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
#endif
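// norm2 expects the two input vectors interleaved in blocks of blockDim.x elements (see the
// copy loop in rbfComputeGPU): each thread reads one element of each vector at input[i] and
// input[i + blockDim.x] and writes their squared difference to output.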
__global__ void norm2(int *input, int *output, int len)
{
extern __shared__ int smem[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
int truei = blockIdx.x * blockDim.x + threadIdx.x;
smem[tid] = input[i] - input[i + blockDim.x];
__syncthreads();
smem[tid] = smem[tid] * smem[tid];
output[truei] = smem[tid];
}
// no fusion version:
__host__ int rbfComputeGPU(int *input1, int *input2, int len)
{
int *d_idata1;
// int *d_idata2;
int *d_idata;
int *d_odata;
int *d_intermediateSums;
int res = 0;
int nReduBlocks = len / NTHREADS / 2;
int n2NormBlocks = len / NTHREADS;
int calBytes = len * sizeof(int);
    // TODO: allocate memory on the GPU
// CUDA_CALL();
CUDA_CALL(hipMalloc((void **)&d_idata1, calBytes * 2));
// CUDA_CALL(hipMalloc((void **)&d_idata2, calBytes));
CUDA_CALL(hipMalloc((void **)&d_idata, calBytes));
CUDA_CALL(hipMalloc((void **)&d_odata, nReduBlocks * sizeof(int)));
CUDA_CALL(hipMalloc((void **)&d_intermediateSums, sizeof(int) * nReduBlocks));
    // TODO: copy the CPU inputs into GPU global memory
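    // The two vectors are copied in alternating NTHREADS-sized chunks so that each norm2
    // block sees its slice of input1 followed by the matching slice of input2 in one buffer.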
for (int i = 0; i < len; i += NTHREADS)
{
CUDA_CALL(hipMemcpy(&d_idata1[i * 2], &input1[i], NTHREADS * sizeof(int), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(&d_idata1[i * 2 + NTHREADS], &input2[i], NTHREADS * sizeof(int), hipMemcpyHostToDevice));
}
// CUDA_CALL(hipMemcpy(d_idata1, input1, calBytes, hipMemcpyHostToDevice));
// CUDA_CALL(hipMemcpy(d_idata2, input2, calBytes, hipMemcpyHostToDevice));
#ifdef DEBUG
int *test2norm;
test2norm = (int *)malloc(calBytes);
assert(test2norm);
#endif
struct timespec time_start = {0, 0}, time_end = {0, 0};
clock_gettime(CLOCK_REALTIME, &time_start);
    // repeat 100 times to compare timings
for (int idx = 0; idx < 100; idx++)
{
// res = 0;
        // TODO: launch the GPU kernels to compute the RBF between the two vectors
dim3 dimBlock(NTHREADS, 1, 1);
dim3 dimGrid(n2NormBlocks, 1, 1);
int smemSize = NTHREADS * sizeof(int);
// dim3 trydimBlock((NTHREADS << 1), 1, 1);
hipLaunchKernelGGL(( norm2), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata1, d_idata, len);
#ifdef DEBUG
CUDA_CALL(hipMemcpy(test2norm, d_idata, calBytes, hipMemcpyDeviceToHost));
#endif
#ifndef ONLY_NORM2
dimGrid.x = nReduBlocks;
hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, len);
int s = nReduBlocks;
while (s > 1)
{
dim3 dimGrid((s + NTHREADS - 1) / NTHREADS, 1, 1);
CUDA_CALL(hipMemcpy(d_intermediateSums, d_odata, s * sizeof(int), hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_intermediateSums, d_odata, s);
CUDA_CALL(hipGetLastError());
s /= (NTHREADS * 2);
}
        // TODO: copy the GPU output back to the CPU
CUDA_CALL(hipMemcpy(&res, d_odata, sizeof(int), hipMemcpyDeviceToHost));
#endif
}
clock_gettime(CLOCK_REALTIME, &time_end);
double costTime = (time_end.tv_sec - time_start.tv_sec) * 1000 * 1000 * 1000 + time_end.tv_nsec - time_start.tv_nsec;
printf("GPU cal %d size cost:%.7lfms\n", len, costTime / 1000 / 1000);
    // TODO: free the allocated GPU memory
#ifdef DEBUG
printf("test for 2norm in GPU:\n");
for (int i = 0; i < len; ++i)
{
if ((input1[i] - input2[i]) * (input1[i] - input2[i]) != test2norm[i])
printf("i:%d, test:%d, true:%d\n", i, test2norm[i], (input1[i] - input2[i]) * (input1[i] - input2[i]));
}
free(test2norm);
#endif
// CUDA_CALL(hipFree(d_idata2));
CUDA_CALL(hipFree(d_idata1));
CUDA_CALL(hipFree(d_idata));
CUDA_CALL(hipFree(d_odata));
CUDA_CALL(hipFree(d_intermediateSums));
return res;
}
// CPU version
int rbfComputeCPU(int *input1, int *input2, int len)
{
struct timespec time_start = {0, 0}, time_end = {0, 0};
clock_gettime(CLOCK_REALTIME, &time_start);
int res = 0;
for (int idx = 0; idx < 100; idx++)
{
res = 0;
for (int i = 0; i < len; i++)
{
#ifndef ONLY_NORM2
res += (input1[i] - input2[i]) * (input1[i] - input2[i]);
#else
res = (input1[i] - input2[i]) * (input1[i] - input2[i]);
#endif
}
}
clock_gettime(CLOCK_REALTIME, &time_end);
double costTime = (time_end.tv_sec - time_start.tv_sec) * 1000 * 1000 * 1000 + time_end.tv_nsec - time_start.tv_nsec;
printf("CPU cal %d size cost:%.7lfms\n", len, costTime / 1000 / 1000);
return res;
}
__host__ int main()
{
int *h_idata1, *h_idata2;
h_idata1 = (int *)malloc(BYTES);
h_idata2 = (int *)malloc(BYTES);
assert(h_idata1);
assert(h_idata2);
srand((unsigned)time(NULL));
for (int i = 0; i < FOURMB; i++)
{
h_idata1[i] = rand() & 0xff;
h_idata2[i] = rand() & 0xff;
}
printf("initialize ready\n");
for (int n = INITN; n <= FOURMB; n *= 4)
{
printf("n=%d:\n", n);
int cpu_result = rbfComputeCPU(h_idata1, h_idata2, n);
int gpu_result = rbfComputeGPU(h_idata1, h_idata2, n);
#ifndef ONLY_NORM2
if (cpu_result != gpu_result)
{
printf("ERROR happen when compute %d\n", n);
printf("cpu_result = %d,gpu_result = %d\n", cpu_result, gpu_result);
free(h_idata1);
free(h_idata2);
exit(1);
}
#endif
}
free(h_idata1);
free(h_idata2);
} | 3fb4bf2272f84bddae2febac29cae37947b36243.cu | // #define DEBUG
// #define ONLY_NORM2
#ifdef DEBUG
#define FOURMB (2 * 1024 * 1024)
#define BYTES (FOURMB * sizeof(int))
#define NTHREADS 64
#define INITN 128
#else
#define FOURMB (2 * 1024 * 1024)
// #define FOURM
#define BYTES (FOURMB * sizeof(int))
#define NTHREADS 128
#define INITN 1024
#endif
// homework1 program
// TODO: GPU version that computes the 2-norm of the difference of two vectors
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
#define CUDA_CALL(x) \
{ \
const cudaError_t a = (x); \
if (a != cudaSuccess) \
{ \
printf("\nCUDA Error: %s (err_num = %d)\n", cudaGetErrorString(a), a); \
cudaDeviceReset(); \
assert(0); \
} \
}
// TODO: define the GPU kernel functions and call them from rbfComputeGPU
#ifndef ONLY_NORM2
__device__ void wrapReduce(volatile int *sdata, int tid)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
sdata[tid] += (i + blockDim.x < n) ? g_idata[i + blockDim.x] : 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid < 32)
wrapReduce(sdata, tid);
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
#endif
__global__ void norm2(int *input, int *output, int len)
{
extern __shared__ int smem[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
int truei = blockIdx.x * blockDim.x + threadIdx.x;
smem[tid] = input[i] - input[i + blockDim.x];
__syncthreads();
smem[tid] = smem[tid] * smem[tid];
output[truei] = smem[tid];
}
// no fusion version:
__host__ int rbfComputeGPU(int *input1, int *input2, int len)
{
int *d_idata1;
// int *d_idata2;
int *d_idata;
int *d_odata;
int *d_intermediateSums;
int res = 0;
int nReduBlocks = len / NTHREADS / 2;
int n2NormBlocks = len / NTHREADS;
int calBytes = len * sizeof(int);
    // TODO: allocate memory on the GPU
// CUDA_CALL();
CUDA_CALL(cudaMalloc((void **)&d_idata1, calBytes * 2));
// CUDA_CALL(cudaMalloc((void **)&d_idata2, calBytes));
CUDA_CALL(cudaMalloc((void **)&d_idata, calBytes));
CUDA_CALL(cudaMalloc((void **)&d_odata, nReduBlocks * sizeof(int)));
CUDA_CALL(cudaMalloc((void **)&d_intermediateSums, sizeof(int) * nReduBlocks));
    // TODO: copy the CPU inputs into GPU global memory
for (int i = 0; i < len; i += NTHREADS)
{
CUDA_CALL(cudaMemcpy(&d_idata1[i * 2], &input1[i], NTHREADS * sizeof(int), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(&d_idata1[i * 2 + NTHREADS], &input2[i], NTHREADS * sizeof(int), cudaMemcpyHostToDevice));
}
// CUDA_CALL(cudaMemcpy(d_idata1, input1, calBytes, cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(d_idata2, input2, calBytes, cudaMemcpyHostToDevice));
#ifdef DEBUG
int *test2norm;
test2norm = (int *)malloc(calBytes);
assert(test2norm);
#endif
struct timespec time_start = {0, 0}, time_end = {0, 0};
clock_gettime(CLOCK_REALTIME, &time_start);
    // repeat 100 times to compare timings
for (int idx = 0; idx < 100; idx++)
{
// res = 0;
        // TODO: launch the GPU kernels to compute the RBF between the two vectors
dim3 dimBlock(NTHREADS, 1, 1);
dim3 dimGrid(n2NormBlocks, 1, 1);
int smemSize = NTHREADS * sizeof(int);
// dim3 trydimBlock((NTHREADS << 1), 1, 1);
norm2<<<dimGrid, dimBlock, smemSize>>>(d_idata1, d_idata, len);
#ifdef DEBUG
CUDA_CALL(cudaMemcpy(test2norm, d_idata, calBytes, cudaMemcpyDeviceToHost));
#endif
#ifndef ONLY_NORM2
dimGrid.x = nReduBlocks;
reduce<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, len);
int s = nReduBlocks;
while (s > 1)
{
dim3 dimGrid((s + NTHREADS - 1) / NTHREADS, 1, 1);
CUDA_CALL(cudaMemcpy(d_intermediateSums, d_odata, s * sizeof(int), cudaMemcpyDeviceToDevice));
reduce<<<dimGrid, dimBlock, smemSize>>>(d_intermediateSums, d_odata, s);
CUDA_CALL(cudaGetLastError());
s /= (NTHREADS * 2);
}
        // TODO: copy the GPU output back to the CPU
CUDA_CALL(cudaMemcpy(&res, d_odata, sizeof(int), cudaMemcpyDeviceToHost));
#endif
}
clock_gettime(CLOCK_REALTIME, &time_end);
double costTime = (time_end.tv_sec - time_start.tv_sec) * 1000 * 1000 * 1000 + time_end.tv_nsec - time_start.tv_nsec;
printf("GPU cal %d size cost:%.7lfms\n", len, costTime / 1000 / 1000);
    // TODO: free the allocated GPU memory
#ifdef DEBUG
printf("test for 2norm in GPU:\n");
for (int i = 0; i < len; ++i)
{
if ((input1[i] - input2[i]) * (input1[i] - input2[i]) != test2norm[i])
printf("i:%d, test:%d, true:%d\n", i, test2norm[i], (input1[i] - input2[i]) * (input1[i] - input2[i]));
}
free(test2norm);
#endif
// CUDA_CALL(cudaFree(d_idata2));
CUDA_CALL(cudaFree(d_idata1));
CUDA_CALL(cudaFree(d_idata));
CUDA_CALL(cudaFree(d_odata));
CUDA_CALL(cudaFree(d_intermediateSums));
return res;
}
// CPU version
int rbfComputeCPU(int *input1, int *input2, int len)
{
struct timespec time_start = {0, 0}, time_end = {0, 0};
clock_gettime(CLOCK_REALTIME, &time_start);
int res = 0;
for (int idx = 0; idx < 100; idx++)
{
res = 0;
for (int i = 0; i < len; i++)
{
#ifndef ONLY_NORM2
res += (input1[i] - input2[i]) * (input1[i] - input2[i]);
#else
res = (input1[i] - input2[i]) * (input1[i] - input2[i]);
#endif
}
}
clock_gettime(CLOCK_REALTIME, &time_end);
double costTime = (time_end.tv_sec - time_start.tv_sec) * 1000 * 1000 * 1000 + time_end.tv_nsec - time_start.tv_nsec;
printf("CPU cal %d size cost:%.7lfms\n", len, costTime / 1000 / 1000);
return res;
}
__host__ int main()
{
int *h_idata1, *h_idata2;
h_idata1 = (int *)malloc(BYTES);
h_idata2 = (int *)malloc(BYTES);
assert(h_idata1);
assert(h_idata2);
srand((unsigned)time(NULL));
for (int i = 0; i < FOURMB; i++)
{
h_idata1[i] = rand() & 0xff;
h_idata2[i] = rand() & 0xff;
}
printf("initialize ready\n");
for (int n = INITN; n <= FOURMB; n *= 4)
{
printf("n=%d:\n", n);
int cpu_result = rbfComputeCPU(h_idata1, h_idata2, n);
int gpu_result = rbfComputeGPU(h_idata1, h_idata2, n);
#ifndef ONLY_NORM2
if (cpu_result != gpu_result)
{
printf("ERROR happen when compute %d\n", n);
printf("cpu_result = %d,gpu_result = %d\n", cpu_result, gpu_result);
free(h_idata1);
free(h_idata2);
exit(1);
}
#endif
}
free(h_idata1);
free(h_idata2);
} |
a17b2952db618b86f0162125a8be86cc4f9fa397.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/hip/detail/IndexUtils.cuh>
#include <vector>
namespace at {
namespace cuda {
namespace detail {
struct SizeAndStride {
int64_t size;
int64_t stride;
};
/*
A comparator that will sort SizeAndStride structs by stride,
in ascending order.
*/
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
if (aS->stride < bS->stride) return -1;
if (aS->stride == bS->stride) return 0;
return 1;
}
/*
Returns false if there is no possibility that the tensor
has "overlapping" indices and true otherwise.
"Overlapping" indices are two+ valid indices that specify
the same offset within the tensor.
The function does this by checking for a sufficient but not
necessary condition of no overlap. In particular, that
there exists an ordering of the tensor's dimensions
that is nicely "nested," with each dimension contained
within the next one.
*/
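/*
  Worked example of the check below: sizes [2, 3] with strides [3, 1] sort to
  (size 3, stride 1), (size 2, stride 3); (3 - 1) * 1 = 2 < 3, so the dimensions nest and
  no overlap is reported. Sizes [2, 2] with strides [1, 1] give (2 - 1) * 1 >= 1, so
  overlapping indices are possible.
*/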
bool maybeOverlappingIndices(const Tensor& t) {
/* Extract size/stride arrays; only consider size >1 dims. */
std::vector<SizeAndStride> info(t.dim());
int dims = t.dim();
int nonSize1Dims = 0;
for (int i = 0; i < dims; ++i) {
int64_t size = t.size(i);
if (size > 1) {
info[nonSize1Dims].size = size;
info[nonSize1Dims].stride = t.stride(i);
if (info[nonSize1Dims].stride < 1) {
return true;
}
++nonSize1Dims;
}
}
// Short-circuits if tensor is a single element.
if (nonSize1Dims == 0) {
return false;
}
/* Ascending order (innermost dimension in sorted view is at [0]) */
qsort(info.data(), nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride);
for (int i = 0; i < (nonSize1Dims - 1); ++i) {
if (((info[i].size - 1) * info[i].stride) >= info[i + 1].stride) {
return true;
}
}
return false;
}
} // detail
} // cuda
} // at
| a17b2952db618b86f0162125a8be86cc4f9fa397.cu | #include <ATen/cuda/detail/IndexUtils.cuh>
#include <vector>
namespace at {
namespace cuda {
namespace detail {
struct SizeAndStride {
int64_t size;
int64_t stride;
};
/*
A comparator that will sort SizeAndStride structs by stride,
in ascending order.
*/
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
if (aS->stride < bS->stride) return -1;
if (aS->stride == bS->stride) return 0;
return 1;
}
/*
Returns false if there is no possibility that the tensor
has "overlapping" indices and true otherwise.
"Overlapping" indices are two+ valid indices that specify
the same offset within the tensor.
The function does this by checking for a sufficient but not
necessary condition of no overlap. In particular, that
there exists an ordering of the tensor's dimensions
that is nicely "nested," with each dimension contained
within the next one.
*/
bool maybeOverlappingIndices(const Tensor& t) {
/* Extract size/stride arrays; only consider size >1 dims. */
std::vector<SizeAndStride> info(t.dim());
int dims = t.dim();
int nonSize1Dims = 0;
for (int i = 0; i < dims; ++i) {
int64_t size = t.size(i);
if (size > 1) {
info[nonSize1Dims].size = size;
info[nonSize1Dims].stride = t.stride(i);
if (info[nonSize1Dims].stride < 1) {
return true;
}
++nonSize1Dims;
}
}
// Short-circuits if tensor is a single element.
if (nonSize1Dims == 0) {
return false;
}
/* Ascending order (innermost dimension in sorted view is at [0]) */
qsort(info.data(), nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride);
for (int i = 0; i < (nonSize1Dims - 1); ++i) {
if (((info[i].size - 1) * info[i].stride) >= info[i + 1].stride) {
return true;
}
}
return false;
}
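/*
  Worked example (sketch): a contiguous 2x3 tensor has (size, stride) pairs
  (2, 3) and (3, 1); sorted by stride they become (3, 1), (2, 3), and since
  (3 - 1) * 1 = 2 < 3 the function reports no possible overlap. A dimension
  with stride 0 (e.g. produced by expand()) trips the stride < 1 check above
  and reports possible overlap.
*/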
} // detail
} // cuda
} // at
|
b0e991d3a6cf292e9ba89139333267a3f77bf31b.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA blur
* Kevin Yuh, 2014 */
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "fft_convolve_cuda.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
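/*
  Why reinterpreting the bits works: for IEEE-754 floats of the same sign the
  raw bit patterns order the same way as the values themselves, e.g.
  __float_as_int(1.0f) == 0x3F800000 < __float_as_int(2.0f) == 0x40000000.
  The comparison is therefore only reliable when both operands share a sign,
  e.g. the non-negative fabsf() values used in cudaMaximumKernel below.
*/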
__global__
void
cudaProdScaleKernel(const hipfftComplex *raw_data, const hipfftComplex *impulse_v,
hipfftComplex *out_data,
int padded_length) {
/* TODO ok: Implement the point-wise multiplication and scaling for the
FFT'd input and impulse response.
Recall that these are complex numbers, so you'll need to use the
appropriate rule for multiplying them.
Also remember to scale by the padded length of the signal
(see the notes for Question 1).
As in Assignment 1 and Week 1, remember to make your implementation
resilient to varying numbers of threads.
*/
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// if (thread_index == 1)
// printf("%d\n", &padded_length);
while (thread_index < padded_length) {
out_data[thread_index].x = raw_data[thread_index].x * impulse_v[thread_index].x - raw_data[thread_index].y * impulse_v[thread_index].y;
out_data[thread_index].x = out_data[thread_index].x / padded_length;
out_data[thread_index].y = raw_data[thread_index].x * impulse_v[thread_index].y + raw_data[thread_index].y * impulse_v[thread_index].x;
out_data[thread_index].y = out_data[thread_index].y / padded_length;
// if (thread_index == 1){
// printf("%f\n", &raw_data[thread_index].x);
// printf("%f\n", &impulse_v[thread_index].x);
// printf("%f\n", &out_data[thread_index].x);
// }
thread_index += blockDim.x * gridDim.x;
}
}
__global__
void
cudaMaximumKernel(hipfftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2 ok: Implement the maximum-finding and subsequent
normalization (dividing by maximum).
There are many ways to do this reduction, and some methods
have much better performance than others.
For this section: Please explain your approach to the reduction,
including why you chose the optimizations you did
(especially as they relate to GPU hardware).
You'll likely find the above atomicMax function helpful.
(CUDA's atomicMax function doesn't work for floating-point values.)
It's based on two principles:
1) From Week 2, any atomic function can be implemented using
atomic compare-and-swap.
2) One can "represent" floating-point values as integers in
a way that preserves comparison, if the sign of the two
values is the same. (see http://stackoverflow.com/questions/
29596797/can-the-return-value-of-float-as-int-be-used-to-
compare-float-in-cuda)
*/
/*
    allocate shared memory for 512 floats, one partial-maximum slot per thread;
    this kernel therefore assumes it is launched with blockDim.x <= 512
*/
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    __shared__ float data[512];
    data[threadIdx.x] = 0.0f;   // shared memory is uninitialized, so zero each slot first
    while (i < padded_length) {
        atomicMax(&data[threadIdx.x], fabsf(out_data[i].x));
        i += blockDim.x * gridDim.x;
    }
    __syncthreads();   // make every thread's partial maximum visible before the tree reduction
int l = blockDim.x;
while (l > 1) {
// int bias = l / 2;
l /= 2;
if (threadIdx.x < l) {
data[threadIdx.x] = (fabsf(data[threadIdx.x])>fabsf(data[threadIdx.x + l]))? \
data[threadIdx.x]:data[threadIdx.x + l];
}
__syncthreads();
// l /= 2;
}
if (threadIdx.x == 0) {
atomicMax(max_abs_val, fabsf(data[0]));
}
// while (i < padded_length) {
// data[threadIdx.x] = out_data[i].x;
// __syncthreads();
// int l = blockDim.x;
// while (l > 1) {
// int bias = l / 2;
// if (threadIdx.x < bias) {
// data[threadIdx.x] = (fabsf(data[threadIdx.x])>fabsf(data[threadIdx.x + bias]))? \
// data[threadIdx.x]:data[threadIdx.x + bias];
// }
// __syncthreads();
// // if (threadIdx.x < bias) {
// // atomicMax(&data[threadIdx.x], data[threadIdx.x + bias]);
// // }
// l /= 2;
// }
// if (threadIdx.x == 0) {
// atomicMax(max_abs_val, fabsf(data[0]));
// }
// i += blockDim.x * gridDim.x;
// }
}
__global__
void
cudaDivideKernel(hipfftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2 ok: Implement the division kernel. Divide all
data by the value pointed to by max_abs_val.
This kernel should be quite short.
*/
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_index < padded_length) {
// if (thread_index == 100) printf("%d divide\n", &thread_index);
out_data[thread_index].x /= *max_abs_val;
thread_index += blockDim.x * gridDim.x;
}
}
void cudaCallProdScaleKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const hipfftComplex *raw_data,
const hipfftComplex *impulse_v,
hipfftComplex *out_data,
const unsigned int padded_length) {
/* TODO ok Call the element-wise product and scaling kernel. */
hipLaunchKernelGGL(( cudaProdScaleKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, impulse_v, out_data, padded_length);
}
void cudaCallMaximumKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
hipfftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2 ok: Call the max-finding kernel. */
hipLaunchKernelGGL(( cudaMaximumKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, out_data, max_abs_val, padded_length);
}
void cudaCallDivideKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
hipfftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2 ok: Call the division kernel. */
hipLaunchKernelGGL(( cudaDivideKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, out_data, max_abs_val, padded_length);
}
| b0e991d3a6cf292e9ba89139333267a3f77bf31b.cu | /* CUDA blur
* Kevin Yuh, 2014 */
#include <cstdio>
#include <cuda_runtime.h>
#include <cufft.h>
#include "fft_convolve_cuda.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
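/*
  Why reinterpreting the bits works: for IEEE-754 floats of the same sign the
  raw bit patterns order the same way as the values themselves, e.g.
  __float_as_int(1.0f) == 0x3F800000 < __float_as_int(2.0f) == 0x40000000.
  The comparison is therefore only reliable when both operands share a sign,
  e.g. the non-negative fabsf() values used in cudaMaximumKernel below.
*/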
__global__
void
cudaProdScaleKernel(const cufftComplex *raw_data, const cufftComplex *impulse_v,
cufftComplex *out_data,
int padded_length) {
/* TODO ok: Implement the point-wise multiplication and scaling for the
FFT'd input and impulse response.
Recall that these are complex numbers, so you'll need to use the
appropriate rule for multiplying them.
Also remember to scale by the padded length of the signal
(see the notes for Question 1).
As in Assignment 1 and Week 1, remember to make your implementation
resilient to varying numbers of threads.
*/
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// if (thread_index == 1)
// printf("%d\n", &padded_length);
while (thread_index < padded_length) {
out_data[thread_index].x = raw_data[thread_index].x * impulse_v[thread_index].x - raw_data[thread_index].y * impulse_v[thread_index].y;
out_data[thread_index].x = out_data[thread_index].x / padded_length;
out_data[thread_index].y = raw_data[thread_index].x * impulse_v[thread_index].y + raw_data[thread_index].y * impulse_v[thread_index].x;
out_data[thread_index].y = out_data[thread_index].y / padded_length;
// if (thread_index == 1){
// printf("%f\n", &raw_data[thread_index].x);
// printf("%f\n", &impulse_v[thread_index].x);
// printf("%f\n", &out_data[thread_index].x);
// }
thread_index += blockDim.x * gridDim.x;
}
}
__global__
void
cudaMaximumKernel(cufftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2 ok: Implement the maximum-finding and subsequent
normalization (dividing by maximum).
There are many ways to do this reduction, and some methods
have much better performance than others.
For this section: Please explain your approach to the reduction,
including why you chose the optimizations you did
(especially as they relate to GPU hardware).
You'll likely find the above atomicMax function helpful.
(CUDA's atomicMax function doesn't work for floating-point values.)
It's based on two principles:
1) From Week 2, any atomic function can be implemented using
atomic compare-and-swap.
2) One can "represent" floating-point values as integers in
a way that preserves comparison, if the sign of the two
values is the same. (see http://stackoverflow.com/questions/
29596797/can-the-return-value-of-float-as-int-be-used-to-
compare-float-in-cuda)
*/
/*
    allocate shared memory for 512 floats, one partial-maximum slot per thread;
    this kernel therefore assumes it is launched with blockDim.x <= 512
*/
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    __shared__ float data[512];
    data[threadIdx.x] = 0.0f;   // shared memory is uninitialized, so zero each slot first
    while (i < padded_length) {
        atomicMax(&data[threadIdx.x], fabsf(out_data[i].x));
        i += blockDim.x * gridDim.x;
    }
    __syncthreads();   // make every thread's partial maximum visible before the tree reduction
int l = blockDim.x;
while (l > 1) {
// int bias = l / 2;
l /= 2;
if (threadIdx.x < l) {
data[threadIdx.x] = (fabsf(data[threadIdx.x])>fabsf(data[threadIdx.x + l]))? \
data[threadIdx.x]:data[threadIdx.x + l];
}
__syncthreads();
// l /= 2;
}
if (threadIdx.x == 0) {
atomicMax(max_abs_val, fabsf(data[0]));
}
// while (i < padded_length) {
// data[threadIdx.x] = out_data[i].x;
// __syncthreads();
// int l = blockDim.x;
// while (l > 1) {
// int bias = l / 2;
// if (threadIdx.x < bias) {
// data[threadIdx.x] = (fabsf(data[threadIdx.x])>fabsf(data[threadIdx.x + bias]))? \
// data[threadIdx.x]:data[threadIdx.x + bias];
// }
// __syncthreads();
// // if (threadIdx.x < bias) {
// // atomicMax(&data[threadIdx.x], data[threadIdx.x + bias]);
// // }
// l /= 2;
// }
// if (threadIdx.x == 0) {
// atomicMax(max_abs_val, fabsf(data[0]));
// }
// i += blockDim.x * gridDim.x;
// }
}
__global__
void
cudaDivideKernel(cufftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2 ok: Implement the division kernel. Divide all
data by the value pointed to by max_abs_val.
This kernel should be quite short.
*/
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_index < padded_length) {
// if (thread_index == 100) printf("%d divide\n", &thread_index);
out_data[thread_index].x /= *max_abs_val;
thread_index += blockDim.x * gridDim.x;
}
}
void cudaCallProdScaleKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const cufftComplex *raw_data,
const cufftComplex *impulse_v,
cufftComplex *out_data,
const unsigned int padded_length) {
/* TODO ok Call the element-wise product and scaling kernel. */
cudaProdScaleKernel<<<blocks, threadsPerBlock>>>(raw_data, impulse_v, out_data, padded_length);
}
void cudaCallMaximumKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
cufftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2 ok: Call the max-finding kernel. */
cudaMaximumKernel<<<blocks, threadsPerBlock>>>(out_data, max_abs_val, padded_length);
}
void cudaCallDivideKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
cufftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2 ok: Call the division kernel. */
cudaDivideKernel<<<blocks, threadsPerBlock>>>(out_data, max_abs_val, padded_length);
}
|
189423328894e5d87bc229a9744f8ed78cd40d65.hip | // !!! This is a file automatically generated by hipify!!!
// Do not alter the preprocessor directives
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include <hip/driver_types.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/pair.h"
#include "thrust/extrema.h"
#define NUM_CHANNELS 1
#define MinVal(x, y) (((x) < (y)) ? (x) : (y))
#define MaxVal(x, y) (((x) > (y)) ? (x) : (y))
//This function multiply every pixel value with scale constant
struct multiplyFunction
{
float a;
multiplyFunction(float s_constant) {
a = s_constant;
}
__host__ __device__
uint8_t operator()(const uint8_t& x) const
{
return a*x ;
}
};
//This function is another option to contrast image with one epoch. Subtracts the minimum value and multiply with scale constant together
struct scaleFunction
{
unsigned int a;
unsigned int b;
scaleFunction(unsigned int s_constant,unsigned int min) {
a = s_constant;
b= min;
}
__host__ __device__
uint8_t operator()(const uint8_t& x) const
{
return (x-b)*a ;
}
};
int main() {
int width; //image width
int height; //image height
int bpp; //bytes per pixel if the image was RGB (not used)
// Load a grayscale bmp image to an unsigned integer array with its height and weight.
// (uint8_t is an alias for "unsigned char")
uint8_t* image = stbi_load("./samples/640x426.bmp", &width, &height, &bpp, NUM_CHANNELS);
size_t image_size = width * height * sizeof(uint8_t);
// Print for sanity check
printf("Bytes per pixel: %d \n", bpp / 3); //Image is grayscale, so bpp / 3;
printf("Height: %d \n", height);
printf("Width: %d \n", width);
//Start Counter
hipEvent_t start, stop;
float elapsed_time_ms;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//Create device vector image_d and initialize with value of image
thrust::device_vector<uint8_t> image_d(image, image + (width * height));
// Find minimum and maximum values
int min_t = thrust::reduce(image_d.begin(), image_d.end(),255, thrust::minimum<int>());
int max_t = thrust::reduce(image_d.begin(), image_d.end(), 0, thrust::maximum<int>());
float scale_constant = 255.0f / (max_t - min_t);
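	// Linear contrast stretch: each pixel is mapped as (x - min) * 255 / (max - min),
	// so the darkest input value goes to 0 and the brightest to 255.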
// I designed 3 different thrust kernels. They do the same job with different methods
// Their performances are very similar so it does not matter which one you use
//option 1 for subtract and scale
thrust::for_each(image_d.begin(), image_d.end(), thrust::placeholders::_1 -= min_t);
thrust::transform(image_d.begin(), image_d.end(), image_d.begin(), multiplyFunction(scale_constant));
//option 2 for subtract and scale
//thrust::for_each(image_d.begin(), image_d.end(), thrust::placeholders::_1 -= min_t);
//thrust::for_each(image_d.begin(), image_d.end(), thrust::placeholders::_1 *= scale_constant);
//option 3 for subtract and scale
//thrust::transform(image_d.begin(), image_d.end(), image_d.begin(), scaleFunction(scale_constant,min_t));
//Copy enhanced image to host
thrust::host_vector<uint8_t> image_h(image_d);
uint8_t* image_e = image_h.data();
//Stop timing
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("\nTime to calculate results(GPU Time): %f ms.\n\n", elapsed_time_ms);
// Write image array into a bmp file
stbi_write_bmp("./samples/out_img.bmp", width, height, 1, image_e);
	printf("\nEnhanced image successfully saved.\n\n");
//print minimum and maximum value
printf("Minimum Pixel Value: %d\n", min_t);
printf("Maximum Pixel Value: %d\n", max_t);
return 0;
}
| 189423328894e5d87bc229a9744f8ed78cd40d65.cu | // Do not alter the preprocessor directives
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include <driver_types.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/pair.h"
#include "thrust/extrema.h"
#define NUM_CHANNELS 1
#define MinVal(x, y) (((x) < (y)) ? (x) : (y))
#define MaxVal(x, y) (((x) > (y)) ? (x) : (y))
//This function multiply every pixel value with scale constant
struct multiplyFunction
{
float a;
multiplyFunction(float s_constant) {
a = s_constant;
}
__host__ __device__
uint8_t operator()(const uint8_t& x) const
{
return a*x ;
}
};
//This function is another option to contrast image with one epoch. Subtracts the minimum value and multiply with scale constant together
struct scaleFunction
{
unsigned int a;
unsigned int b;
scaleFunction(unsigned int s_constant,unsigned int min) {
a = s_constant;
b= min;
}
__host__ __device__
uint8_t operator()(const uint8_t& x) const
{
return (x-b)*a ;
}
};
int main() {
int width; //image width
int height; //image height
int bpp; //bytes per pixel if the image was RGB (not used)
// Load a grayscale bmp image to an unsigned integer array with its height and weight.
// (uint8_t is an alias for "unsigned char")
uint8_t* image = stbi_load("./samples/640x426.bmp", &width, &height, &bpp, NUM_CHANNELS);
size_t image_size = width * height * sizeof(uint8_t);
// Print for sanity check
printf("Bytes per pixel: %d \n", bpp / 3); //Image is grayscale, so bpp / 3;
printf("Height: %d \n", height);
printf("Width: %d \n", width);
//Start Counter
cudaEvent_t start, stop;
float elapsed_time_ms;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//Create device vector image_d and initialize with value of image
thrust::device_vector<uint8_t> image_d(image, image + (width * height));
// Find minimum and maximum values
int min_t = thrust::reduce(image_d.begin(), image_d.end(),255, thrust::minimum<int>());
int max_t = thrust::reduce(image_d.begin(), image_d.end(), 0, thrust::maximum<int>());
float scale_constant = 255.0f / (max_t - min_t);
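	// Linear contrast stretch: each pixel is mapped as (x - min) * 255 / (max - min),
	// so the darkest input value goes to 0 and the brightest to 255.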
// I designed 3 different thrust kernels. They do the same job with different methods
// Their performances are very similar so it does not matter which one you use
//option 1 for subtract and scale
thrust::for_each(image_d.begin(), image_d.end(), thrust::placeholders::_1 -= min_t);
thrust::transform(image_d.begin(), image_d.end(), image_d.begin(), multiplyFunction(scale_constant));
//option 2 for subtract and scale
//thrust::for_each(image_d.begin(), image_d.end(), thrust::placeholders::_1 -= min_t);
//thrust::for_each(image_d.begin(), image_d.end(), thrust::placeholders::_1 *= scale_constant);
//option 3 for subtract and scale
//thrust::transform(image_d.begin(), image_d.end(), image_d.begin(), scaleFunction(scale_constant,min_t));
//Copy enhanced image to host
thrust::host_vector<uint8_t> image_h(image_d);
uint8_t* image_e = image_h.data();
//Stop timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("\nTime to calculate results(GPU Time): %f ms.\n\n", elapsed_time_ms);
// Write image array into a bmp file
stbi_write_bmp("./samples/out_img.bmp", width, height, 1, image_e);
	printf("\nEnhanced image successfully saved.\n\n");
//print minimum and maximum value
printf("Minimum Pixel Value: %d\n", min_t);
printf("Maximum Pixel Value: %d\n", max_t);
return 0;
}
|
83f4239aebee83f927bf962ffd8ea18379c4bb79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "random_op_impl_hip.cuh"
template <typename T>
__global__ void NormalKernel(int seed, hiprandState_t *globalState, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
hiprand_init(seed, i, 0, &globalState[i]);
output[i] = (T)hiprand_normal(&globalState[i]);
}
return;
}
__device__ bool dev_error_res = false;
template <typename T>
__global__ void UniformIntKernel(int seed, hiprandState_t *globalState, T *input1, size_t input_size_1,
T *input2, size_t input_size_2, T *output, size_t count) {
if (!(input1[0] < input2[0])) {
dev_error_res = false;
return;
}
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
hiprand_init(seed, i, 0, &globalState[i]);
output[i] = (T)(hiprand_uniform(&globalState[i]) * (input2[0] - input1[0])) + input1[0];
}
dev_error_res = true;
return;
}
template <typename T>
__global__ void UniformRealKernel(int seed, hiprandState_t *globalState, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
hiprand_init(seed, i, 0, &globalState[i]);
output[i] = (T)hiprand_uniform(&globalState[i]);
}
return;
}
template <typename T>
void StandardNormal(int seed, int seed2, hiprandState_t *globalState, T *output, size_t count, hipStream_t cuda_stream) {
int RNG_seed = 0;
std::random_device rd;
if (seed2 != 0) {
RNG_seed = seed2;
} else if (seed != 0) {
RNG_seed = seed;
} else {
RNG_seed = static_cast<int>(rd());
}
hipLaunchKernelGGL(( NormalKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, RNG_seed, globalState, output, count);
return;
}
template <typename T>
bool UniformInt(int seed, int seed2, hiprandState_t *globalState, T *input1, size_t input_size_1,
T *input2, size_t input_size_2, T *output, size_t count, hipStream_t cuda_stream) {
int RNG_seed = 0;
std::random_device rd;
if (seed2 != 0) {
RNG_seed = seed2;
} else if (seed != 0) {
RNG_seed = seed;
} else {
RNG_seed = static_cast<int>(rd());
}
bool host_error_res = false;
hipLaunchKernelGGL(( UniformIntKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream,
RNG_seed, globalState, input1, input_size_1, input2, input_size_2, output, count);
hipMemcpyFromSymbol(&host_error_res, dev_error_res, sizeof(bool));
return host_error_res;
}
template <typename T>
void UniformReal(int seed, int seed2, hiprandState_t *globalState, T *output, size_t count, hipStream_t cuda_stream) {
int RNG_seed = 0;
std::random_device rd;
if (seed2 != 0) {
RNG_seed = seed2;
} else if (seed != 0) {
RNG_seed = seed;
} else {
RNG_seed = static_cast<int>(rd());
}
hipLaunchKernelGGL(( UniformRealKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, RNG_seed, globalState, output, count);
return;
}
template void StandardNormal<float>(int seed, int seed2, hiprandState_t *globalState,
float *output, size_t count, hipStream_t cuda_stream);
template void StandardNormal<int>(int seed, int seed2, hiprandState_t *globalState,
int *output, size_t count, hipStream_t cuda_stream);
template bool UniformInt<float>(int seed, int seed2, hiprandState_t *globalState, float *input1, size_t input_size_1,
float *input2, size_t input_size_2, float *output, size_t count,
hipStream_t cuda_stream);
template bool UniformInt<int>(int seed, int seed2, hiprandState_t *globalState, int *input1, size_t input_size_1,
int *input2, size_t input_size_2, int *output, size_t count,
hipStream_t cuda_stream);
template void UniformReal<float>(int seed, int seed2, hiprandState_t *globalState,
float *output, size_t count, hipStream_t cuda_stream);
template void UniformReal<int>(int seed, int seed2, hiprandState_t *globalState,
int *output, size_t count, hipStream_t cuda_stream);
| 83f4239aebee83f927bf962ffd8ea18379c4bb79.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "random_op_impl.cuh"
template <typename T>
__global__ void NormalKernel(int seed, curandState *globalState, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
curand_init(seed, i, 0, &globalState[i]);
output[i] = (T)curand_normal(&globalState[i]);
}
return;
}
__device__ bool dev_error_res = false;
template <typename T>
__global__ void UniformIntKernel(int seed, curandState *globalState, T *input1, size_t input_size_1,
T *input2, size_t input_size_2, T *output, size_t count) {
if (!(input1[0] < input2[0])) {
dev_error_res = false;
return;
}
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
curand_init(seed, i, 0, &globalState[i]);
output[i] = (T)(curand_uniform(&globalState[i]) * (input2[0] - input1[0])) + input1[0];
}
dev_error_res = true;
return;
}
template <typename T>
__global__ void UniformRealKernel(int seed, curandState *globalState, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
curand_init(seed, i, 0, &globalState[i]);
output[i] = (T)curand_uniform(&globalState[i]);
}
return;
}
template <typename T>
void StandardNormal(int seed, int seed2, curandState *globalState, T *output, size_t count, cudaStream_t cuda_stream) {
int RNG_seed = 0;
std::random_device rd;
if (seed2 != 0) {
RNG_seed = seed2;
} else if (seed != 0) {
RNG_seed = seed;
} else {
RNG_seed = static_cast<int>(rd());
}
NormalKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(RNG_seed, globalState, output, count);
return;
}
template <typename T>
bool UniformInt(int seed, int seed2, curandState *globalState, T *input1, size_t input_size_1,
T *input2, size_t input_size_2, T *output, size_t count, cudaStream_t cuda_stream) {
int RNG_seed = 0;
std::random_device rd;
if (seed2 != 0) {
RNG_seed = seed2;
} else if (seed != 0) {
RNG_seed = seed;
} else {
RNG_seed = static_cast<int>(rd());
}
bool host_error_res = false;
UniformIntKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>
(RNG_seed, globalState, input1, input_size_1, input2, input_size_2, output, count);
cudaMemcpyFromSymbol(&host_error_res, dev_error_res, sizeof(bool));
return host_error_res;
}
template <typename T>
void UniformReal(int seed, int seed2, curandState *globalState, T *output, size_t count, cudaStream_t cuda_stream) {
int RNG_seed = 0;
std::random_device rd;
if (seed2 != 0) {
RNG_seed = seed2;
} else if (seed != 0) {
RNG_seed = seed;
} else {
RNG_seed = static_cast<int>(rd());
}
UniformRealKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(RNG_seed, globalState, output, count);
return;
}
template void StandardNormal<float>(int seed, int seed2, curandState *globalState,
float *output, size_t count, cudaStream_t cuda_stream);
template void StandardNormal<int>(int seed, int seed2, curandState *globalState,
int *output, size_t count, cudaStream_t cuda_stream);
template bool UniformInt<float>(int seed, int seed2, curandState *globalState, float *input1, size_t input_size_1,
float *input2, size_t input_size_2, float *output, size_t count,
cudaStream_t cuda_stream);
template bool UniformInt<int>(int seed, int seed2, curandState *globalState, int *input1, size_t input_size_1,
int *input2, size_t input_size_2, int *output, size_t count,
cudaStream_t cuda_stream);
template void UniformReal<float>(int seed, int seed2, curandState *globalState,
float *output, size_t count, cudaStream_t cuda_stream);
template void UniformReal<int>(int seed, int seed2, curandState *globalState,
int *output, size_t count, cudaStream_t cuda_stream);
|
d925fc3444863f10862fb7a8a9b93ea4a21d1b37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define NUM_THREADS_PER_BLOCK 256
#define NUM_BLOCKS 16
#define PRINT_TIME 1
#define SM_ARR_LEN 2048*2048
#define TOL 1e-6
#define GIG 1000000000
#define CPG 3.07
#define IMUL(a, b) __mul24(a, b)
#define TILE_WIDTH 16
void initializeArray1D(float *arr, int len, float seed);
__global__ void MatrixMulShared(float *Md, float *Nd, float *Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; // Shared memory
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; // declarations
int bx = blockIdx.x;
int by = blockIdx.y; // ID thread
int tx = threadIdx.x;
int ty = threadIdx.y; // Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// REGISTER!
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
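// Note: the tiled kernel above assumes Width is an exact multiple of TILE_WIDTH
// (true for the launch below, 2048 / 16); a ragged remainder would be skipped by
// the m-loop.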
__global__ void MMK(int width, float* Md, float* Nd, float* Pd)
{
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int k;
float Pvalue = 0.0f;
    if(row < width && col < width) {
for(k = 0; k < width; k++){
Pvalue += Md[row * width + k] * Nd[k * width + col];
}
Pd[row * width + col] = Pvalue;
}
}
int main(int argc, char **argv){
int arrLen = 0;
// GPU Timing variables
hipEvent_t start, stop, start2, stop2;
float elapsed_gpu;
// Arrays on GPU global memoryc
float *Md;
float *Nd;
float *Pd;
// Arrays on the host memory
float *Md_h;
float *Pd_h;
float *Nd_h;
float *Pd_h_gold;
int i, errCount = 0, zeroCount = 0;
if (argc > 1) {
arrLen = atoi(argv[1]);
}
else {
arrLen = SM_ARR_LEN;
}
printf("Length of the array = %d\n", arrLen);
// Select GPU
CUDA_SAFE_CALL(hipSetDevice(1));
// Allocate GPU memory
size_t allocSize = arrLen * sizeof(float);
CUDA_SAFE_CALL(hipMalloc((void **)&Md, allocSize));
CUDA_SAFE_CALL(hipMalloc((void **)&Pd, allocSize));
CUDA_SAFE_CALL(hipMalloc((void **)&Nd, allocSize));
// Allocate arrays on host memory
Pd_h = (float *) malloc(allocSize);
Pd_h_gold = (float *) malloc(allocSize);
Md_h = (float *) malloc(allocSize);
Nd_h = (float *) malloc(allocSize);
// Initialize the host arrays
printf("\nInitializing the arrays ...");
    // Arrays are initialized with a known seed for reproducibility
initializeArray1D(Md_h, arrLen, 24.53);
initializeArray1D(Nd_h, arrLen, 30.53);
printf("\t... done\n\n");
#if PRINT_TIME
// Create the cuda events
hipEventCreate(&start);
hipEventCreate(&stop);
// Record event on the default stream
hipEventRecord(start, 0);
#endif
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(hipMemcpy(Md, Md_h, allocSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(Nd, Nd_h, allocSize, hipMemcpyHostToDevice));
hipEventCreate(&start2);
hipEventCreate(&stop2);
hipEventRecord(start2, 0);
dim3 dimGrid(128,128);
dim3 dimBlock(16,16);
// Launch the kernel
hipLaunchKernelGGL(( MatrixMulShared), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, 2048);
hipEventRecord(stop2,0);
hipEventSynchronize(stop2);
hipEventElapsedTime(&elapsed_gpu, start2, stop2);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
hipEventDestroy(start2);
hipEventDestroy(stop2);
// Check for errors during launch
CUDA_SAFE_CALL(hipPeekAtLastError());
// Transfer the results back to the host
CUDA_SAFE_CALL(hipMemcpy(Pd_h,Pd, allocSize, hipMemcpyDeviceToHost));
#if PRINT_TIME
// Stop and destroy the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
hipEventDestroy(start);
hipEventDestroy(stop);
#endif
// Compute the results on the host
struct timespec diff(struct timespec start, struct timespec end);
struct timespec time1, time2;
struct timespec time_stamp;
printf("Calculating Results on Host: \n");
int Width = 2048;
clock_gettime(CLOCK_REALTIME, &time1);
for (int i = 0; i < Width; ++i){
for (int j = 0; j < Width; ++j) {
float sum = 0;
for (int k = 0; k < Width; ++k) {
float a = Md_h[i*Width + k];
float b = Nd_h[k*Width + j];
sum += a * b;
}
Pd_h_gold[i * Width + j] = sum;
}
}
clock_gettime(CLOCK_REALTIME, &time2);
time_stamp = diff(time1,time2);
printf("%lf\n", ((double) (GIG * time_stamp.tv_sec + time_stamp.tv_nsec)/1000000));
// Compare the results
for(i = 0; i < arrLen; i++) {
if (abs(Pd_h_gold[i] - Pd_h[i]) > TOL) {
errCount++;
}
if (Pd_h[i] == 0) {
zeroCount++;
}
}
/*
for(i = 0; i < 50; i++) {
printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
}
*/
if (errCount > 0) {
        printf("\n@ERROR: TEST FAILED: %d results did not match\n", errCount);
}
else if (zeroCount > 0){
printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
}
else {
printf("\nTEST PASSED: All results matched\n");
}
// Free-up device and host memory
CUDA_SAFE_CALL(hipFree(Pd));
CUDA_SAFE_CALL(hipFree(Md));
CUDA_SAFE_CALL(hipFree(Nd));
free(Pd_h);
free(Md_h);
free(Nd_h);
return 0;
}
struct timespec diff(struct timespec start, struct timespec end)
{
struct timespec temp;
if ((end.tv_nsec-start.tv_nsec)<0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec-start.tv_sec;
temp.tv_nsec = end.tv_nsec-start.tv_nsec;
}
return temp;
}
void initializeArray1D(float *arr, int len, float seed) {
int i;
float randNum;
srand(seed);
for (i = 0; i < len; i++) {
randNum = (float) (rand()%10000);
arr[i] = randNum;
}
}
| d925fc3444863f10862fb7a8a9b93ea4a21d1b37.cu | #include <cstdio>
#include <cstdlib>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define NUM_THREADS_PER_BLOCK 256
#define NUM_BLOCKS 16
#define PRINT_TIME 1
#define SM_ARR_LEN 2048*2048
#define TOL 1e-6
#define GIG 1000000000
#define CPG 3.07
#define IMUL(a, b) __mul24(a, b)
#define TILE_WIDTH 16
void initializeArray1D(float *arr, int len, float seed);
__global__ void MatrixMulShared(float *Md, float *Nd, float *Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; // Shared memory
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; // declarations
int bx = blockIdx.x;
int by = blockIdx.y; // ID thread
int tx = threadIdx.x;
int ty = threadIdx.y; // Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// REGISTER!
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
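// Note: the tiled kernel above assumes Width is an exact multiple of TILE_WIDTH
// (true for the launch below, 2048 / 16); a ragged remainder would be skipped by
// the m-loop.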
__global__ void MMK(int width, float* Md, float* Nd, float* Pd)
{
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int k;
float Pvalue = 0.0f;
    if(row < width && col < width) {
for(k = 0; k < width; k++){
Pvalue += Md[row * width + k] * Nd[k * width + col];
}
Pd[row * width + col] = Pvalue;
}
}
int main(int argc, char **argv){
int arrLen = 0;
// GPU Timing variables
cudaEvent_t start, stop, start2, stop2;
float elapsed_gpu;
// Arrays on GPU global memoryc
float *Md;
float *Nd;
float *Pd;
// Arrays on the host memory
float *Md_h;
float *Pd_h;
float *Nd_h;
float *Pd_h_gold;
int i, errCount = 0, zeroCount = 0;
if (argc > 1) {
arrLen = atoi(argv[1]);
}
else {
arrLen = SM_ARR_LEN;
}
printf("Length of the array = %d\n", arrLen);
// Select GPU
CUDA_SAFE_CALL(cudaSetDevice(1));
// Allocate GPU memory
size_t allocSize = arrLen * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&Md, allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&Pd, allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&Nd, allocSize));
// Allocate arrays on host memory
Pd_h = (float *) malloc(allocSize);
Pd_h_gold = (float *) malloc(allocSize);
Md_h = (float *) malloc(allocSize);
Nd_h = (float *) malloc(allocSize);
// Initialize the host arrays
printf("\nInitializing the arrays ...");
    // Arrays are initialized with a known seed for reproducibility
initializeArray1D(Md_h, arrLen, 24.53);
initializeArray1D(Nd_h, arrLen, 30.53);
printf("\t... done\n\n");
#if PRINT_TIME
// Create the cuda events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Record event on the default stream
cudaEventRecord(start, 0);
#endif
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(cudaMemcpy(Md, Md_h, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(Nd, Nd_h, allocSize, cudaMemcpyHostToDevice));
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cudaEventRecord(start2, 0);
dim3 dimGrid(128,128);
dim3 dimBlock(16,16);
// Launch the kernel
MatrixMulShared<<<dimGrid, dimBlock>>>(Md, Nd, Pd, 2048);
cudaEventRecord(stop2,0);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&elapsed_gpu, start2, stop2);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
cudaEventDestroy(start2);
cudaEventDestroy(stop2);
// Check for errors during launch
CUDA_SAFE_CALL(cudaPeekAtLastError());
// Transfer the results back to the host
CUDA_SAFE_CALL(cudaMemcpy(Pd_h,Pd, allocSize, cudaMemcpyDeviceToHost));
#if PRINT_TIME
// Stop and destroy the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif
// Compute the results on the host
struct timespec diff(struct timespec start, struct timespec end);
struct timespec time1, time2;
struct timespec time_stamp;
printf("Calculating Results on Host: \n");
int Width = 2048;
clock_gettime(CLOCK_REALTIME, &time1);
for (int i = 0; i < Width; ++i){
for (int j = 0; j < Width; ++j) {
float sum = 0;
for (int k = 0; k < Width; ++k) {
float a = Md_h[i*Width + k];
float b = Nd_h[k*Width + j];
sum += a * b;
}
Pd_h_gold[i * Width + j] = sum;
}
}
clock_gettime(CLOCK_REALTIME, &time2);
time_stamp = diff(time1,time2);
printf("%lf\n", ((double) (GIG * time_stamp.tv_sec + time_stamp.tv_nsec)/1000000));
// Compare the results
for(i = 0; i < arrLen; i++) {
if (abs(Pd_h_gold[i] - Pd_h[i]) > TOL) {
errCount++;
}
if (Pd_h[i] == 0) {
zeroCount++;
}
}
/*
for(i = 0; i < 50; i++) {
printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
}
*/
if (errCount > 0) {
        printf("\n@ERROR: TEST FAILED: %d results did not match\n", errCount);
}
else if (zeroCount > 0){
printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
}
else {
printf("\nTEST PASSED: All results matched\n");
}
// Free-up device and host memory
CUDA_SAFE_CALL(cudaFree(Pd));
CUDA_SAFE_CALL(cudaFree(Md));
CUDA_SAFE_CALL(cudaFree(Nd));
free(Pd_h);
free(Md_h);
free(Nd_h);
return 0;
}
struct timespec diff(struct timespec start, struct timespec end)
{
struct timespec temp;
if ((end.tv_nsec-start.tv_nsec)<0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec-start.tv_sec;
temp.tv_nsec = end.tv_nsec-start.tv_nsec;
}
return temp;
}
void initializeArray1D(float *arr, int len, float seed) {
int i;
float randNum;
srand(seed);
for (i = 0; i < len; i++) {
randNum = (float) (rand()%10000);
arr[i] = randNum;
}
}
|
ac58d6faa419ab21fb34f0b13c3c08fde7800f57.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define ITER_NUM 100
__global__ void iKernel(float *src, float *dst) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
__global__ void reduceKernel(float *src, float *dst) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
src[idx] += dst[idx];
}
inline bool isCapableP2P(int ngpus) {
hipDeviceProp_t prop[ngpus];
int iCount = 0;
for (int i = 0; i < ngpus; i++) {
CHECK(hipGetDeviceProperties(&prop[i], i));
if (prop[i].major >= 2)
iCount++;
printf("> GPU%d: %s %s capable of Peer-to-Peer access\n", i,
prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
}
if (iCount != ngpus) {
printf("> no enough device to run this application\n");
}
return (iCount == ngpus);
}
inline bool isUnifiedAddressingSupported(int ngpus) {
hipDeviceProp_t prop[ngpus];
    bool iuva = true;
for (int i = 0; i < ngpus; i++) {
CHECK(hipGetDeviceProperties(&prop[i], i));
iuva &= prop[i].unifiedAddressing;
printf("> GPU%i: %s %s unified addressing\n", i, prop[0].name,
(prop[i].unifiedAddressing ? "supports" : "does not support"));
}
return iuva;
}
/*
* enable P2P memcopies between GPUs (all GPUs must be compute capability 2.0 or
* later (Fermi or later)).
*/
inline void enableP2P(int ngpus) {
for (int i = 0; i < ngpus; i++) {
CHECK(hipSetDevice(i));
for (int j = 0; j < ngpus; j++) {
if (i == j)
continue;
int peer_access_available = 0;
CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available) {
CHECK(hipDeviceEnablePeerAccess(j, 0));
printf("> GPU%d enabled direct access to GPU%d\n", i, j);
} else {
printf("(%d, %d)\n", i, j);
}
}
}
}
inline void disableP2P(int ngpus) {
for (int i = 0; i < ngpus; i++) {
CHECK(hipSetDevice(i));
for (int j = 0; j < ngpus; j++) {
if (i == j)
continue;
int peer_access_available = 0;
CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available) {
CHECK(hipDeviceDisablePeerAccess(j));
printf("> GPU%d disabled direct access to GPU%d\n", i, j);
}
}
}
}
void initialData(float *ip, int size) {
for (int i = 0; i < size; i++) {
ip[i] = (float) rand() / (float) RAND_MAX;
}
}
void test_method1() {
}
int main(int argc, char **argv) {
int ngpus;
// check device count
CHECK(hipGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
//isCapableP2P(ngpus);
//isUnifiedAddressingSupported(ngpus);
// get ngpus from command line
if (argc > 1) {
if (atoi(argv[1]) > ngpus) {
fprintf(stderr, "Invalid number of GPUs specified: %d is greater "
"than the total number of GPUs in this platform (%d)\n",
atoi(argv[1]), ngpus);
return 1;
} else if (atoi(argv[1]) < 1) {
fprintf(stderr, "Invalid number of GPUs specified: %d is less "
"than 1 in this platform (%d)\n", atoi(argv[1]), ngpus);
return 1;
}
ngpus = atoi(argv[1]);
}
if (ngpus % 2) {
        fprintf(stderr, "The number of GPUs must be an even number\n");
return 1;
}
enableP2P(ngpus);
// Allocate buffers
int iSize = 1024 * 1024 * 16;
const size_t iBytes = iSize * sizeof(float);
printf("\nAllocating buffers (%iMB on each GPU and CPU Host)...\n",
int(iBytes / 1024 / 1024));
    float **d_src = (float **) malloc(sizeof(float *) * ngpus);
    float **d_rcv = (float **) malloc(sizeof(float *) * ngpus);
    float **h_src = (float **) malloc(sizeof(float *) * ngpus);
// We have n phases
hipStream_t *stream = (hipStream_t *) malloc(sizeof(hipStream_t) * ngpus);
// Create CUDA event handles
hipEvent_t start, stop;
CHECK(hipSetDevice(0));
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
for (int i = 0; i < ngpus; i++) {
CHECK(hipSetDevice(i));
CHECK(hipMalloc(&d_src[i], iBytes));
CHECK(hipMalloc(&d_rcv[i], iBytes));
CHECK(hipStreamCreate(&stream[i]));
CHECK(hipHostMalloc((void **) &h_src[i], iBytes));
}
for (int i = 0; i < ngpus; i++) {
initialData(h_src[i], iSize);
}
const dim3 block(512);
const dim3 grid(iSize / block.x);
/*** Method 1 ***/
// Asynchronous GPUmem copy by pairs
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(start, 0));
for (int i = 0; i < ITER_NUM; i++)
{
// Phase 1
for (int dev = 0; dev < ngpus; dev++)
{
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(hipMemcpyPeerAsync(d_src[dev+1], dev+1, d_rcv[dev], dev, iBytes, stream[dev]));
}
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++)
{
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(hipStreamSynchronize(stream[dev]));
}
}
// Do kernel function
for (int dev = 0; dev < ngpus; dev++) {
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(hipSetDevice(dev+1));
                hipLaunchKernelGGL(( reduceKernel), dim3(grid), dim3(block), 0, stream[0], d_src[dev+1], d_rcv[dev+1]);  // reduceKernel uses no dynamic shared memory
}
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++) {
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(hipStreamSynchronize(stream[dev]));
}
}
// Phase 2
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
{
CHECK(hipMemcpyPeerAsync(d_src[dev], dev+2, d_rcv[dev + 2], dev, iBytes, stream[dev]));
} /*else if ((dev % 2) && (dev == (ngpus - 1))) {
CHECK(hipMemcpyPeerAsync(d_src[dev], 1, d_rcv[0], 0, iBytes, stream[dev]));
}*/
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
            {
                CHECK(hipStreamSynchronize(stream[dev]));
}
}
// Do kernel function
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
{
CHECK(hipSetDevice(dev+2));
                hipLaunchKernelGGL(( reduceKernel), dim3(grid), dim3(block), 0, stream[dev+2], d_src[dev+2], d_rcv[dev+2]);
}
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
{
CHECK(hipStreamSynchronize(stream[dev+2]));
}
}
}
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsed_time_ms = 0.0f;
CHECK(hipEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= (float)ITER_NUM;
printf("2 phases hipMemcpyPeerAsync time per cycle:\t %8.2fms\n", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float) iBytes * 4.0 / (elapsed_time_ms * 1e6f));
/*** Method 2 ***/
// Asynchronous GPUmem copy
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(start, 0));
for (int i = 0; i < ITER_NUM; i++) {
for (int dev = 0; dev < ngpus; dev++) {
// Do ring async memory copy
if ((dev != (ngpus - 1))) {
CHECK(hipMemcpyPeerAsync(d_src[dev], dev+1, d_rcv[dev + 1], dev, iBytes, stream[dev]));
} else if ((dev == (ngpus - 1))) {
CHECK(hipMemcpyPeerAsync(d_src[dev], 0, d_rcv[0], dev, iBytes, stream[dev]));
}
CHECK(hipStreamSynchronize(stream[dev]));
// Do stream sync
if ((dev != (ngpus - 1))) {
CHECK(hipSetDevice(dev+1));
                hipLaunchKernelGGL(( reduceKernel), dim3(grid), dim3(block), 0, stream[dev+1], d_src[dev+1], d_rcv[dev+1]);
CHECK(hipStreamSynchronize(stream[dev]));
} else if ((dev == (ngpus - 1))) {
CHECK(hipSetDevice(0));
                hipLaunchKernelGGL(( reduceKernel), dim3(grid), dim3(block), 0, stream[0], d_src[0], d_rcv[0]);
CHECK(hipStreamSynchronize(stream[0]));
}
}
}
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
elapsed_time_ms = 0.0f;
CHECK(hipEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= (float)ITER_NUM;
printf("Ring hipMemcpyPeerAsync time per cycle:\t %8.2fms\n", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float) iBytes * 4.0 / (elapsed_time_ms * 1e6f));
disableP2P(ngpus);
// free
CHECK(hipSetDevice(0));
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
for (int i = 0; i < ngpus; i++) {
CHECK(hipSetDevice(i));
        CHECK(hipHostFree(h_src[i]));
CHECK(hipFree(d_src[i]));
CHECK(hipFree(d_rcv[i]));
CHECK(hipStreamDestroy(stream[i]));
CHECK(hipDeviceReset());
}
exit (EXIT_SUCCESS);
}
| ac58d6faa419ab21fb34f0b13c3c08fde7800f57.cu | #include "common.h"
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define ITER_NUM 100
__global__ void iKernel(float *src, float *dst) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
__global__ void reduceKernel(float *src, float *dst) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
src[idx] += dst[idx];
}
inline bool isCapableP2P(int ngpus) {
cudaDeviceProp prop[ngpus];
int iCount = 0;
for (int i = 0; i < ngpus; i++) {
CHECK(cudaGetDeviceProperties(&prop[i], i));
if (prop[i].major >= 2)
iCount++;
printf("> GPU%d: %s %s capable of Peer-to-Peer access\n", i,
prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
}
if (iCount != ngpus) {
printf("> no enough device to run this application\n");
}
return (iCount == ngpus);
}
inline bool isUnifiedAddressingSupported(int ngpus) {
cudaDeviceProp prop[ngpus];
    bool iuva = true;
for (int i = 0; i < ngpus; i++) {
CHECK(cudaGetDeviceProperties(&prop[i], i));
iuva &= prop[i].unifiedAddressing;
printf("> GPU%i: %s %s unified addressing\n", i, prop[0].name,
(prop[i].unifiedAddressing ? "supports" : "does not support"));
}
return iuva;
}
/*
* enable P2P memcopies between GPUs (all GPUs must be compute capability 2.0 or
* later (Fermi or later)).
*/
inline void enableP2P(int ngpus) {
for (int i = 0; i < ngpus; i++) {
CHECK(cudaSetDevice(i));
for (int j = 0; j < ngpus; j++) {
if (i == j)
continue;
int peer_access_available = 0;
CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available) {
CHECK(cudaDeviceEnablePeerAccess(j, 0));
printf("> GPU%d enabled direct access to GPU%d\n", i, j);
} else {
printf("(%d, %d)\n", i, j);
}
}
}
}
inline void disableP2P(int ngpus) {
for (int i = 0; i < ngpus; i++) {
CHECK(cudaSetDevice(i));
for (int j = 0; j < ngpus; j++) {
if (i == j)
continue;
int peer_access_available = 0;
CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available) {
CHECK(cudaDeviceDisablePeerAccess(j));
printf("> GPU%d disabled direct access to GPU%d\n", i, j);
}
}
}
}
void initialData(float *ip, int size) {
for (int i = 0; i < size; i++) {
ip[i] = (float) rand() / (float) RAND_MAX;
}
}
void test_method1() {
}
int main(int argc, char **argv) {
int ngpus;
// check device count
CHECK(cudaGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
//isCapableP2P(ngpus);
//isUnifiedAddressingSupported(ngpus);
// get ngpus from command line
if (argc > 1) {
if (atoi(argv[1]) > ngpus) {
fprintf(stderr, "Invalid number of GPUs specified: %d is greater "
"than the total number of GPUs in this platform (%d)\n",
atoi(argv[1]), ngpus);
return 1;
} else if (atoi(argv[1]) < 1) {
fprintf(stderr, "Invalid number of GPUs specified: %d is less "
"than 1 in this platform (%d)\n", atoi(argv[1]), ngpus);
return 1;
}
ngpus = atoi(argv[1]);
}
if (ngpus % 2) {
        fprintf(stderr, "The number of GPUs must be an even number\n");
return 1;
}
enableP2P(ngpus);
// Allocate buffers
int iSize = 1024 * 1024 * 16;
const size_t iBytes = iSize * sizeof(float);
printf("\nAllocating buffers (%iMB on each GPU and CPU Host)...\n",
int(iBytes / 1024 / 1024));
    float **d_src = (float **) malloc(sizeof(float *) * ngpus);
    float **d_rcv = (float **) malloc(sizeof(float *) * ngpus);
    float **h_src = (float **) malloc(sizeof(float *) * ngpus);
// We have n phases
cudaStream_t *stream = (cudaStream_t *) malloc(sizeof(cudaStream_t) * ngpus);
// Create CUDA event handles
cudaEvent_t start, stop;
CHECK(cudaSetDevice(0));
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
for (int i = 0; i < ngpus; i++) {
CHECK(cudaSetDevice(i));
CHECK(cudaMalloc(&d_src[i], iBytes));
CHECK(cudaMalloc(&d_rcv[i], iBytes));
CHECK(cudaStreamCreate(&stream[i]));
CHECK(cudaMallocHost((void **) &h_src[i], iBytes));
}
for (int i = 0; i < ngpus; i++) {
initialData(h_src[i], iSize);
}
const dim3 block(512);
const dim3 grid(iSize / block.x);
/*** Method 1 ***/
// Asynchronous GPUmem copy by pairs
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(start, 0));
for (int i = 0; i < ITER_NUM; i++)
{
// Phase 1
for (int dev = 0; dev < ngpus; dev++)
{
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(cudaMemcpyPeerAsync(d_src[dev+1], dev+1, d_rcv[dev], dev, iBytes, stream[dev]));
}
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++)
{
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(cudaStreamSynchronize(stream[dev]));
}
}
// Do kernel function
for (int dev = 0; dev < ngpus; dev++) {
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(cudaSetDevice(dev+1));
                reduceKernel<<<grid, block, 0, stream[0]>>>(d_src[dev+1], d_rcv[dev+1]);  // reduceKernel uses no dynamic shared memory
}
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++) {
if (!(dev % 2)) // even number, for instance: 0, 2, 4,...
{
CHECK(cudaStreamSynchronize(stream[dev]));
}
}
// Phase 2
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
{
CHECK(cudaMemcpyPeerAsync(d_src[dev], dev+2, d_rcv[dev + 2], dev, iBytes, stream[dev]));
} /*else if ((dev % 2) && (dev == (ngpus - 1))) {
CHECK(cudaMemcpyPeerAsync(d_src[dev], 1, d_rcv[0], 0, iBytes, stream[dev]));
}*/
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
            {
                CHECK(cudaStreamSynchronize(stream[dev]));
}
}
// Do kernel function
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
{
CHECK(cudaSetDevice(dev+2));
                reduceKernel<<<grid, block, 0, stream[dev+2]>>>(d_src[dev+2], d_rcv[dev+2]);
}
}
// Do stream sync
for (int dev = 0; dev < ngpus; dev++)
{
            if ((dev % 2) && (dev != (ngpus - 1))) // odd number, for instance: 1, 3, 5,...
{
CHECK(cudaStreamSynchronize(stream[dev+2]));
}
}
}
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsed_time_ms = 0.0f;
CHECK(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= (float)ITER_NUM;
printf("2 phases cudaMemcpyPeerAsync time per cycle:\t %8.2fms\n", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float) iBytes * 4.0 / (elapsed_time_ms * 1e6f));
/*** Method 2 ***/
// Asynchronous GPUmem copy
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(start, 0));
for (int i = 0; i < ITER_NUM; i++) {
for (int dev = 0; dev < ngpus; dev++) {
// Do ring async memory copy
if ((dev != (ngpus - 1))) {
CHECK(cudaMemcpyPeerAsync(d_src[dev], dev+1, d_rcv[dev + 1], dev, iBytes, stream[dev]));
} else if ((dev == (ngpus - 1))) {
CHECK(cudaMemcpyPeerAsync(d_src[dev], 0, d_rcv[0], dev, iBytes, stream[dev]));
}
CHECK(cudaStreamSynchronize(stream[dev]));
// Do stream sync
if ((dev != (ngpus - 1))) {
CHECK(cudaSetDevice(dev+1));
                reduceKernel<<<grid, block, 0, stream[dev+1]>>>(d_src[dev+1], d_rcv[dev+1]);
CHECK(cudaStreamSynchronize(stream[dev]));
} else if ((dev == (ngpus - 1))) {
CHECK(cudaSetDevice(0));
reduceKernel<<<grid, block, iBytes, stream[0]>>>(d_src[0], d_rcv[0]);
CHECK(cudaStreamSynchronize(stream[0]));
}
}
}
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
elapsed_time_ms = 0.0f;
CHECK(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= (float)ITER_NUM;
printf("Ring cudaMemcpyPeerAsync time per cycle:\t %8.2fms\n", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float) iBytes * 4.0 / (elapsed_time_ms * 1e6f));
disableP2P(ngpus);
// free
CHECK(cudaSetDevice(0));
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
for (int i = 0; i < ngpus; i++) {
CHECK(cudaSetDevice(i));
CHECK(cudaFreeHost(h_src[i])); // h_src was allocated with cudaMallocHost
CHECK(cudaFree(d_src[i]));
CHECK(cudaFree(d_rcv[i]));
CHECK(cudaStreamDestroy(stream[i]));
CHECK(cudaDeviceReset());
}
exit (EXIT_SUCCESS);
}
|
214053476dfcf0c5d69ad85751e91658e5b877a4.hip | // !!! This is a file automatically generated by hipify!!!
// codes for reduction
#ifndef __REDUCTION_CU
#define __REDUCTION_CU
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cutil_math.h>
#include "tomo_recon.h"
__global__ void reduction_kernel( float*, float*, int, int, int );
__global__ void reduction2_kernel( float*, float*, float*, int, int, int );
__global__ void reduction2_kernel_2D( float* , float* , float* ,
int , int , int );
// dynamic shared memory allocation does not work well yet. Y. Pan, 5/11/2011
// // Utility class used to avoid linker errors with extern
// // unsized shared memory arrays with templated type
// template<class T>
// struct SharedMemory
// {
// __device__ inline operator T*()
// {
// extern __shared__ float __smem[];
// return (T*)__smem;
// }
// __device__ inline operator const T*() const
// {
// extern __shared__ float __smem[];
// return (T*)__smem;
// }
// };
// // specialize for double to avoid unaligned memory
// // access compile errors
// template<>
// struct SharedMemory<double>
// {
// __device__ inline operator double*()
// {
// extern __shared__ double __smem_d[];
// return (double*)__smem_d;
// }
// __device__ inline operator const double*() const
// {
// extern __shared__ double __smem_d[];
// return (double*)__smem_d;
// }
// };
extern "C"
int nextPow2( int x ) { // x should better be "unsigned int"
if( x < 0 ){
return 0;
}
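// Smear the highest set bit into all lower bits, then increment: the classic next-power-of-two trick for 32-bit values.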
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
extern "C"
int isPow2(int x) { // x should better be "unsigned int"
if( x < 0 ){
return 0;
}
if( (x&(x-1))==0 )
return 1;
else
return 0;
}
extern "C"
float reduction_wrapper( float* d_in, float* d_sub, int nx, int ny, int nz ){
int maxThreads;
int nVolume = nx * ny * nz;
if(ny == 1 || nz == 1) {
maxThreads = BLOCK_2DIM_X * BLOCK_2DIM_Y * BLOCK_2DIM_Z;
}
else {
maxThreads = BLOCK_3DIM_X * BLOCK_3DIM_Y * BLOCK_3DIM_Z;
}
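// The first-level kernel loads up to two elements per thread, so a block of nThreads covers 2*nThreads inputs; nBlocks is rounded up accordingly.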
int nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
int nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
dim3 dimGrid(nBlocks, 1); // 3D grid is not supported on G80
dim3 dimBlock(nThreads, 1, 1);
int nIsPow2 = isPow2( nVolume );
hipLaunchKernelGGL(( reduction_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_in, d_sub, nVolume, nThreads, nIsPow2 );
nVolume = nBlocks;
while ( nVolume > 1 ){
nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
// execute the kernel
dim3 dimGrid(nBlocks, 1); // 3D grid is not supported on G80
dim3 dimBlock(nThreads, 1, 1);
nIsPow2 = isPow2( nVolume );
hipLaunchKernelGGL(( reduction_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_sub, d_sub, nVolume, nThreads, nIsPow2);
CUT_CHECK_ERROR("Kernel execution failed");
nVolume = (nVolume + (nThreads*2 - 1)) / (nThreads * 2);
}
// extract reduction results
float out = 0.0f;
cutilSafeCall( hipMemcpy( &out, d_sub, sizeof(float), hipMemcpyDeviceToHost) );
return out;
}
__global__ void reduction_kernel( float* d_idata, float* d_odata, int n,
int blockSize, int nIsPow2 ){
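// Block-wide tree reduction of sum(|x|): each thread loads up to two elements, the active half of the block
// repeatedly folds the upper half of sdata onto the lower half, and thread 0 writes one partial sum per block to d_odata.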
__shared__ float sdata[ THREADS_MAX ]; // 1024
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
// unsigned int gridSize = blockSize*2*gridDim.x;
float mySum = 0.0;
// reduction kernel 5
if (i < n){
mySum += fabsf( d_idata[i] );
}
if (i + blockSize < n) {
mySum += fabsf( d_idata[i+blockSize] );
}
// reduction kernel 6
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
// while (i < n){
// mySum += fabsf( d_idata[i] ); // d_idata[i] * d_idata[i];
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 == 1 || i + blockSize < n)
// mySum += fabsf( d_idata[i+blockSize] ); // d_idata[i+blockSize] * d_idata[i+blockSize];
// i += gridSize;
// }
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 1024) {
if (tid < 512) {
// sdata[tid] = mySum = mySum + sdata[tid + 512];
sdata[tid] += sdata[tid + 512];
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
// sdata[tid] = mySum = mySum + sdata[tid + 256];
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
// sdata[tid] = mySum = mySum + sdata[tid + 128];
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
// sdata[tid] = mySum = mySum + sdata[tid + 64];
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
}
if (tid < 32){
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
// volatile float* smem = sdata;
if (blockSize >= 64) {
// smem[tid] = mySum = mySum + smem[tid + 32];
sdata[tid] += sdata[tid + 32];
}
if (blockSize >= 32) {
// smem[tid] = mySum = mySum + smem[tid + 16];
sdata[tid] += sdata[tid + 16];
}
if (blockSize >= 16) {
// smem[tid] = mySum = mySum + smem[tid + 8];
sdata[tid] += sdata[tid + 8];
}
if (blockSize >= 8) {
// smem[tid] = mySum = mySum + smem[tid + 4];
sdata[tid] += sdata[tid + 4];
}
if (blockSize >= 4) {
// smem[tid] = mySum = mySum + smem[tid + 2];
sdata[tid] += sdata[tid + 2];
}
if (blockSize >= 2) {
// smem[tid] = mySum = mySum + smem[tid + 1];
sdata[tid] += sdata[tid + 1];
}
}
// write result for this block to global mem
if (tid == 0)
d_odata[blockIdx.x] = sdata[0];
}
// -----------------------------------------------------------------------------------------
extern "C"
float reduction2_wrapper( float* d_in1, float* d_in2, float* d_sub, int nx, int ny, int nz ){
int maxThreads = THREADS_MAX;
int nVolume = nx * ny * nz;
// 1D reduction kernel
int nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
int nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
// int nThreads = (nVolume < maxThreads) ? nextPow2(nVolume) : maxThreads;
// int nBlocks = (nVolume + (nThreads - 1) ) / (nThreads);
// Note that the dimension of grid is limited to 65535 * 65535 * 1
dim3 dimGrid(nBlocks, 1);
dim3 dimBlock(nThreads, 1, 1);
int nIsPow2 = isPow2( nVolume );
hipLaunchKernelGGL(( reduction2_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_in1, d_in2, d_sub,
nVolume, nThreads, nIsPow2);
// // for debug
// float* tmp = (float*) malloc( nBlocks * sizeof( float ) );
// hipMemcpy( tmp, d_sub, sizeof(float) * nBlocks, hipMemcpyDeviceToHost );
// int i;
// float total = 0.0f;
// for( i = 0; i < nBlocks; i++ ){
// total += tmp[i];
// if( isnan( total ) ){
// total -= tmp[i];
// }
// }
nVolume = nBlocks;
while ( nVolume > 1 ){
nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
// execute the kernel
dim3 dimGrid(nBlocks, 1); // 3D grid is not supported on G80
dim3 dimBlock(nThreads, 1, 1);
int nIsPow2 = isPow2( nVolume );
hipLaunchKernelGGL(( reduction_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_sub, d_sub, nVolume, nThreads, nIsPow2);
CUT_CHECK_ERROR("Kernel execution failed");
nVolume = (nVolume + (nThreads*2 - 1)) / (nThreads * 2);
}
// extract reduction results
float out = 0.0f;
cutilSafeCall( hipMemcpy( &out, d_sub, sizeof(float), hipMemcpyDeviceToHost) );
return out;
// Weird: somehow only part of blocks are computed, corresponding to the first
// dimension in dimGrid. For example, only 256 blocks for dimGrid(256, 256).
// Yongsheng Pan, 5/12/2011
// 2D reduction kernel. Kept for future use.
// int blockWidth = BLOCK_2DIM_X; // 16
// int blockHeight = BLOCK_2DIM_Y; // 16
// int nBlockX = (int)ceil( 1.0 * nx * ny / blockWidth );
// int nBlockY = (int)ceil( 1.0 * nz / blockHeight );
// dim3 dimGrid(nBlockX, nBlockY, 1);
// dim3 dimBlock(blockWidth, blockHeight);
// int nBlocks = nBlockX * nBlockY;
// int nThreads = blockWidth * blockHeight;
// reduction2_kernel_2D<<< dimGrid, dimBlock >>>( d_in1, d_in2, d_sub,
// nVolume, nx * ny, 1);
// CUT_CHECK_ERROR("Kernel execution failed");
}
__global__ void reduction2_kernel( float* d_idata1, float* d_idata2, float* d_odata,
int n, int blockSize, int nIsPow2 ){
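// Same tree reduction as reduction_kernel, but each thread contributes |d_idata1[i] - d_idata2[i]|,
// so the grid produces per-block partial L1 distances between the two arrays.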
__shared__ float sdata[ THREADS_MAX ]; // 1024
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
// unsigned int gridSize = blockSize*2*gridDim.x;
float mySum = 0.0f;
// reduction kernel 5
if (i < n ){
mySum = fabsf( d_idata1[i] - d_idata2[i] );
}
if (1.0f * i + blockSize < 1.0f * n - 0.5f) {
mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
}
// reduction kernel 6
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
// while (i < n){
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 == 1 || i + blockSize < n)
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// i += gridSize;
// }
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 1024) {
if (tid < 512) {
// sdata[tid] = mySum = mySum + sdata[tid + 512];
sdata[tid] += sdata[tid + 512];
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
// sdata[tid] = mySum = mySum + sdata[tid + 256];
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
// sdata[tid] = mySum = mySum + sdata[tid + 128];
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
// sdata[tid] = mySum = mySum + sdata[tid + 64];
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
}
if (tid < 32){
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
// volatile float* smem = sdata;
// if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
// if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
// if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
// if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
// if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
// if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
if (blockSize >= 64) {
// smem[tid] = mySum = mySum + smem[tid + 32];
sdata[tid] += sdata[tid + 32];
}
if (blockSize >= 32) {
// smem[tid] = mySum = mySum + smem[tid + 16];
sdata[tid] += sdata[tid + 16];
}
if (blockSize >= 16) {
// smem[tid] = mySum = mySum + smem[tid + 8];
sdata[tid] += sdata[tid + 8];
}
if (blockSize >= 8) {
// smem[tid] = mySum = mySum + smem[tid + 4];
sdata[tid] += sdata[tid + 4];
}
if (blockSize >= 4) {
// smem[tid] = mySum = mySum + smem[tid + 2];
sdata[tid] += sdata[tid + 2];
}
if (blockSize >= 2) {
// smem[tid] = mySum = mySum + smem[tid + 1];
sdata[tid] += sdata[tid + 1];
}
}
// write result for this block to global mem
if (tid == 0)
d_odata[blockIdx.x] = sdata[0];
}
__global__ void reduction2_kernel_2D( float* d_idata1, float* d_idata2, float* d_odata,
int n, int nx, int nIsPow2 ){
__shared__ float sdata[ BLOCK_2DIM_Y ][ BLOCK_2DIM_X ]; // 1024
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tidx = threadIdx.x;
unsigned int tidy = threadIdx.y;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
float mySum = 0.0f;
int blockSize = blockDim.x * blockDim.y;
// reduction kernel 5
if ( idy * nx + idx < n ){
mySum = fabsf( d_idata1[idy * nx + idx] - d_idata2[idy * nx + idx] );
}
// reduction kernel 6
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
// while (i < n){
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 == 1 || i + blockSize < n)
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// i += gridSize;
// }
// each thread puts its local sum into shared memory
sdata[tidy][tidx] = mySum;
__syncthreads();
unsigned int tid = tidy * blockDim.x + tidx;
// do reduction in shared mem
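// Editor's note: this 2D variant is experimental and currently unused (its call site in reduction2_wrapper is
// commented out); the fixed row/column offsets below assume a particular block shape and have not been
// validated, as the wrapper's remark about partial results suggests.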
if (blockSize >= 1024) {
if (tid < 512) {
sdata[tidy][tidx] += sdata[tidy + 32][tidx];
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
sdata[tidy][tidx] += sdata[tidy + 16][tidx];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sdata[tidy][tidx] += sdata[tidy + 8][tidx];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sdata[tidy][tidx] += sdata[tidy + 4][tidx];
}
__syncthreads();
}
if (tid < 32){
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
// volatile float* smem = sdata;
// if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
// if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
// if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
// if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
// if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
// if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
if (blockSize >= 64) {
sdata[tidy][tidx] += sdata[tidy + 2][tidx];
}
if (blockSize >= 32) {
sdata[tidy][tidx] += sdata[tidy + 1][tidx];
}
if (blockSize >= 16) {
sdata[tidy][tidx] += sdata[tidy][tidx + 8];
}
if (blockSize >= 8) {
sdata[tidy][tidx] += sdata[tidy][tidx + 4];
}
if (blockSize >= 4) {
sdata[tidy][tidx] += sdata[tidy][tidx + 2];
}
if (blockSize >= 2) {
sdata[tidy][tidx] += sdata[tidy][tidx + 1];
}
}
// write result for this block to global mem
if (tid == 0)
d_odata[blockIdx.x] = sdata[0][0];
}
#endif // __REDUCTION_CU
| 214053476dfcf0c5d69ad85751e91658e5b877a4.cu | // codes for reduction
#ifndef __REDUCTION_CU
#define __REDUCTION_CU
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cutil_math.h>
#include "tomo_recon.h"
__global__ void reduction_kernel( float*, float*, int, int, int );
__global__ void reduction2_kernel( float*, float*, float*, int, int, int );
__global__ void reduction2_kernel_2D( float* , float* , float* ,
int , int , int );
// dynamic shared memory allocation does not work well yet. Y. Pan, 5/11/2011
// // Utility class used to avoid linker errors with extern
// // unsized shared memory arrays with templated type
// template<class T>
// struct SharedMemory
// {
// __device__ inline operator T*()
// {
// extern __shared__ float __smem[];
// return (T*)__smem;
// }
// __device__ inline operator const T*() const
// {
// extern __shared__ float __smem[];
// return (T*)__smem;
// }
// };
// // specialize for double to avoid unaligned memory
// // access compile errors
// template<>
// struct SharedMemory<double>
// {
// __device__ inline operator double*()
// {
// extern __shared__ double __smem_d[];
// return (double*)__smem_d;
// }
// __device__ inline operator const double*() const
// {
// extern __shared__ double __smem_d[];
// return (double*)__smem_d;
// }
// };
extern "C"
int nextPow2( int x ) { // x should better be "unsigned int"
if( x < 0 ){
return 0;
}
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
extern "C"
int isPow2(int x) { // x should better be "unsigned int"
if( x < 0 ){
return 0;
}
if( (x&(x-1))==0 )
return 1;
else
return 0;
}
extern "C"
float reduction_wrapper( float* d_in, float* d_sub, int nx, int ny, int nz ){
int maxThreads;
int nVolume = nx * ny * nz;
if(ny == 1 || nz == 1) {
maxThreads = BLOCK_2DIM_X * BLOCK_2DIM_Y * BLOCK_2DIM_Z;
}
else {
maxThreads = BLOCK_3DIM_X * BLOCK_3DIM_Y * BLOCK_3DIM_Z;
}
int nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
int nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
dim3 dimGrid(nBlocks, 1); // 3D grid is not supported on G80
dim3 dimBlock(nThreads, 1, 1);
int nIsPow2 = isPow2( nVolume );
reduction_kernel<<< dimGrid, dimBlock >>>( d_in, d_sub, nVolume, nThreads, nIsPow2 );
nVolume = nBlocks;
while ( nVolume > 1 ){
nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
// execute the kernel
dim3 dimGrid(nBlocks, 1); // 3D grid is not supported on G80
dim3 dimBlock(nThreads, 1, 1);
nIsPow2 = isPow2( nVolume );
reduction_kernel<<< dimGrid, dimBlock >>>( d_sub, d_sub, nVolume, nThreads, nIsPow2);
CUT_CHECK_ERROR("Kernel execution failed");
nVolume = (nVolume + (nThreads*2 - 1)) / (nThreads * 2);
}
// extract reduction results
float out = 0.0f;
cutilSafeCall( cudaMemcpy( &out, d_sub, sizeof(float), cudaMemcpyDeviceToHost) );
return out;
}
__global__ void reduction_kernel( float* d_idata, float* d_odata, int n,
int blockSize, int nIsPow2 ){
__shared__ float sdata[ THREADS_MAX ]; // 1024
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
// unsigned int gridSize = blockSize*2*gridDim.x;
float mySum = 0.0;
// reduction kernel 5
if (i < n){
mySum += fabsf( d_idata[i] );
}
if (i + blockSize < n) {
mySum += fabsf( d_idata[i+blockSize] );
}
// reduction kernel 6
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
// while (i < n){
// mySum += fabsf( d_idata[i] ); // d_idata[i] * d_idata[i];
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 == 1 || i + blockSize < n)
// mySum += fabsf( d_idata[i+blockSize] ); // d_idata[i+blockSize] * d_idata[i+blockSize];
// i += gridSize;
// }
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 1024) {
if (tid < 512) {
// sdata[tid] = mySum = mySum + sdata[tid + 512];
sdata[tid] += sdata[tid + 512];
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
// sdata[tid] = mySum = mySum + sdata[tid + 256];
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
// sdata[tid] = mySum = mySum + sdata[tid + 128];
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
// sdata[tid] = mySum = mySum + sdata[tid + 64];
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
}
if (tid < 32){
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
// volatile float* smem = sdata;
if (blockSize >= 64) {
// smem[tid] = mySum = mySum + smem[tid + 32];
sdata[tid] += sdata[tid + 32];
}
if (blockSize >= 32) {
// smem[tid] = mySum = mySum + smem[tid + 16];
sdata[tid] += sdata[tid + 16];
}
if (blockSize >= 16) {
// smem[tid] = mySum = mySum + smem[tid + 8];
sdata[tid] += sdata[tid + 8];
}
if (blockSize >= 8) {
// smem[tid] = mySum = mySum + smem[tid + 4];
sdata[tid] += sdata[tid + 4];
}
if (blockSize >= 4) {
// smem[tid] = mySum = mySum + smem[tid + 2];
sdata[tid] += sdata[tid + 2];
}
if (blockSize >= 2) {
// smem[tid] = mySum = mySum + smem[tid + 1];
sdata[tid] += sdata[tid + 1];
}
}
// write result for this block to global mem
if (tid == 0)
d_odata[blockIdx.x] = sdata[0];
}
// -----------------------------------------------------------------------------------------
extern "C"
float reduction2_wrapper( float* d_in1, float* d_in2, float* d_sub, int nx, int ny, int nz ){
int maxThreads = THREADS_MAX;
int nVolume = nx * ny * nz;
// 1D reduction kernel
int nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
int nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
// int nThreads = (nVolume < maxThreads) ? nextPow2(nVolume) : maxThreads;
// int nBlocks = (nVolume + (nThreads - 1) ) / (nThreads);
// Note that the dimension of grid is limited to 65535 * 65535 * 1
dim3 dimGrid(nBlocks, 1);
dim3 dimBlock(nThreads, 1, 1);
int nIsPow2 = isPow2( nVolume );
reduction2_kernel<<< dimGrid, dimBlock >>>( d_in1, d_in2, d_sub,
nVolume, nThreads, nIsPow2);
// // for debug
// float* tmp = (float*) malloc( nBlocks * sizeof( float ) );
// cudaMemcpy( tmp, d_sub, sizeof(float) * nBlocks, cudaMemcpyDeviceToHost );
// int i;
// float total = 0.0f;
// for( i = 0; i < nBlocks; i++ ){
// total += tmp[i];
// if( isnan( total ) ){
// total -= tmp[i];
// }
// }
nVolume = nBlocks;
while ( nVolume > 1 ){
nThreads = (nVolume < maxThreads * 2) ? nextPow2((nVolume + 1)/ 2) : maxThreads;
nBlocks = (nVolume + (nThreads * 2 - 1) ) / (nThreads * 2);
// execute the kernel
dim3 dimGrid(nBlocks, 1); // 3D grid is not supported on G80
dim3 dimBlock(nThreads, 1, 1);
int nIsPow2 = isPow2( nVolume );
reduction_kernel<<< dimGrid, dimBlock >>>( d_sub, d_sub, nVolume, nThreads, nIsPow2);
CUT_CHECK_ERROR("Kernel execution failed");
nVolume = (nVolume + (nThreads*2 - 1)) / (nThreads * 2);
}
// extract reduction results
float out = 0.0f;
cutilSafeCall( cudaMemcpy( &out, d_sub, sizeof(float), cudaMemcpyDeviceToHost) );
return out;
// Weird: somehow only part of blocks are computed, corresponding to the first
// dimension in dimGrid. For example, only 256 blocks for dimGrid(256, 256).
// Yongsheng Pan, 5/12/2011
// 2D reduction kernel. Kept for future use.
// int blockWidth = BLOCK_2DIM_X; // 16
// int blockHeight = BLOCK_2DIM_Y; // 16
// int nBlockX = (int)ceil( 1.0 * nx * ny / blockWidth );
// int nBlockY = (int)ceil( 1.0 * nz / blockHeight );
// dim3 dimGrid(nBlockX, nBlockY, 1);
// dim3 dimBlock(blockWidth, blockHeight);
// int nBlocks = nBlockX * nBlockY;
// int nThreads = blockWidth * blockHeight;
// reduction2_kernel_2D<<< dimGrid, dimBlock >>>( d_in1, d_in2, d_sub,
// nVolume, nx * ny, 1);
// CUT_CHECK_ERROR("Kernel execution failed");
}
__global__ void reduction2_kernel( float* d_idata1, float* d_idata2, float* d_odata,
int n, int blockSize, int nIsPow2 ){
__shared__ float sdata[ THREADS_MAX ]; // 1024
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
// unsigned int gridSize = blockSize*2*gridDim.x;
float mySum = 0.0f;
// reduction kernel 5
if (i < n ){
mySum = fabsf( d_idata1[i] - d_idata2[i] );
}
if (1.0f * i + blockSize < 1.0f * n - 0.5f) {
mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
}
// reduction kernel 6
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
// while (i < n){
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 == 1 || i + blockSize < n)
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// i += gridSize;
// }
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 1024) {
if (tid < 512) {
// sdata[tid] = mySum = mySum + sdata[tid + 512];
sdata[tid] += sdata[tid + 512];
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
// sdata[tid] = mySum = mySum + sdata[tid + 256];
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
// sdata[tid] = mySum = mySum + sdata[tid + 128];
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
// sdata[tid] = mySum = mySum + sdata[tid + 64];
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
}
if (tid < 32){
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
// volatile float* smem = sdata;
// if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
// if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
// if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
// if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
// if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
// if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
if (blockSize >= 64) {
// smem[tid] = mySum = mySum + smem[tid + 32];
sdata[tid] += sdata[tid + 32];
}
if (blockSize >= 32) {
// smem[tid] = mySum = mySum + smem[tid + 16];
sdata[tid] += sdata[tid + 16];
}
if (blockSize >= 16) {
// smem[tid] = mySum = mySum + smem[tid + 8];
sdata[tid] += sdata[tid + 8];
}
if (blockSize >= 8) {
// smem[tid] = mySum = mySum + smem[tid + 4];
sdata[tid] += sdata[tid + 4];
}
if (blockSize >= 4) {
// smem[tid] = mySum = mySum + smem[tid + 2];
sdata[tid] += sdata[tid + 2];
}
if (blockSize >= 2) {
// smem[tid] = mySum = mySum + smem[tid + 1];
sdata[tid] += sdata[tid + 1];
}
}
// write result for this block to global mem
if (tid == 0)
d_odata[blockIdx.x] = sdata[0];
}
__global__ void reduction2_kernel_2D( float* d_idata1, float* d_idata2, float* d_odata,
int n, int nx, int nIsPow2 ){
__shared__ float sdata[ BLOCK_2DIM_Y ][ BLOCK_2DIM_X ]; // 1024
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tidx = threadIdx.x;
unsigned int tidy = threadIdx.y;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
float mySum = 0.0f;
int blockSize = blockDim.x * blockDim.y;
// reduction kernel 5
if ( idy * nx + idx < n ){
mySum = fabsf( d_idata1[idy * nx + idx] - d_idata2[idy * nx + idx] );
}
// reduction kernel 6
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
// while (i < n){
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 == 1 || i + blockSize < n)
// mySum += fabsf( d_idata1[i+blockSize] - d_idata2[i+blockSize] );
// i += gridSize;
// }
// each thread puts its local sum into shared memory
sdata[tidy][tidx] = mySum;
__syncthreads();
unsigned int tid = tidy * blockDim.x + tidx;
// do reduction in shared mem
if (blockSize >= 1024) {
if (tid < 512) {
sdata[tidy][tidx] += sdata[tidy + 32][tidx];
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
sdata[tidy][tidx] += sdata[tidy + 16][tidx];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sdata[tidy][tidx] += sdata[tidy + 8][tidx];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sdata[tidy][tidx] += sdata[tidy + 4][tidx];
}
__syncthreads();
}
if (tid < 32){
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
// volatile float* smem = sdata;
// if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
// if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
// if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
// if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
// if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
// if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
if (blockSize >= 64) {
sdata[tidy][tidx] += sdata[tidy + 2][tidx];
}
if (blockSize >= 32) {
sdata[tidy][tidx] += sdata[tidy + 1][tidx];
}
if (blockSize >= 16) {
sdata[tidy][tidx] += sdata[tidy][tidx + 8];
}
if (blockSize >= 8) {
sdata[tidy][tidx] += sdata[tidy][tidx + 4];
}
if (blockSize >= 4) {
sdata[tidy][tidx] += sdata[tidy][tidx + 2];
}
if (blockSize >= 2) {
sdata[tidy][tidx] += sdata[tidy][tidx + 1];
}
}
// write result for this block to global mem
if (tid == 0)
d_odata[blockIdx.x] = sdata[0][0];
}
#endif // __REDUCTION_CU
|
7c57eed437d18ba369b9c307ce1161f51b035904.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <array>
#include <cmath>
#include <vector>
#include <type_traits>
#include <cassert>
#include <set>
#include <algorithm>
#include <string>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
#include <sstream>
#include <hip/hip_cooperative_groups.h>
#include "device_launch_parameters.h"
#include "CudaUtils.h"
using Type = int;
using namespace thrust::placeholders;
template <typename Container>
std::string printContainer(Container const& container) {
if (container.empty()) {
return "{}";
}
std::string result = "{" + std::to_string(*(container.begin()));
if (container.size() == 1) {
return result + "}";
}
for (auto it = std::next(container.begin()); it != container.end(); ++it) {
result += "," + std::to_string(*it);
}
result += '}';
return result;
}
template <class DestContainer, class Source>
DestContainer parseTo(Source const& source) {
using Target = typename DestContainer::value_type;
std::istringstream is(source);
return DestContainer(std::istream_iterator<Target>(is), std::istream_iterator<Target>());
}
template <typename T>
void removeDuplicates(std::vector<T>& data) {
std::sort(data.begin(), data.end());
auto iter = std::unique(data.begin(), data.end());
data.resize(std::distance(data.begin(), iter));
}
template <typename T>
thrust::device_vector<T> removeDuplicates(thrust::device_vector<T> const& input) {
thrust::device_vector<T> result(input);
thrust::sort(thrust::device, result.begin(), result.end());
auto iter = thrust::unique(thrust::device, result.begin(), result.end());
result.resize(std::distance(result.begin(), iter));
return result;
}
template <typename S>
struct FoundMatcher : public thrust::unary_function<S, bool> {
FoundMatcher(S const patternSize, bool const * const found) : patternSize(patternSize), found(found) { }
__device__ __host__ bool operator()(S index) {
return found[index / patternSize];
}
S const patternSize;
bool const * const found;
};
using Block = dim3;
using Grid = dim3;
struct LaunchParameters {
int optimalBlockSize;
int optimalGridSize;
int workloadGridSize;
/** Maximum occupancy - all blocks resident on SMs**/
std::pair<Grid, Block> getOptimal() {
return { dim3(optimalGridSize), dim3(optimalBlockSize) };
}
/** Real occupancy - minimal launch configuration to handle the workload **/
std::pair<Grid, Block> getReal() {
return { dim3(workloadGridSize), dim3(optimalBlockSize) };
}
/** Real occupancy bounded by the residency requirement. No non-resident blocks will be launched **/
std::pair<Grid, Block> getRealResident() {
return { dim3(::min(optimalGridSize, workloadGridSize)), dim3(optimalBlockSize) };
}
};
template <typename Kernel>
LaunchParameters calculateOptimalLaunchParameters(Kernel kernel, std::size_t dynamicSMemSize, int blockSizeLimit) {
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
int maxActiveBlocks;
int device;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel, dynamicSMemSize, blockSizeLimit);
gridSize = (blockSizeLimit + blockSize - 1) / blockSize;
printf("[CUDA] Optimal block size: %d, grid size: %d\n", blockSize, minGridSize);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, kernel, blockSize, dynamicSMemSize);
hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /(float)(props.maxThreadsPerMultiProcessor / props.warpSize);
printf("[CUDA] Theoretical occupancy assuming launching with optimal grid %d: %f\n", minGridSize, occupancy);
printf("[CUDA] Theoretical occupancy assuming launching with neccessary grid %d to handle all the work: %f\n", gridSize, occupancy * ((float) gridSize / minGridSize));
return LaunchParameters { blockSize, minGridSize, gridSize };
}
__device__ void logLaunchParameters(char const* const kernelName) {
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid == 0) {
printf("[CUDA] Invoking %s with: Block(%d,%d), Grid(%d,%d)\n", kernelName, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
}
template <typename Integral, typename std::enable_if_t<std::is_integral<Integral>::value>* = nullptr>
__host__ __device__ std::size_t factorial(Integral const n) {
#if __cplusplus >= 201703L
if constexpr (!std::is_unsigned<Integral>::value) {
#endif
assert(n >= 0);
#if __cplusplus >= 201703L
}
#endif
if (n == 0) {
return 1;
}
std::size_t result = 1;
for (Integral i = 2; i <= n; ++i) {
result *= i;
}
assert(result > 0 && "Overflow detected!");
return result;
}
template <typename Integral, typename std::enable_if_t<std::is_integral<Integral>::value>* = nullptr>
__host__ __device__ std::size_t variationsCount(Integral const n, Integral const k) {
#if __cplusplus >= 201703L
if constexpr (!std::is_unsigned<Integral>::value) {
#endif
assert(n >= 0 && k >= 0);
#if __cplusplus >= 201703L
}
#endif
assert(n >= k);
if (k == 0) {
return 1;
}
std::size_t result = n - k + 1;
for (std::size_t i = result + 1; i <= n; ++i) {
result *= i;
}
return result;
}
template <typename T, typename Integral, typename std::enable_if_t<std::is_integral<Integral>::value>* = nullptr>
__host__ __device__ void computeVariation(T const* const input, Integral const n, Integral const k, Integral p, T* const output) {
#if __cplusplus >= 201703L
if constexpr (!std::is_unsigned<Integral>::value) {
#endif
assert(n >= 0 && k >= 0 && k <= n && p >= 0 && p < variationsCount(n, k));
#if __cplusplus >= 201703L
}
#endif
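// Unranks the p-th k-element variation (ordered selection without repetition) of the n inputs: at each position,
// dividing p by the number of variations of the remaining elements selects the next not-yet-used input,
// and the remainder carries to the next position.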
bool* removed = new bool[n];
for (Integral i = 0; i < n; ++i) {
removed[i] = false;
}
for (Integral x = 0; x < k; ++x) {
Integral v = variationsCount(n - x - 1, k - x - 1);
Integral t = p / v;
for (Integral i = 0; i <= t; ++i) {
if (removed[i]) {
++t;
}
}
output[x] = input[t];
removed[t] = true;
p = p % v;
}
delete[] removed;
}
template <typename T, typename S>
__host__ __device__ void substitutePattern(GpuData<T, S> const& pattern, GpuData<T, S> const& distinctPattern,
T const* const variation, T* const output) {
for (S patternIndex = 0; patternIndex < pattern.length; ++patternIndex) {
T currentPatternSymbol = pattern.data[patternIndex];
// Find the substitution
for (S distinctPatternIndex = 0; distinctPatternIndex < distinctPattern.length; ++distinctPatternIndex) {
T currentDistinctPatternSymbol = distinctPattern.data[distinctPatternIndex];
if (currentDistinctPatternSymbol == currentPatternSymbol) {
output[patternIndex] = variation[distinctPatternIndex];
break;
}
}
}
}
template <typename T, typename S>
__host__ __device__ bool checkPattern(GpuData<T, S> const& sequence, GpuData<T, S> const& pattern) {
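// Greedy subsequence test: returns true if the pattern occurs in the sequence in order, not necessarily contiguously;
// a greedy scan suffices because only existence is required.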
T const* sequencePtr = sequence.data;
T const* patternPtr = pattern.data;
while (sequencePtr - sequence.data < sequence.length) {
if (*patternPtr == *sequencePtr) {
++patternPtr;
if (patternPtr - pattern.data == pattern.length) {
// printf("[WorkNo %d] Matches! %d %d %d %d %d %d %d\n", workNo, pattern.data[0], pattern.data[1], pattern.data[2], pattern.data[3], pattern.data[4], pattern.data[5], pattern.data[6]);
return true;
}
}
++sequencePtr;
}
// printf("[WorkNo %d] Not matches!\n", gtid);
return false;
}
template <typename T, typename S>
__device__ bool compute(GpuData<T, S> const& sequence, GpuData<T, S> const& distinctSequence, GpuData<T, S> const& pattern,
GpuData<T, S> const& distinctPattern, S workNo, T* const outputVariations, bool* const outputFound) {
T* variation = new T[distinctPattern.length];
// Compute the variation to be checked by this thread
computeVariation(distinctSequence.data, distinctSequence.length, distinctPattern.length, workNo, variation);
T* finalPattern = outputVariations + (workNo * pattern.length);
// Assign computed values to the pattern
substitutePattern(pattern, distinctPattern, variation, finalPattern);
delete[] variation;
bool found = checkPattern(sequence, GpuData<T, S> { finalPattern, pattern.length });
outputFound[workNo] = found;
return found;
}
template <typename T, typename S>
__global__ void computeAll(GpuData<T, S> const sequence, GpuData<T, S> const distinctSequence,
GpuData<T, S> const pattern, GpuData<T, S> const distinctPattern, S const workAmount,
T* const outputVariations, bool* const outputFound) {
logLaunchParameters("computeAll");
int workNo = blockIdx.x * blockDim.x + threadIdx.x;
while (workNo < workAmount) {
compute<T, S>(sequence, distinctSequence, pattern, distinctPattern, workNo, outputVariations, outputFound);
workNo += (blockDim.x * gridDim.x);
}
}
template <typename T, typename S>
__global__ void computeAnyWrapper(GpuData<T, S> const sequence, GpuData<T, S> const distinctSequence, GpuData<T, S> const pattern,
GpuData<T, S> const distinctPattern, S const workAmount, S const iteration,
T* const outputVariations, bool* const outputFound, volatile bool* const anyFound) {
logLaunchParameters("computeAnyWrapper");
int const workNo = (blockIdx.x * blockDim.x + threadIdx.x) + (iteration * blockDim.x * gridDim.x);
if (workNo < workAmount) {
bool found = compute<T, S>(sequence, distinctSequence, pattern, distinctPattern, workNo,
outputVariations, outputFound);
if (found) {
*anyFound = true;
}
}
}
template <typename T, typename S>
__global__ void computeAny(GpuData<T, S> const sequence, GpuData<T, S> const distinctSequence, GpuData<T, S> const pattern,
GpuData<T, S> const distinctPattern, S const workAmount, int const gridSize, int const blockSize,
T* const outputVariations, bool* const outputFound, volatile bool* const anyFound) {
logLaunchParameters("computeAny");
assert(blockDim.x == 1 && gridDim.x == 1);
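// Single parent thread using dynamic parallelism: each iteration launches a child grid of gridSize x blockSize
// to check one batch of variations, and the loop exits early once any child sets *anyFound.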
S iterationNo = 0;
do {
hipLaunchKernelGGL(( computeAnyWrapper<T, S>), dim3(gridSize), dim3(blockSize), 0, 0, sequence, distinctSequence, pattern, distinctPattern, workAmount,
iterationNo, outputVariations, outputFound, anyFound);
hipDeviceSynchronize();
printf("[KERNEL] Checked range [%d, %d]\n", (iterationNo) * blockSize * gridSize, (iterationNo + 1) * blockSize * gridSize - 1);
if (*anyFound) {
printf("[KERNEL] Found matching pattern!\n");
return;
}
++iterationNo;
} while (iterationNo * blockSize * gridSize < workAmount);
}
int main(int argc, char** argv) {
if (argc < 4) {
std::cerr << "Too few arguments. Usage: " << argv[0] << " <[any/all]> <pattern> <sequence>."
"Pattern and sequence should be quoted lists of integers seperated by whitespaces.";
exit(1);
}
std::string const mode(argv[1]);
auto const devPattern = parseTo<thrust::device_vector<Type>>(argv[2]);
auto const devSequence = parseTo<thrust::device_vector<Type>>(argv[3]);
assert(devPattern.size() <= devSequence.size());
auto const devDistinctPattern = removeDuplicates(devPattern);
auto const devDistinctSequence = removeDuplicates(devSequence);
Type workAmount = variationsCount(devDistinctSequence.size(), devDistinctPattern.size());
std::cout << "[INFO] Variations to check: " << workAmount << std::endl;
thrust::device_vector<Type> devOutputVariations(workAmount * devPattern.size());
thrust::device_vector<bool> devOutputFound(workAmount);
if (mode == "all") {
auto launchParameters = calculateOptimalLaunchParameters(computeAll<Type, Type>, 0, workAmount).getReal();
dim3 dimGrid = launchParameters.first;
dim3 dimBlock = launchParameters.second;
/** ---------------------------------------- Launch the kernel ---------------------------------------- **/
runWithProfiler([&] {
hipLaunchKernelGGL(( computeAll<Type, Type>) , dim3(dimGrid), dim3(dimBlock), 0, 0,
devSequence, devDistinctSequence, devPattern, devDistinctPattern, workAmount,
devOutputVariations.data().get(), devOutputFound.data().get()
);
});
}
else if (mode == "any") {
auto launchParameters = calculateOptimalLaunchParameters(computeAll<Type, Type>, 0, workAmount).getRealResident();
dim3 dimGrid = launchParameters.first;
dim3 dimBlock = launchParameters.second;
CudaBuffer<bool> anyFound(false);
/** ---------------------------------------- Launch the kernel ---------------------------------------- **/
runWithProfiler([&] {
hipLaunchKernelGGL(( computeAny<Type, Type>) , dim3(1), dim3(1), 0, 0,
devSequence, devDistinctSequence, devPattern, devDistinctPattern, workAmount, dimGrid.x, dimBlock.x,
devOutputVariations.data().get(), devOutputFound.data().get(), anyFound
);
});
std::cout << "Was any pattern found: " << (anyFound.getValue() ? "YES" : "NO") << std::endl;
}
else {
throw std::runtime_error("Wrong argument - allowed [all/any]");
}
/** ------------------------------------------- Process results ------------------------------------------- **/
auto variationsCorrect = thrust::count(devOutputFound.begin(), devOutputFound.end(), true);
thrust::device_vector<Type> devResult(variationsCorrect * devPattern.size());
auto predicateSource = thrust::make_transform_iterator(thrust::counting_iterator<Type>(0),
FoundMatcher<Type>(devPattern.size(), devOutputFound.data().get()));
thrust::copy_if(devOutputVariations.begin(), devOutputVariations.end(), predicateSource, devResult.begin(), _1 == true);
thrust::host_vector<Type> result(devResult);
std::cout << "Found " << variationsCorrect << " patterns" << std::endl;
for (std::size_t resultIdx = 0; resultIdx < result.size(); resultIdx += devPattern.size()) {
for (std::size_t i = 0; i < devPattern.size(); ++i) {
std::cout << result[resultIdx + i] << " ";
}
std::cout << std::endl;
}
}
| 7c57eed437d18ba369b9c307ce1161f51b035904.cu | #include <cstdio>
#include <cstdlib>
#include <iostream>
#include <array>
#include <cmath>
#include <vector>
#include <type_traits>
#include <cassert>
#include <set>
#include <algorithm>
#include <string>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
#include <sstream>
#include <cooperative_groups.h>
#include "device_launch_parameters.h"
#include "CudaUtils.h"
using Type = int;
using namespace thrust::placeholders;
template <typename Container>
std::string printContainer(Container const& container) {
if (container.empty()) {
return "{}";
}
std::string result = "{" + std::to_string(*(container.begin()));
if (container.size() == 1) {
return result + "}";
}
for (auto it = std::next(container.begin()); it != container.end(); ++it) {
result += "," + std::to_string(*it);
}
result += '}';
return result;
}
template <class DestContainer, class Source>
DestContainer parseTo(Source const& source) {
using Target = typename DestContainer::value_type;
std::istringstream is(source);
return DestContainer(std::istream_iterator<Target>(is), std::istream_iterator<Target>());
}
template <typename T>
void removeDuplicates(std::vector<T>& data) {
std::sort(data.begin(), data.end());
auto iter = std::unique(data.begin(), data.end());
data.resize(std::distance(data.begin(), iter));
}
template <typename T>
thrust::device_vector<T> removeDuplicates(thrust::device_vector<T> const& input) {
thrust::device_vector<T> result(input);
thrust::sort(thrust::device, result.begin(), result.end());
auto iter = thrust::unique(thrust::device, result.begin(), result.end());
result.resize(std::distance(result.begin(), iter));
return result;
}
template <typename S>
struct FoundMatcher : public thrust::unary_function<S, bool> {
FoundMatcher(S const patternSize, bool const * const found) : patternSize(patternSize), found(found) { }
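// Maps a flat index into the outputVariations buffer to the per-variation 'found' flag, so it can act as the
// stencil predicate source for thrust::copy_if in main().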
__device__ __host__ bool operator()(S index) {
return found[index / patternSize];
}
S const patternSize;
bool const * const found;
};
using Block = dim3;
using Grid = dim3;
struct LaunchParameters {
int optimalBlockSize;
int optimalGridSize;
int workloadGridSize;
/** Maximum occupancy - all blocks resident on SMs**/
std::pair<Grid, Block> getOptimal() {
return { dim3(optimalGridSize), dim3(optimalBlockSize) };
}
/** Real occupancy - minimal launch configuration to handle the workload **/
std::pair<Grid, Block> getReal() {
return { dim3(workloadGridSize), dim3(optimalBlockSize) };
}
/** Real occupancy bounded by the residency requirement. No non-resident blocks will be launched **/
std::pair<Grid, Block> getRealResident() {
return { dim3(std::min(optimalGridSize, workloadGridSize)), dim3(optimalBlockSize) };
}
};
template <typename Kernel>
LaunchParameters calculateOptimalLaunchParameters(Kernel kernel, std::size_t dynamicSMemSize, int blockSizeLimit) {
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
int maxActiveBlocks;
int device;
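// cudaOccupancyMaxPotentialBlockSize suggests the block size that maximises occupancy; blockSizeLimit is passed
// the total work amount by the callers, so gridSize below is the grid needed to give every work item its own thread.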
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel, dynamicSMemSize, blockSizeLimit);
gridSize = (blockSizeLimit + blockSize - 1) / blockSize;
printf("[CUDA] Optimal block size: %d, grid size: %d\n", blockSize, minGridSize);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, kernel, blockSize, dynamicSMemSize);
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /(float)(props.maxThreadsPerMultiProcessor / props.warpSize);
printf("[CUDA] Theoretical occupancy assuming launching with optimal grid %d: %f\n", minGridSize, occupancy);
printf("[CUDA] Theoretical occupancy assuming launching with neccessary grid %d to handle all the work: %f\n", gridSize, occupancy * ((float) gridSize / minGridSize));
return LaunchParameters { blockSize, minGridSize, gridSize };
}
__device__ void logLaunchParameters(char const* const kernelName) {
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid == 0) {
printf("[CUDA] Invoking %s with: Block(%d,%d), Grid(%d,%d)\n", kernelName, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
}
template <typename Integral, typename std::enable_if_t<std::is_integral<Integral>::value>* = nullptr>
__host__ __device__ std::size_t factorial(Integral const n) {
#if __cplusplus >= 201703L
if constexpr (!std::is_unsigned<Integral>::value) {
#endif
assert(n >= 0);
#if __cplusplus >= 201703L
}
#endif
if (n == 0) {
return 1;
}
std::size_t result = 1;
for (Integral i = 2; i <= n; ++i) {
result *= i;
}
assert(result > 0 && "Overflow detected!");
return result;
}
template <typename Integral, typename std::enable_if_t<std::is_integral<Integral>::value>* = nullptr>
__host__ __device__ std::size_t variationsCount(Integral const n, Integral const k) {
#if __cplusplus >= 201703L
if constexpr (!std::is_unsigned<Integral>::value) {
#endif
assert(n >= 0 && k >= 0);
#if __cplusplus >= 201703L
}
#endif
assert(n >= k);
if (k == 0) {
return 1;
}
std::size_t result = n - k + 1;
for (std::size_t i = result + 1; i <= n; ++i) {
result *= i;
}
return result;
}
template <typename T, typename Integral, typename std::enable_if_t<std::is_integral<Integral>::value>* = nullptr>
__host__ __device__ void computeVariation(T const* const input, Integral const n, Integral const k, Integral p, T* const output) {
#if __cplusplus >= 201703L
if constexpr (!std::is_unsigned<Integral>::value) {
#endif
assert(n >= 0 && k >= 0 && k <= n && p >= 0 && p < variationsCount(n, k));
#if __cplusplus >= 201703L
}
#endif
bool* removed = new bool[n];
for (Integral i = 0; i < n; ++i) {
removed[i] = false;
}
for (Integral x = 0; x < k; ++x) {
Integral v = variationsCount(n - x - 1, k - x - 1);
Integral t = p / v;
for (Integral i = 0; i <= t; ++i) {
if (removed[i]) {
++t;
}
}
output[x] = input[t];
removed[t] = true;
p = p % v;
}
delete[] removed;
}
template <typename T, typename S>
__host__ __device__ void substitutePattern(GpuData<T, S> const& pattern, GpuData<T, S> const& distinctPattern,
T const* const variation, T* const output) {
for (S patternIndex = 0; patternIndex < pattern.length; ++patternIndex) {
T currentPatternSymbol = pattern.data[patternIndex];
// Find the substitution
for (S distinctPatternIndex = 0; distinctPatternIndex < distinctPattern.length; ++distinctPatternIndex) {
T currentDistinctPatternSymbol = distinctPattern.data[distinctPatternIndex];
if (currentDistinctPatternSymbol == currentPatternSymbol) {
output[patternIndex] = variation[distinctPatternIndex];
break;
}
}
}
}
template <typename T, typename S>
__host__ __device__ bool checkPattern(GpuData<T, S> const& sequence, GpuData<T, S> const& pattern) {
T const* sequencePtr = sequence.data;
T const* patternPtr = pattern.data;
while (sequencePtr - sequence.data < sequence.length) {
if (*patternPtr == *sequencePtr) {
++patternPtr;
if (patternPtr - pattern.data == pattern.length) {
// printf("[WorkNo %d] Matches! %d %d %d %d %d %d %d\n", workNo, pattern.data[0], pattern.data[1], pattern.data[2], pattern.data[3], pattern.data[4], pattern.data[5], pattern.data[6]);
return true;
}
}
++sequencePtr;
}
// printf("[WorkNo %d] Not matches!\n", gtid);
return false;
}
template <typename T, typename S>
__device__ bool compute(GpuData<T, S> const& sequence, GpuData<T, S> const& distinctSequence, GpuData<T, S> const& pattern,
GpuData<T, S> const& distinctPattern, S workNo, T* const outputVariations, bool* const outputFound) {
T* variation = new T[distinctPattern.length];
// Compute the variation to be checked by this thread
computeVariation(distinctSequence.data, distinctSequence.length, distinctPattern.length, workNo, variation);
T* finalPattern = outputVariations + (workNo * pattern.length);
// Assign computed values to the pattern
substitutePattern(pattern, distinctPattern, variation, finalPattern);
delete[] variation;
bool found = checkPattern(sequence, GpuData<T, S> { finalPattern, pattern.length });
outputFound[workNo] = found;
return found;
}
template <typename T, typename S>
__global__ void computeAll(GpuData<T, S> const sequence, GpuData<T, S> const distinctSequence,
GpuData<T, S> const pattern, GpuData<T, S> const distinctPattern, S const workAmount,
T* const outputVariations, bool* const outputFound) {
logLaunchParameters("computeAll");
int workNo = blockIdx.x * blockDim.x + threadIdx.x;
while (workNo < workAmount) {
compute<T, S>(sequence, distinctSequence, pattern, distinctPattern, workNo, outputVariations, outputFound);
workNo += (blockDim.x * gridDim.x);
}
}
template <typename T, typename S>
__global__ void computeAnyWrapper(GpuData<T, S> const sequence, GpuData<T, S> const distinctSequence, GpuData<T, S> const pattern,
GpuData<T, S> const distinctPattern, S const workAmount, S const iteration,
T* const outputVariations, bool* const outputFound, volatile bool* const anyFound) {
logLaunchParameters("computeAnyWrapper");
int const workNo = (blockIdx.x * blockDim.x + threadIdx.x) + (iteration * blockDim.x * gridDim.x);
if (workNo < workAmount) {
bool found = compute<T, S>(sequence, distinctSequence, pattern, distinctPattern, workNo,
outputVariations, outputFound);
if (found) {
*anyFound = true;
}
}
}
template <typename T, typename S>
__global__ void computeAny(GpuData<T, S> const sequence, GpuData<T, S> const distinctSequence, GpuData<T, S> const pattern,
GpuData<T, S> const distinctPattern, S const workAmount, int const gridSize, int const blockSize,
T* const outputVariations, bool* const outputFound, volatile bool* const anyFound) {
logLaunchParameters("computeAny");
assert(blockDim.x == 1 && gridDim.x == 1);
S iterationNo = 0;
do {
computeAnyWrapper<T, S><<<gridSize, blockSize>>>(sequence, distinctSequence, pattern, distinctPattern, workAmount,
iterationNo, outputVariations, outputFound, anyFound);
cudaDeviceSynchronize();
printf("[KERNEL] Checked range [%d, %d]\n", (iterationNo) * blockSize * gridSize, (iterationNo + 1) * blockSize * gridSize - 1);
if (*anyFound) {
printf("[KERNEL] Found matching pattern!\n");
return;
}
++iterationNo;
} while (iterationNo * blockSize * gridSize < workAmount);
}
int main(int argc, char** argv) {
if (argc < 4) {
std::cerr << "Too few arguments. Usage: " << argv[0] << " <[any/all]> <pattern> <sequence>."
"Pattern and sequence should be quoted lists of integers seperated by whitespaces.";
exit(1);
}
std::string const mode(argv[1]);
auto const devPattern = parseTo<thrust::device_vector<Type>>(argv[2]);
auto const devSequence = parseTo<thrust::device_vector<Type>>(argv[3]);
assert(devPattern.size() <= devSequence.size());
auto const devDistinctPattern = removeDuplicates(devPattern);
auto const devDistinctSequence = removeDuplicates(devSequence);
Type workAmount = variationsCount(devDistinctSequence.size(), devDistinctPattern.size());
std::cout << "[INFO] Variations to check: " << workAmount << std::endl;
thrust::device_vector<Type> devOutputVariations(workAmount * devPattern.size());
thrust::device_vector<bool> devOutputFound(workAmount);
if (mode == "all") {
auto launchParameters = calculateOptimalLaunchParameters(computeAll<Type, Type>, 0, workAmount).getReal();
dim3 dimGrid = launchParameters.first;
dim3 dimBlock = launchParameters.second;
/** ---------------------------------------- Launch the kernel ---------------------------------------- **/
runWithProfiler([&] {
computeAll<Type, Type> <<<dimGrid, dimBlock>>> (
devSequence, devDistinctSequence, devPattern, devDistinctPattern, workAmount,
devOutputVariations.data().get(), devOutputFound.data().get()
);
});
}
else if (mode == "any") {
auto launchParameters = calculateOptimalLaunchParameters(computeAll<Type, Type>, 0, workAmount).getRealResident();
dim3 dimGrid = launchParameters.first;
dim3 dimBlock = launchParameters.second;
CudaBuffer<bool> anyFound(false);
/** ---------------------------------------- Launch the kernel ---------------------------------------- **/
runWithProfiler([&] {
computeAny<Type, Type> <<<1, 1>>> (
devSequence, devDistinctSequence, devPattern, devDistinctPattern, workAmount, dimGrid.x, dimBlock.x,
devOutputVariations.data().get(), devOutputFound.data().get(), anyFound
);
});
std::cout << "Was any pattern found: " << (anyFound.getValue() ? "YES" : "NO") << std::endl;
}
else {
throw std::runtime_error("Wrong argument - allowed [all/any]");
}
/** ------------------------------------------- Process results ------------------------------------------- **/
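// copy_if keeps only elements belonging to variations whose 'found' flag is set: the transform_iterator over a
// counting_iterator replays the per-variation flag for every element of that variation, so matching patterns
// are copied whole into devResult.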
auto variationsCorrect = thrust::count(devOutputFound.begin(), devOutputFound.end(), true);
thrust::device_vector<Type> devResult(variationsCorrect * devPattern.size());
auto predicateSource = thrust::make_transform_iterator(thrust::counting_iterator<Type>(0),
FoundMatcher<Type>(devPattern.size(), devOutputFound.data().get()));
thrust::copy_if(devOutputVariations.begin(), devOutputVariations.end(), predicateSource, devResult.begin(), _1 == true);
thrust::host_vector<Type> result(devResult);
std::cout << "Found " << variationsCorrect << " patterns" << std::endl;
for (std::size_t resultIdx = 0; resultIdx < result.size(); resultIdx += devPattern.size()) {
for (std::size_t i = 0; i < devPattern.size(); ++i) {
std::cout << result[resultIdx + i] << " ";
}
std::cout << std::endl;
}
}
|
23a2a8722259d161a4d307ba160be962a2600845.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Vector2.cuh"
#include "ColorF.cuh"
#include "Screen.cuh"
#include "Matrix4.cuh"
#include "Sphere.cuh"
#include "Camera.cuh"
#include "ConvMask.cuh"
#include "Renderer.cuh"
#include "Texture_hip.cuh"
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <stdexcept>
#include <iostream>
#include <vector>
#include <stdio.h>
using namespace std;
#define prune(number) ( number = number < 0 ? 0 : number > 1 ? 1 : number )
int main(){
Renderer renderer{{1920, 1080},
{},
{120.0/180.0 * 3.1415, 1920, 1080}};
renderer.scene.vertices.push_back({0.1, 0, -10});
renderer.scene.vertices.push_back({1, 3, -10});
renderer.scene.vertices.push_back({3, 3, -10});
renderer.scene.vertices.push_back({3, 0, -8});
renderer.scene.vertices.push_back({0, 1, -10.1});
renderer.scene.vertices.push_back({-4, -4, -10});
renderer.scene.vertices.push_back({4, -4, -13});
renderer.scene.vertices.push_back({4, 4, -13});
renderer.scene.vertices.push_back({-4, 4, -10});
renderer.scene.materials.push_back({{}, {255, 0, 0}, 0.9, {}, 0.1});
renderer.scene.materials.push_back({{}, {255, 255, 0}, 0.9, {}, 0.1});
renderer.scene.materials.push_back({{}, {}, 0.3, {}, 0.7});
renderer.scene.triangles.push_back({0, 1, 2, 0,0,0,0});
renderer.scene.triangles.push_back({4, 3, 2, 0,0,0,1});
renderer.scene.triangles.push_back({5, 6, 7, 0,0,0,2});
renderer.scene.triangles.push_back({8, 5, 7, 0,0,0, 2});
renderer.render();
// ConvMask c{1, 1, 1,
// 1, 1, 1,
// 1, 1, 1};
hipDeviceSynchronize();
//convolution<<<dim3((renderer.screen.sizeX + BLOCK_X - 3)/(BLOCK_X-2), (renderer.screen.sizeY + BLOCK_Y - 3)/(BLOCK_Y-2)), dim3(BLOCK_X, BLOCK_Y)>>>(renderer.screen, c);
// renderer.screen.copy(hipMemcpyDeviceToHost);
// hipMemcpy(renderer.screen.d_image, sky.img, sizeof(ColorF) * renderer.screen.sizeX * renderer.screen.sizeY, hipMemcpyHostToDevice);
renderer.screen.copyAndSave("out.ppm");
return 0;
} | 23a2a8722259d161a4d307ba160be962a2600845.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Vector2.cuh"
#include "ColorF.cuh"
#include "Screen.cuh"
#include "Matrix4.cuh"
#include "Sphere.cuh"
#include "Camera.cuh"
#include "ConvMask.cuh"
#include "Renderer.cuh"
#include "Texture.cuh"
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <stdexcept>
#include <iostream>
#include <vector>
#include <stdio.h>
using namespace std;
#define prune(number) ( number = number < 0 ? 0 : number > 1 ? 1 : number )
int main(){
Renderer renderer{{1920, 1080},
{},
{120.0/180.0 * 3.1415, 1920, 1080}};
renderer.scene.vertices.push_back({0.1, 0, -10});
renderer.scene.vertices.push_back({1, 3, -10});
renderer.scene.vertices.push_back({3, 3, -10});
renderer.scene.vertices.push_back({3, 0, -8});
renderer.scene.vertices.push_back({0, 1, -10.1});
renderer.scene.vertices.push_back({-4, -4, -10});
renderer.scene.vertices.push_back({4, -4, -13});
renderer.scene.vertices.push_back({4, 4, -13});
renderer.scene.vertices.push_back({-4, 4, -10});
renderer.scene.materials.push_back({{}, {255, 0, 0}, 0.9, {}, 0.1});
renderer.scene.materials.push_back({{}, {255, 255, 0}, 0.9, {}, 0.1});
renderer.scene.materials.push_back({{}, {}, 0.3, {}, 0.7});
renderer.scene.triangles.push_back({0, 1, 2, 0,0,0,0});
renderer.scene.triangles.push_back({4, 3, 2, 0,0,0,1});
renderer.scene.triangles.push_back({5, 6, 7, 0,0,0,2});
renderer.scene.triangles.push_back({8, 5, 7, 0,0,0, 2});
renderer.render();
// ConvMask c{1, 1, 1,
// 1, 1, 1,
// 1, 1, 1};
cudaDeviceSynchronize();
//convolution<<<dim3((renderer.screen.sizeX + BLOCK_X - 3)/(BLOCK_X-2), (renderer.screen.sizeY + BLOCK_Y - 3)/(BLOCK_Y-2)), dim3(BLOCK_X, BLOCK_Y)>>>(renderer.screen, c);
// renderer.screen.copy(cudaMemcpyDeviceToHost);
// cudaMemcpy(renderer.screen.d_image, sky.img, sizeof(ColorF) * renderer.screen.sizeX * renderer.screen.sizeY, cudaMemcpyHostToDevice);
renderer.screen.copyAndSave("out.ppm");
return 0;
} |
4f6b336bd4c3a11b032cf3b39d49f77a0c7dfe3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe{
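// Projects the image point (j, i) onto the detector direction obtained by rotating
// (detectorX, detectorY) with the 2x2 rotation matrix, and returns the distance along the
// detector from its rotated end point to that projection; detectorLength is returned when
// the projection falls outside the detector.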
template <typename Dtype>
__device__ Dtype backprojectPointDevice( int j, int i, Dtype* rotationMatrix, Dtype detectorX, Dtype detectorY , int detectorLength )
{
Dtype directionFirst = detectorX * rotationMatrix[ 0 ] + detectorY * rotationMatrix[ 1 ];
Dtype directionSecond = detectorX * rotationMatrix[ 2 ] + detectorY * rotationMatrix[ 3 ];
Dtype intersectionFirst = ( j * directionFirst + i * directionSecond ) / ( pow( directionFirst, 2 ) + pow( directionSecond, 2 ) ) * directionFirst;
Dtype intersectionSecond = ( j * directionFirst + i * directionSecond ) / ( pow( directionFirst, 2 ) + pow( directionSecond, 2 ) ) * directionSecond;
//TODO: this probably does not work for angles above 180
if( intersectionFirst > directionFirst )
{
return detectorLength;
}
return sqrt( pow( intersectionFirst - directionFirst, 2 ) + pow( intersectionSecond - directionSecond, 2 ) );
}
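// Linearly interpolates the input sinogram between the two detector cells that bracket the
// fractional detector position 'position', within the row belonging to 'projection'.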
template <typename Dtype>
__device__ Dtype interpolateDevice( const Dtype* in, Dtype position, size_t projection, size_t detectorLength, size_t batchOffsetInput )
{
int firstPosition = static_cast< int >( position );
int secondPosition = firstPosition + 1;
// instead of calculating "1 - distance" we can just switch the two distances because they add up to 1
Dtype firstWeight = -1 * ( position - static_cast< Dtype >( secondPosition ) );
Dtype secondWeight = position - static_cast< Dtype >( firstPosition );
size_t depthIndex = projection * detectorLength + batchOffsetInput;
return firstWeight * in[ firstPosition + depthIndex ] + secondWeight * in[ secondPosition + depthIndex ];
}
template <typename Dtype>
__global__ void BackprojectionForward(const int count, const Dtype* in,
const int halfOutputWidth, const int halfOutputHeight, const int outputWidth,
const int outputHeight, const int detectorWidth, const int countOfProjections,
const int outputSize,
const Dtype scalingFactor,
Dtype* rotmatrices,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
const int l = index / outputSize;
const size_t batchOffsetInput = l * (detectorWidth * countOfProjections);
const int interBatchIndex = index - l * outputSize;
int i = interBatchIndex / outputWidth;
int j = interBatchIndex - ( i * outputWidth );
i -= halfOutputHeight;
j -= halfOutputWidth;
out[index] = 0;
// for every projection
#pragma unroll
for( size_t k = 0; k < countOfProjections; ++k ) {
Dtype position = backprojectPointDevice(j, i, rotmatrices + ( k * static_cast< size_t >( 4 ) ), static_cast< Dtype >( 0. ), -static_cast< Dtype >( halfOutputWidth ), detectorWidth );
if( position < detectorWidth-1 && position > 0.00001 )
{
out[ index ] += interpolateDevice( in, position, k, detectorWidth, batchOffsetInput );
}
}
out[ index ] *= scalingFactor;
}
}
template <typename Dtype>
void BackprojectionLayer< Dtype >::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const int count = top[0]->count();
const int outputHeight = top[0]->shape()[3];
const int outputWidth = top[0]->shape()[2];
const int halfHeight = outputHeight /2.;
const int halfWidth = outputWidth / 2.;
const int detectorWidth = bottom[0]->shape()[3];
const int countOfProjections = bottom[0]->shape()[2];
const int outputSize = outputWidth * outputHeight;
const Dtype scalingFactor = M_PI / ( countOfProjections );
std::vector< Dtype > rotationMatrices;
for( int k = 0; k < countOfProjections; ++k )
{
fillRotationMatrix( rotationMatrices, this->angles[k] );
}
// send matrices to device
Dtype *deviceRotationMatrices;
hipMalloc( (void**) &deviceRotationMatrices, sizeof(Dtype) * rotationMatrices.size() );
hipMemcpy(deviceRotationMatrices, &rotationMatrices[0], sizeof(Dtype) * rotationMatrices.size(), hipMemcpyHostToDevice);
// calculate
hipLaunchKernelGGL(( BackprojectionForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count,
bottom[0]->gpu_data(),
halfWidth,
halfHeight,
outputWidth,
outputHeight,
detectorWidth,
countOfProjections,
outputSize,
scalingFactor,
deviceRotationMatrices,
top[0]->mutable_gpu_data()
);
CUDA_POST_KERNEL_CHECK;
}
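// Compare-and-swap based fallback implementing atomicAdd for doubles, the standard workaround
// for devices without native double-precision atomicAdd.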
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicAdd(float* address, float val)
{
return __fAtomicAdd(address, val);
}
template <typename Dtype>
__global__ void BackprojectionBackward(const int count, const Dtype* __restrict__ top,
const int halfTopWidth, const int halfTopHeight, const int topWidth,
const int topHeight, const int detectorWidth, const int countOfProjections,
const int topSize,
Dtype* __restrict__ rotmatrices,
Dtype* __restrict__ bottom ){
CUDA_KERNEL_LOOP(index, count) {
const int l = index / topSize;
const size_t batchOffsetBottom = l * (detectorWidth * countOfProjections);
const size_t batchOffsetTop = l * topSize;
const int interBatchIndex = index - batchOffsetTop;
int i = interBatchIndex / topWidth;
int j = interBatchIndex - ( i * topWidth );
i -= halfTopHeight;
j -= halfTopWidth;
Dtype currentError = top[ index ];
size_t depthIndex = batchOffsetBottom;
// for every projection
#pragma unroll
for( size_t k = 0; k < countOfProjections; ++k ) {
// determine the weights and the input positions
Dtype position = backprojectPointDevice(j, i, rotmatrices + ( k * static_cast< size_t >( 4 ) ), static_cast< Dtype >( 0. ), -static_cast< Dtype >( halfTopWidth ), detectorWidth );
if( position < detectorWidth-1 && position > 0.00001 )
{
int firstPosition = static_cast< int >( position );
int secondPosition = firstPosition + 1;
// instead of calculating "1 - distance" we can just switch the two distances because they add up to 1
Dtype firstWeight = -1 * ( position - static_cast< Dtype >( secondPosition ) );
Dtype secondWeight = position - static_cast< Dtype >( firstPosition );
Dtype* currentFloorPosition = bottom + firstPosition + depthIndex;
Dtype value = firstWeight * currentError;
atomicAdd( currentFloorPosition, value );
Dtype* currentCeilPosition = currentFloorPosition + static_cast< size_t >( 1 );
value = secondWeight * currentError;
atomicAdd( currentCeilPosition, value );
}
depthIndex += detectorWidth;
}
}
}
template <typename Dtype>
void BackprojectionLayer< Dtype >::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
const int count = top[0]->count();
const int topHeight = top[0]->shape()[3];
const int topWidth = top[0]->shape()[2];
const int halfTopHeight = topHeight /2.;
const int halfTopWidth = topWidth / 2.;
const int detectorWidth = bottom[0]->shape()[3];
const int countOfProjections = bottom[0]->shape()[2];
const int topSize = topWidth * topHeight;
std::vector< Dtype > rotationMatrices;
for( int k = 0; k < countOfProjections; ++k )
{
fillRotationMatrix( rotationMatrices, this->angles[k] );
}
// send matrices to device
Dtype *deviceRotationMatrices;
hipMalloc( (void**) &deviceRotationMatrices, sizeof(Dtype) * rotationMatrices.size() );
hipMemcpy(deviceRotationMatrices, &rotationMatrices[0], sizeof(Dtype) * rotationMatrices.size(), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( BackprojectionBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count,
top[0]->gpu_diff(),
halfTopWidth,
halfTopHeight,
topWidth,
topHeight,
detectorWidth,
countOfProjections,
topSize,
deviceRotationMatrices,
bottom[0]->mutable_gpu_diff()
);
CUDA_POST_KERNEL_CHECK;
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype scalingFactor = 1. / static_cast< Dtype >( topSize );
const int batchSize = bottom[0]->shape()[0];
for( int l = 0; l < batchSize; ++l )
{
const size_t batchOffsetBottom = l * detectorWidth * countOfProjections;
for (int i = 0; i < countOfProjections; ++i) {
for (int j = 0; j < detectorWidth; ++j) {
bottom_diff[batchOffsetBottom + j+(detectorWidth*i)] *= scalingFactor;
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BackprojectionLayer);
} // namespace caffe
| 4f6b336bd4c3a11b032cf3b39d49f77a0c7dfe3f.cu |
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe{
template <typename Dtype>
__device__ Dtype backprojectPointDevice( int j, int i, Dtype* rotationMatrix, Dtype detectorX, Dtype detectorY , int detectorLength )
{
Dtype directionFirst = detectorX * rotationMatrix[ 0 ] + detectorY * rotationMatrix[ 1 ];
Dtype directionSecond = detectorX * rotationMatrix[ 2 ] + detectorY * rotationMatrix[ 3 ];
Dtype intersectionFirst = ( j * directionFirst + i * directionSecond ) / ( pow( directionFirst, 2 ) + pow( directionSecond, 2 ) ) * directionFirst;
Dtype intersectionSecond = ( j * directionFirst + i * directionSecond ) / ( pow( directionFirst, 2 ) + pow( directionSecond, 2 ) ) * directionSecond;
//TODO: this probably does not work for angles above 180
if( intersectionFirst > directionFirst )
{
return detectorLength;
}
return sqrt( pow( intersectionFirst - directionFirst, 2 ) + pow( intersectionSecond - directionSecond, 2 ) );
}
template <typename Dtype>
__device__ Dtype interpolateDevice( const Dtype* in, Dtype position, size_t projection, size_t detectorLength, size_t batchOffsetInput )
{
int firstPosition = static_cast< int >( position );
int secondPosition = firstPosition + 1;
// instead of calculating "1 - distance" we can just switch the two distances because they add up to 1
Dtype firstWeight = -1 * ( position - static_cast< Dtype >( secondPosition ) );
Dtype secondWeight = position - static_cast< Dtype >( firstPosition );
size_t depthIndex = projection * detectorLength + batchOffsetInput;
return firstWeight * in[ firstPosition + depthIndex ] + secondWeight * in[ secondPosition + depthIndex ];
}
template <typename Dtype>
__global__ void BackprojectionForward(const int count, const Dtype* in,
const int halfOutputWidth, const int halfOutputHeight, const int outputWidth,
const int outputHeight, const int detectorWidth, const int countOfProjections,
const int outputSize,
const Dtype scalingFactor,
Dtype* rotmatrices,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
const int l = index / outputSize;
const size_t batchOffsetInput = l * (detectorWidth * countOfProjections);
const int interBatchIndex = index - l * outputSize;
int i = interBatchIndex / outputWidth;
int j = interBatchIndex - ( i * outputWidth );
i -= halfOutputHeight;
j -= halfOutputWidth;
out[index] = 0;
// for every projection
#pragma unroll
for( size_t k = 0; k < countOfProjections; ++k ) {
Dtype position = backprojectPointDevice(j, i, rotmatrices + ( k * static_cast< size_t >( 4 ) ), static_cast< Dtype >( 0. ), -static_cast< Dtype >( halfOutputWidth ), detectorWidth );
if( position < detectorWidth-1 && position > 0.00001 )
{
out[ index ] += interpolateDevice( in, position, k, detectorWidth, batchOffsetInput );
}
}
out[ index ] *= scalingFactor;
}
}
template <typename Dtype>
void BackprojectionLayer< Dtype >::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const int count = top[0]->count();
const int outputHeight = top[0]->shape()[3];
const int outputWidth = top[0]->shape()[2];
const int halfHeight = outputHeight /2.;
const int halfWidth = outputWidth / 2.;
const int detectorWidth = bottom[0]->shape()[3];
const int countOfProjections = bottom[0]->shape()[2];
const int outputSize = outputWidth * outputHeight;
const Dtype scalingFactor = M_PI / ( countOfProjections );
std::vector< Dtype > rotationMatrices;
for( int k = 0; k < countOfProjections; ++k )
{
fillRotationMatrix( rotationMatrices, this->angles[k] );
}
// send matrices to device
Dtype *deviceRotationMatrices;
cudaMalloc( (void**) &deviceRotationMatrices, sizeof(Dtype) * rotationMatrices.size() );
cudaMemcpy(deviceRotationMatrices, &rotationMatrices[0], sizeof(Dtype) * rotationMatrices.size(), cudaMemcpyHostToDevice);
// calculate
BackprojectionForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count,
bottom[0]->gpu_data(),
halfWidth,
halfHeight,
outputWidth,
outputHeight,
detectorWidth,
countOfProjections,
outputSize,
scalingFactor,
deviceRotationMatrices,
top[0]->mutable_gpu_data()
);
CUDA_POST_KERNEL_CHECK;
}
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicAdd(float* address, float val)
{
return __fAtomicAdd(address, val);
}
template <typename Dtype>
__global__ void BackprojectionBackward(const int count, const Dtype* __restrict__ top,
const int halfTopWidth, const int halfTopHeight, const int topWidth,
const int topHeight, const int detectorWidth, const int countOfProjections,
const int topSize,
Dtype* __restrict__ rotmatrices,
Dtype* __restrict__ bottom ){
CUDA_KERNEL_LOOP(index, count) {
const int l = index / topSize;
const size_t batchOffsetBottom = l * (detectorWidth * countOfProjections);
const size_t batchOffsetTop = l * topSize;
const int interBatchIndex = index - batchOffsetTop;
int i = interBatchIndex / topWidth;
int j = interBatchIndex - ( i * topWidth );
i -= halfTopHeight;
j -= halfTopWidth;
Dtype currentError = top[ index ];
size_t depthIndex = batchOffsetBottom;
// for every projection
#pragma unroll
for( size_t k = 0; k < countOfProjections; ++k ) {
// determine the weights and the input positions
Dtype position = backprojectPointDevice(j, i, rotmatrices + ( k * static_cast< size_t >( 4 ) ), static_cast< Dtype >( 0. ), -static_cast< Dtype >( halfTopWidth ), detectorWidth );
if( position < detectorWidth-1 && position > 0.00001 )
{
int firstPosition = static_cast< int >( position );
int secondPosition = firstPosition + 1;
// instead of calculating "1 - distance" we can just switch the two distances because they add up to 1
Dtype firstWeight = -1 * ( position - static_cast< Dtype >( secondPosition ) );
Dtype secondWeight = position - static_cast< Dtype >( firstPosition );
Dtype* currentFloorPosition = bottom + firstPosition + depthIndex;
Dtype value = firstWeight * currentError;
atomicAdd( currentFloorPosition, value );
Dtype* currentCeilPosition = currentFloorPosition + static_cast< size_t >( 1 );
value = secondWeight * currentError;
atomicAdd( currentCeilPosition, value );
}
depthIndex += detectorWidth;
}
}
}
template <typename Dtype>
void BackprojectionLayer< Dtype >::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
const int count = top[0]->count();
const int topHeight = top[0]->shape()[3];
const int topWidth = top[0]->shape()[2];
const int halfTopHeight = topHeight /2.;
const int halfTopWidth = topWidth / 2.;
const int detectorWidth = bottom[0]->shape()[3];
const int countOfProjections = bottom[0]->shape()[2];
const int topSize = topWidth * topHeight;
std::vector< Dtype > rotationMatrices;
for( int k = 0; k < countOfProjections; ++k )
{
fillRotationMatrix( rotationMatrices, this->angles[k] );
}
// send matrices to device
Dtype *deviceRotationMatrices;
cudaMalloc( (void**) &deviceRotationMatrices, sizeof(Dtype) * rotationMatrices.size() );
cudaMemcpy(deviceRotationMatrices, &rotationMatrices[0], sizeof(Dtype) * rotationMatrices.size(), cudaMemcpyHostToDevice);
BackprojectionBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count,
top[0]->gpu_diff(),
halfTopWidth,
halfTopHeight,
topWidth,
topHeight,
detectorWidth,
countOfProjections,
topSize,
deviceRotationMatrices,
bottom[0]->mutable_gpu_diff()
);
CUDA_POST_KERNEL_CHECK;
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype scalingFactor = 1. / static_cast< Dtype >( topSize );
const int batchSize = bottom[0]->shape()[0];
for( int l = 0; l < batchSize; ++l )
{
const size_t batchOffsetBottom = l * detectorWidth * countOfProjections;
for (int i = 0; i < countOfProjections; ++i) {
for (int j = 0; j < detectorWidth; ++j) {
bottom_diff[batchOffsetBottom + j+(detectorWidth*i)] *= scalingFactor;
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BackprojectionLayer);
} // namespace caffe
|
3dab9808b459852a9fa2fc906afb51ce070618e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_struct.cuh"
#include "common_hip.cuh"
#include "pitch.h"
template<typename T>
__device__ __forceinline__
const T* get_pointer_const(const T* data,
int node_index, int num_nodes, int chunk_size, int stream_id) {
return (data + (stream_id * chunk_size * num_nodes)
+ (node_index * num_nodes));
}
template<typename T>
__device__ __forceinline__ T* get_pointer(T* data, int node_index,
int num_nodes, int chunk_size, int stream_id) {
return (data + (stream_id * chunk_size * num_nodes)
+ (node_index * num_nodes));
}
__device__ __forceinline__
int get_ceil(float dividend, int divisor) {
return ((int) (ceilf(dividend / divisor)));
}
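// Extracts the single bit at position 'pos' from a 64-bit word using the PTX bfe.u64
// (bit field extract) instruction.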
__device__ __forceinline__
unsigned getBit(unsigned long long val, int pos) {
unsigned long long ret;
asm("bfe.u64 %0, %1, %2, 1;" : "=l"(ret) : "l"(val), "r"(pos));
return (unsigned) ret;
}
__global__
void __kernel_init_edge(const int* __restrict__ d_non_tree_edges,
const int* __restrict__ d_edge_offsets, int *d_precompute_array,
const int* __restrict__ d_fvs_vertices,
const unsigned long long *d_si_vector, int start, int end,
int stream_index, int chunk_size, int original_nodes, int size_vector,
int fvs_size, int num_non_tree_edges, int num_edges) {
int si_index = -1;
unsigned long long si_value;
int src_index = blockIdx.x + start;
if (src_index >= end)
return;
int *d_row = get_pointer(d_precompute_array, src_index - start,
original_nodes, chunk_size, stream_index);
const int* __restrict__ d_edge = get_pointer_const(d_edge_offsets,
src_index - start, original_nodes, chunk_size, stream_index);
for (int edge_index = threadIdx.x; edge_index < original_nodes;
edge_index += blockDim.x) {
int edge_offset = __ldg(&d_edge[edge_index]);
//tree edges
if (edge_offset >= 0) {
int non_tree_edge_loc = __ldg(&d_non_tree_edges[edge_offset]);
//non_tree_edge
if (non_tree_edge_loc >= 0) {
int p_idx = non_tree_edge_loc >> 6;
if (si_index != p_idx) {
si_index = p_idx;
si_value = __ldg(&d_si_vector[si_index]);
}
d_row[edge_index] = getBit(si_value, non_tree_edge_loc & 63);
} else
//tree edge
d_row[edge_index] = 0;
} else {
d_row[edge_index] = 0;
}
}
}
/**
 * @brief This method is used to invoke a kernel whose function is described in the details section.
 * @details This method invokes a kernel. The kernel's task is to do the following things in parallel, in order:
 * a) For each source vertex between start and end (15 at a time, the grid dimension), fill the precompute_array edges.
 * b) The precompute array is filled in the following way:
 *    if the edge is a tree edge in the original spanning tree, then its value is 0;
 *    otherwise it is 1 if Si contains a 1 in the corresponding non-tree edge position, and 0 otherwise.
 *
 * @param start index of vertex from 0 to fvs_size - 2
 * @param end index of vertex from 1 to fvs_size - 1
 * @param stream_index 0 or 1
 */
void gpu_struct::Kernel_init_edges_helper(int start, int end,
int stream_index) {
int total_length = end - start;
hipLaunchKernelGGL(( __kernel_init_edge), dim3(total_length), dim3(512), 0, streams[stream_index],
d_non_tree_edges, d_edge_offsets, d_precompute_array,
d_fvs_vertices, d_si_vector, start, end, stream_index, chunk_size,
original_nodes, size_vector, fvs_size, num_non_tree_edges,
num_edges);
}
| 3dab9808b459852a9fa2fc906afb51ce070618e3.cu | #include "gpu_struct.cuh"
#include "common.cuh"
#include "pitch.h"
template<typename T>
__device__ __forceinline__
const T* get_pointer_const(const T* data,
int node_index, int num_nodes, int chunk_size, int stream_id) {
return (data + (stream_id * chunk_size * num_nodes)
+ (node_index * num_nodes));
}
template<typename T>
__device__ __forceinline__ T* get_pointer(T* data, int node_index,
int num_nodes, int chunk_size, int stream_id) {
return (data + (stream_id * chunk_size * num_nodes)
+ (node_index * num_nodes));
}
__device__ __forceinline__
int get_ceil(float dividend, int divisor) {
return ((int) (ceilf(dividend / divisor)));
}
__device__ __forceinline__
unsigned getBit(unsigned long long val, int pos) {
unsigned long long ret;
asm("bfe.u64 %0, %1, %2, 1;" : "=l"(ret) : "l"(val), "r"(pos));
return (unsigned) ret;
}
__global__
void __kernel_init_edge(const int* __restrict__ d_non_tree_edges,
const int* __restrict__ d_edge_offsets, int *d_precompute_array,
const int* __restrict__ d_fvs_vertices,
const unsigned long long *d_si_vector, int start, int end,
int stream_index, int chunk_size, int original_nodes, int size_vector,
int fvs_size, int num_non_tree_edges, int num_edges) {
int si_index = -1;
unsigned long long si_value;
int src_index = blockIdx.x + start;
if (src_index >= end)
return;
int *d_row = get_pointer(d_precompute_array, src_index - start,
original_nodes, chunk_size, stream_index);
const int* __restrict__ d_edge = get_pointer_const(d_edge_offsets,
src_index - start, original_nodes, chunk_size, stream_index);
for (int edge_index = threadIdx.x; edge_index < original_nodes;
edge_index += blockDim.x) {
int edge_offset = __ldg(&d_edge[edge_index]);
//tree edges
if (edge_offset >= 0) {
int non_tree_edge_loc = __ldg(&d_non_tree_edges[edge_offset]);
//non_tree_edge
if (non_tree_edge_loc >= 0) {
int p_idx = non_tree_edge_loc >> 6;
if (si_index != p_idx) {
si_index = p_idx;
si_value = __ldg(&d_si_vector[si_index]);
}
d_row[edge_index] = getBit(si_value, non_tree_edge_loc & 63);
} else
//tree edge
d_row[edge_index] = 0;
} else {
d_row[edge_index] = 0;
}
}
}
/**
 * @brief This method is used to invoke a kernel whose function is described in the details section.
 * @details This method invokes a kernel. The kernel's task is to do the following things in parallel, in order:
 * a) For each source vertex between start and end (15 at a time, the grid dimension), fill the precompute_array edges.
 * b) The precompute array is filled in the following way:
 *    if the edge is a tree edge in the original spanning tree, then its value is 0;
 *    otherwise it is 1 if Si contains a 1 in the corresponding non-tree edge position, and 0 otherwise.
 *
 * @param start index of vertex from 0 to fvs_size - 2
 * @param end index of vertex from 1 to fvs_size - 1
 * @param stream_index 0 or 1
 */
void gpu_struct::Kernel_init_edges_helper(int start, int end,
int stream_index) {
int total_length = end - start;
__kernel_init_edge<<<total_length, 512, 0, streams[stream_index]>>>(
d_non_tree_edges, d_edge_offsets, d_precompute_array,
d_fvs_vertices, d_si_vector, start, end, stream_index, chunk_size,
original_nodes, size_vector, fvs_size, num_non_tree_edges,
num_edges);
}
|
daeacc192157452742c2b5dce88b807afc963d7a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/fil/fil.h>
#include <cuml/tree/algo_helper.h>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace fil {
struct Params {
DatasetParams data;
RegressionParams blobs;
ModelHandle model;
ML::fil::storage_type_t storage;
ML::fil::algo_t algo;
RF_params rf;
int predict_repetitions;
};
class FIL : public RegressionFixture<float> {
typedef RegressionFixture<float> Base;
public:
FIL(const std::string& name, const Params& p)
/*
fitting to linear combinations in "y" normally yields trees that check
values of all significant columns, as well as their linear
combinations in "X". During inference, the exact threshold
values do not affect speed. The distribution of column popularity does
not affect speed barring lots of uninformative columns in succession.
Hence, this method represents real datasets well enough for both
classification and regression.
*/
: RegressionFixture<float>(name, p.data, p.blobs),
model(p.model),
p_rest(p) {}
static void regression_to_classification(float* y, int nrows, int nclasses,
hipStream_t stream) {
raft::linalg::unaryOp(
y, y, nrows,
[=] __device__(float a) {
return float(lroundf(fabsf(a) * 1000. * nclasses) % nclasses);
},
stream);
}
protected:
void runBenchmark(::benchmark::State& state) override {
if (!params.rowMajor) {
state.SkipWithError("FIL only supports row-major inputs");
}
if (params.nclasses > 1) {
// convert regression ranges into [0..nclasses-1]
regression_to_classification(data.y, params.nrows, params.nclasses,
stream);
}
// create model
ML::RandomForestRegressorF rf_model;
auto* mPtr = &rf_model;
mPtr->trees = nullptr;
size_t train_nrows = ::min(params.nrows, 1000);
fit(*handle, mPtr, data.X, train_nrows, params.ncols, data.y, p_rest.rf);
CUDA_CHECK(hipStreamSynchronize(stream));
ML::build_treelite_forest(&model, &rf_model, params.ncols,
params.nclasses > 1 ? 2 : 1);
ML::fil::treelite_params_t tl_params = {
.algo = p_rest.algo,
.output_class = params.nclasses > 1, // cuML RF forest
.threshold = 1.f / params.nclasses, //Fixture::DatasetParams
.storage_type = p_rest.storage};
ML::fil::from_treelite(*handle, &forest, model, &tl_params);
// only time prediction
this->loopOnState(state, [this]() {
// Dataset<D, L> allocates y assuming one output value per input row,
// so not supporting predict_proba yet
for (int i = 0; i < p_rest.predict_repetitions; i++) {
ML::fil::predict(*this->handle, this->forest, this->data.y,
this->data.X, this->params.nrows, false);
}
});
}
void allocateBuffers(const ::benchmark::State& state) override {
Base::allocateBuffers(state);
}
void deallocateBuffers(const ::benchmark::State& state) override {
ML::fil::free(*handle, forest);
Base::deallocateBuffers(state);
}
private:
ML::fil::forest_t forest;
ModelHandle model;
Params p_rest;
};
struct FilBenchParams {
int nrows;
int ncols;
int nclasses;
int max_depth;
int ntrees;
ML::fil::storage_type_t storage;
ML::fil::algo_t algo;
};
std::vector<Params> getInputs() {
std::vector<Params> out;
Params p;
p.data.rowMajor = true;
p.blobs = {
.n_informative = -1, // Just a placeholder value, anyway changed below
.effective_rank = -1, // Just a placeholder value, anyway changed below
.bias = 0.f,
.tail_strength = 0.1,
.noise = 0.01,
.shuffle = false,
.seed = 12345ULL};
p.rf = set_rf_params(10, /*max_depth */
(1 << 20), /* max_leaves */
1.f, /* max_features */
32, /* n_bins */
1, /* split_algo */
3, /* min_samples_leaf */
3, /* min_samples_split */
0.0f, /* min_impurity_decrease */
true, /* bootstrap_features */
true, /* bootstrap */
1, /* n_trees */
1.f, /* max_samples */
1234ULL, /* seed */
ML::CRITERION::MSE, /* split_criterion */
false, /* quantile_per_tree */
8, /* n_streams */
false, /* use_experimental_backend */
128 /* max_batch_size */
);
using ML::fil::algo_t;
using ML::fil::storage_type_t;
std::vector<FilBenchParams> var_params = {
{(int)1e6, 20, 1, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG},
{(int)1e6, 20, 2, 5, 1000, storage_type_t::DENSE,
algo_t::BATCH_TREE_REORG}};
for (auto& i : var_params) {
p.data.nrows = i.nrows;
p.data.ncols = i.ncols;
p.blobs.n_informative = i.ncols / 3;
p.blobs.effective_rank = i.ncols / 3;
p.data.nclasses = i.nclasses;
p.rf.tree_params.max_depth = i.max_depth;
p.rf.n_trees = i.ntrees;
p.storage = i.storage;
p.algo = i.algo;
p.predict_repetitions = 10;
out.push_back(p);
}
return out;
}
ML_BENCH_REGISTER(Params, FIL, "", getInputs());
} // end namespace fil
} // end namespace Bench
} // end namespace ML
| daeacc192157452742c2b5dce88b807afc963d7a.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/fil/fil.h>
#include <cuml/tree/algo_helper.h>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace fil {
struct Params {
DatasetParams data;
RegressionParams blobs;
ModelHandle model;
ML::fil::storage_type_t storage;
ML::fil::algo_t algo;
RF_params rf;
int predict_repetitions;
};
class FIL : public RegressionFixture<float> {
typedef RegressionFixture<float> Base;
public:
FIL(const std::string& name, const Params& p)
/*
fitting to linear combinations in "y" normally yields trees that check
values of all significant columns, as well as their linear
combinations in "X". During inference, the exact threshold
values do not affect speed. The distribution of column popularity does
not affect speed barring lots of uninformative columns in succession.
Hence, this method represents real datasets well enough for both
classification and regression.
*/
: RegressionFixture<float>(name, p.data, p.blobs),
model(p.model),
p_rest(p) {}
static void regression_to_classification(float* y, int nrows, int nclasses,
cudaStream_t stream) {
raft::linalg::unaryOp(
y, y, nrows,
[=] __device__(float a) {
return float(lroundf(fabsf(a) * 1000. * nclasses) % nclasses);
},
stream);
}
protected:
void runBenchmark(::benchmark::State& state) override {
if (!params.rowMajor) {
state.SkipWithError("FIL only supports row-major inputs");
}
if (params.nclasses > 1) {
// convert regression ranges into [0..nclasses-1]
regression_to_classification(data.y, params.nrows, params.nclasses,
stream);
}
// create model
ML::RandomForestRegressorF rf_model;
auto* mPtr = &rf_model;
mPtr->trees = nullptr;
size_t train_nrows = std::min(params.nrows, 1000);
fit(*handle, mPtr, data.X, train_nrows, params.ncols, data.y, p_rest.rf);
CUDA_CHECK(cudaStreamSynchronize(stream));
ML::build_treelite_forest(&model, &rf_model, params.ncols,
params.nclasses > 1 ? 2 : 1);
ML::fil::treelite_params_t tl_params = {
.algo = p_rest.algo,
.output_class = params.nclasses > 1, // cuML RF forest
.threshold = 1.f / params.nclasses, //Fixture::DatasetParams
.storage_type = p_rest.storage};
ML::fil::from_treelite(*handle, &forest, model, &tl_params);
// only time prediction
this->loopOnState(state, [this]() {
// Dataset<D, L> allocates y assuming one output value per input row,
// so not supporting predict_proba yet
for (int i = 0; i < p_rest.predict_repetitions; i++) {
ML::fil::predict(*this->handle, this->forest, this->data.y,
this->data.X, this->params.nrows, false);
}
});
}
void allocateBuffers(const ::benchmark::State& state) override {
Base::allocateBuffers(state);
}
void deallocateBuffers(const ::benchmark::State& state) override {
ML::fil::free(*handle, forest);
Base::deallocateBuffers(state);
}
private:
ML::fil::forest_t forest;
ModelHandle model;
Params p_rest;
};
struct FilBenchParams {
int nrows;
int ncols;
int nclasses;
int max_depth;
int ntrees;
ML::fil::storage_type_t storage;
ML::fil::algo_t algo;
};
std::vector<Params> getInputs() {
std::vector<Params> out;
Params p;
p.data.rowMajor = true;
p.blobs = {
.n_informative = -1, // Just a placeholder value, anyway changed below
.effective_rank = -1, // Just a placeholder value, anyway changed below
.bias = 0.f,
.tail_strength = 0.1,
.noise = 0.01,
.shuffle = false,
.seed = 12345ULL};
p.rf = set_rf_params(10, /*max_depth */
(1 << 20), /* max_leaves */
1.f, /* max_features */
32, /* n_bins */
1, /* split_algo */
3, /* min_samples_leaf */
3, /* min_samples_split */
0.0f, /* min_impurity_decrease */
true, /* bootstrap_features */
true, /* bootstrap */
1, /* n_trees */
1.f, /* max_samples */
1234ULL, /* seed */
ML::CRITERION::MSE, /* split_criterion */
false, /* quantile_per_tree */
8, /* n_streams */
false, /* use_experimental_backend */
128 /* max_batch_size */
);
using ML::fil::algo_t;
using ML::fil::storage_type_t;
std::vector<FilBenchParams> var_params = {
{(int)1e6, 20, 1, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG},
{(int)1e6, 20, 2, 5, 1000, storage_type_t::DENSE,
algo_t::BATCH_TREE_REORG}};
for (auto& i : var_params) {
p.data.nrows = i.nrows;
p.data.ncols = i.ncols;
p.blobs.n_informative = i.ncols / 3;
p.blobs.effective_rank = i.ncols / 3;
p.data.nclasses = i.nclasses;
p.rf.tree_params.max_depth = i.max_depth;
p.rf.n_trees = i.ntrees;
p.storage = i.storage;
p.algo = i.algo;
p.predict_repetitions = 10;
out.push_back(p);
}
return out;
}
ML_BENCH_REGISTER(Params, FIL, "", getInputs());
} // end namespace fil
} // end namespace Bench
} // end namespace ML
|
19a8b5abed5442c824b082dca7a126617bf157f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/compress_inner_product_layer.hpp"
namespace caffe {
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
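// Per-block reduction over 2*NUM_THREADS elements: accumulates sum(|mask*w|), sum((mask*w)^2)
// and the number of non-zero masked weights, writing one partial result per block into mu, std and count.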
template <typename Dtype>
__global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask,
Dtype* mu, Dtype* std, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ Dtype param [4*NUM_THREADS];
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
if (s+t < n){
param[t] = fabs(mask[s+t]*wb[s+t]);
param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t];
if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1;
else tcount[t] = 0;
}
else{
param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0;
}
if (s+t+NUM_THREADS < n){
param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]);
param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS];
if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1;
else tcount[t+NUM_THREADS] = 0;
}
else{
param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
param[t] += param[t+stride];
param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride];
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
mu [blockIdx.x] = param[0];
std [blockIdx.x] = param[2*NUM_THREADS];
count[blockIdx.x] = tcount[0];
}
}
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
tcount[t] = 0;
if (s+t < n && mask[s+t]!=0){
tcount[t] = 1;
}
tcount[t+NUM_THREADS] = 0;
if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){
tcount[t+NUM_THREADS] = 1;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
count[blockIdx.x] = tcount[0];
}
}
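// Updates the pruning mask: weights whose magnitude falls below ~0.9*(mu + r*std) are pruned
// (mask set to 0), while pruned weights that grow above ~1.1*(mu + r*std) are spliced back in (mask set to 1).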
template <typename Dtype>
__global__ void CCMaskCalc(const int n, const Dtype* wb,
Dtype* mask, Dtype mu, Dtype std, Dtype r) {
CUDA_KERNEL_LOOP(index, n) {
if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0)))
mask[index] = 0;
else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0)))
mask[index] = 1;
}
}
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype* wb,
const Dtype* mask, Dtype* wb_t) {
CUDA_KERNEL_LOOP(index, n) {
wb_t[index] = wb[index] * mask[index];
}
}
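// Host-side helper: launches CCMomentCollect, copies the per-block partial results back, and
// accumulates them into *mu (sum of |w|), *std (sum of w^2) and *ncount (non-zero count);
// the caller turns these sums into the actual mean and standard deviation.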
template <typename Dtype>
void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){
const unsigned int NUM_THREADS = 512;
Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g;
Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
hipMalloc(&pmu_g, sizeof(Dtype) * num_p);
hipMalloc(&pstd_g, sizeof(Dtype) * num_p);
hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
hipLaunchKernelGGL(( CCMomentCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, wb, mask, pmu_g, pstd_g, pncount_g);
CUDA_POST_KERNEL_CHECK;
hipMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
hipMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i];
}
hipFree(pmu_g);hipFree(pstd_g);hipFree(pncount_g);
free(pmu_c);free(pstd_c);free(pncount_c);
}
template <typename Dtype>
void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){
const unsigned int NUM_THREADS = 512;
unsigned int* pncount_g;
unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
hipLaunchKernelGGL(( CCNzeroCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, mask, pncount_g);
CUDA_POST_KERNEL_CHECK;
hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*ncount += pncount_c[i];
}
hipFree(pncount_g);
free(pncount_c);
}
template <typename Dtype>
void CInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weightMask = this->blobs_[2]->mutable_gpu_data();
Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data();
const Dtype* bias = NULL;
Dtype* biasMask = NULL;
Dtype* biasTmp = NULL;
if (this->bias_term_) {
bias = this->blobs_[1]->mutable_gpu_data();
biasMask = this->blobs_[3]->mutable_gpu_data();
biasTmp = this->bias_tmp_.mutable_gpu_data();
}
if (this->phase_ == TRAIN){
// Calculate the mean and standard deviation of learnable parameters
if (this->std==0 && this->iter_==0){
unsigned int ncount = 0;
CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount);
if (this->bias_term_) {
CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount);
}
this->mu /= ncount; this->std -= ncount*mu*mu;
this->std /= ncount; this->std = sqrt(std);
LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n";
}
// Demonstrate the sparsity of compressed fully-connected layer
/********************************************************/
if(this->iter_%1000==0){
unsigned int ncount = 0;
CCNZeroCalc(this->blobs_[0]->count(), weightMask, &ncount);
if (this->bias_term_) {
CCNZeroCalc(this->blobs_[1]->count(), biasMask, &ncount);
}
float sparse_ratio = 100.0 * float(ncount)/(this->blobs_[0]->count() + this->blobs_[1]->count());
LOG(INFO)<<"===>> Sparsity [CInner]:"<<sparse_ratio<<"%"<<"\n";
}
/********************************************************/
// Calculate the weight mask and bias mask with probability
Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX);
if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) {
hipLaunchKernelGGL(( CCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
hipLaunchKernelGGL(( CCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias, biasMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
}
}
}
// Calculate the current (masked) weight and bias
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, weightTmp);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias, biasMask, biasTmp);
CUDA_POST_KERNEL_CHECK;
}
// Forward calculation with (masked) weight and bias
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
weightTmp, bottom_data, (Dtype)0., top_data);
if (this->bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
biasTmp, top_data);
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weightTmp, (Dtype)0., top_data);
if (this->bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(),
biasTmp, (Dtype)1., top_data);
}
}
template <typename Dtype>
void CInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
if (this->param_propagate_down_[0]) {
//const Dtype* weightMask = this->blobs_[2]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
//CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[2]->count()),
// CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[2]->count(), weight_diff, weightMask, weight_diff);
//CUDA_POST_KERNEL_CHECK;
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, bottom_data, (Dtype)1., weight_diff);
}
if (bias_term_ && this->param_propagate_down_[1]) {
//const Dtype* biasMask = this->blobs_[3]->gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
// Gradient with respect to bias
//CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()),
// CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
//CUDA_POST_KERNEL_CHECK;
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.gpu_data(), (Dtype)1.,bias_diff);
}
if (propagate_down[0]) {
const Dtype* weightTmp = this->weight_tmp_.gpu_data();
// Gradient with respect to bottom data
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, weightTmp, (Dtype)0.,
bottom[0]->mutable_gpu_diff());
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CInnerProductLayer);
} // namespace caffe
| 19a8b5abed5442c824b082dca7a126617bf157f9.cu | #include <vector>
#include "caffe/layers/compress_inner_product_layer.hpp"
namespace caffe {
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask,
Dtype* mu, Dtype* std, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ Dtype param [4*NUM_THREADS];
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
if (s+t < n){
param[t] = fabs(mask[s+t]*wb[s+t]);
param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t];
if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1;
else tcount[t] = 0;
}
else{
param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0;
}
if (s+t+NUM_THREADS < n){
param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]);
param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS];
if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1;
else tcount[t+NUM_THREADS] = 0;
}
else{
param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
param[t] += param[t+stride];
param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride];
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
mu [blockIdx.x] = param[0];
std [blockIdx.x] = param[2*NUM_THREADS];
count[blockIdx.x] = tcount[0];
}
}
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
tcount[t] = 0;
if (s+t < n && mask[s+t]!=0){
tcount[t] = 1;
}
tcount[t+NUM_THREADS] = 0;
if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){
tcount[t+NUM_THREADS] = 1;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
count[blockIdx.x] = tcount[0];
}
}
template <typename Dtype>
__global__ void CCMaskCalc(const int n, const Dtype* wb,
Dtype* mask, Dtype mu, Dtype std, Dtype r) {
CUDA_KERNEL_LOOP(index, n) {
if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0)))
mask[index] = 0;
else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0)))
mask[index] = 1;
}
}
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype* wb,
const Dtype* mask, Dtype* wb_t) {
CUDA_KERNEL_LOOP(index, n) {
wb_t[index] = wb[index] * mask[index];
}
}
template <typename Dtype>
void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){
const unsigned int NUM_THREADS = 512;
Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g;
Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
cudaMalloc(&pmu_g, sizeof(Dtype) * num_p);
cudaMalloc(&pstd_g, sizeof(Dtype) * num_p);
cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
CCMomentCollect<Dtype><<<num_p,NUM_THREADS>>>(n, wb, mask, pmu_g, pstd_g, pncount_g);
CUDA_POST_KERNEL_CHECK;
cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i];
}
cudaFree(pmu_g);cudaFree(pstd_g);cudaFree(pncount_g);
free(pmu_c);free(pstd_c);free(pncount_c);
}
template <typename Dtype>
void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){
const unsigned int NUM_THREADS = 512;
unsigned int* pncount_g;
unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
CCNzeroCollect<Dtype><<<num_p,NUM_THREADS>>>(n, mask, pncount_g);
CUDA_POST_KERNEL_CHECK;
cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*ncount += pncount_c[i];
}
cudaFree(pncount_g);
free(pncount_c);
}
template <typename Dtype>
void CInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weightMask = this->blobs_[2]->mutable_gpu_data();
Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data();
const Dtype* bias = NULL;
Dtype* biasMask = NULL;
Dtype* biasTmp = NULL;
if (this->bias_term_) {
bias = this->blobs_[1]->mutable_gpu_data();
biasMask = this->blobs_[3]->mutable_gpu_data();
biasTmp = this->bias_tmp_.mutable_gpu_data();
}
if (this->phase_ == TRAIN){
// Calculate the mean and standard deviation of learnable parameters
if (this->std==0 && this->iter_==0){
unsigned int ncount = 0;
CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount);
if (this->bias_term_) {
CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount);
}
this->mu /= ncount; this->std -= ncount*mu*mu;
this->std /= ncount; this->std = sqrt(std);
LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n";
}
// Demonstrate the sparsity of compressed fully-connected layer
/********************************************************/
if(this->iter_%1000==0){
unsigned int ncount = 0;
CCNZeroCalc(this->blobs_[0]->count(), weightMask, &ncount);
if (this->bias_term_) {
CCNZeroCalc(this->blobs_[1]->count(), biasMask, &ncount);
}
float sparse_ratio = 100.0 * float(ncount)/(this->blobs_[0]->count() + this->blobs_[1]->count());
LOG(INFO)<<"===>> Sparsity [CInner]:"<<sparse_ratio<<"%"<<"\n";
}
/********************************************************/
// Calculate the weight mask and bias mask with probability
Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX);
if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) {
CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
}
}
}
// Calculate the current (masked) weight and bias
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, biasTmp);
CUDA_POST_KERNEL_CHECK;
}
// Forward calculation with (masked) weight and bias
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
weightTmp, bottom_data, (Dtype)0., top_data);
if (this->bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
biasTmp, top_data);
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weightTmp, (Dtype)0., top_data);
if (this->bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(),
biasTmp, (Dtype)1., top_data);
}
}
template <typename Dtype>
void CInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
if (this->param_propagate_down_[0]) {
//const Dtype* weightMask = this->blobs_[2]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
//CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[2]->count()),
// CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[2]->count(), weight_diff, weightMask, weight_diff);
//CUDA_POST_KERNEL_CHECK;
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, bottom_data, (Dtype)1., weight_diff);
}
if (bias_term_ && this->param_propagate_down_[1]) {
//const Dtype* biasMask = this->blobs_[3]->gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
// Gradient with respect to bias
//CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()),
// CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
//CUDA_POST_KERNEL_CHECK;
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.gpu_data(), (Dtype)1.,bias_diff);
}
if (propagate_down[0]) {
const Dtype* weightTmp = this->weight_tmp_.gpu_data();
// Gradient with respect to bottom data
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, weightTmp, (Dtype)0.,
bottom[0]->mutable_gpu_diff());
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CInnerProductLayer);
} // namespace caffe
|
7399fe1862226d32be8de833422ed04a59e99136.hip | // !!! This is a file automatically generated by hipify!!!
/* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finally the output of RawToDigi is passed to the pixelClusterizer
*
**/
// C++ includes
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
// CUDA includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
// cub includes
#include <hipcub/hipcub.hpp>
// CMSSW includes
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"
#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelFedCablingMapGPU.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
namespace pixelgpudetails {
// number of words for all the FEDs
constexpr uint32_t MAX_FED_WORDS = pixelgpudetails::MAX_FED * pixelgpudetails::MAX_WORD;
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender() {
word_ = cudautils::make_host_noncached_unique<unsigned int[]>(MAX_FED_WORDS, hipHostMallocWriteCombined);
fedId_ = cudautils::make_host_noncached_unique<unsigned char[]>(MAX_FED_WORDS, hipHostMallocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const cms_uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(cms_uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - 1200, length / 2);
}
////////////////////
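// Bit-field accessors: each 32-bit raw word packs the link, ROC and ADC values (plus either
// row/col or dcol/pxid), extracted with the *_shift / *_mask constants from pixelgpudetails.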
__device__ uint32_t getLink(uint32_t ww) {
return ((ww >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask);
}
__device__ uint32_t getRoc(uint32_t ww) { return ((ww >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask); }
__device__ uint32_t getADC(uint32_t ww) { return ((ww >> pixelgpudetails::ADC_shift) & pixelgpudetails::ADC_mask); }
__device__ bool isBarrel(uint32_t rawId) { return (1 == ((rawId >> 25) & 0x7)); }
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelFedCablingMapGPU *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->RawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
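// Each ROC maps its local (row, col) to module-global coordinates through a +/-1 slope and a
// row/column offset chosen from the module orientation (barrel/forward, side, layer, ROC index).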
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
//printf("Inside frameConversion row: %u, column: %u\n", gRow, gCol);
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
// debug = true;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
uint32_t numRowsInRoc = 80;
uint32_t numColsInRoc = 52;
/// row and column in ROC representation
return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
__device__ uint8_t checkROC(
uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) {
uint8_t errorType = (errorWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> pixelgpudetails::OMIT_ERR_shift) & pixelgpudetails::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int StateMatch_bits = 4;
int StateMatch_shift = 8;
uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits);
int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask;
if (StateMatch != 1 && StateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (StateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
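// Recover the DetId (raw module id) associated with an error word so the error can be attached to
// a module; for timeout errors (type 29) the channel number is decoded from the data bits.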
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelFedCablingMapGPU *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
//set dummy values for cabling just to get detId from link
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
// set dummy values for cabling just to get detId from link if in Barrel
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 37:
case 38: {
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = (errWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
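// One thread per 32-bit word (grid-stride loop): each word is decoded into link/ROC, converted to
// global pixel coordinates and written to the xx/yy/adc/pdigi/rawIdArr/moduleId arrays; decoding
// problems are appended to the err vector when includeErrors is set.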
__global__ void RawToDigi_kernel(const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
GPU::SimpleVector<PixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many continue statements below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = 9999;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = getLink(ww); // Extract link
uint32_t roc = getRoc(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(PixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.RawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0; //, ladder =0;
int side = 0, panel = 0, module = 0; //disk = 0, blade = 0
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
//disk = (rawId >> diskStartBit_) & diskMask_;
side = (panel == 1) ? -1 : 1;
//blade = (rawId >> bladeStartBit_) & bladeMask_;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = (ww >> pixelgpudetails::COL_shift) & pixelgpudetails::COL_mask;
uint32_t row = (ww >> pixelgpudetails::ROW_shift) & pixelgpudetails::ROW_mask;
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = (ww >> pixelgpudetails::DCOL_shift) & pixelgpudetails::DCOL_mask;
uint32_t pxid = (ww >> pixelgpudetails::PXID_shift) & pixelgpudetails::PXID_mask;
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
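// Turn per-module cluster counts into cumulative offsets: clamp each count to maxHitsInModule(),
// run two block-wide prefix scans (1024 entries each) to cover all modules, then cap the running
// totals at MaxNumClusters.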
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::MaxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to MaxHitsInModule;
for (int i = first, iend = gpuClustering::MaxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = ::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::MaxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = ::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::MaxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::MaxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
// avoid overflow
constexpr auto MAX_HITS = gpuClustering::MaxNumClusters;
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (moduleStart[i] > MAX_HITS)
moduleStart[i] = MAX_HITS;
}
}
// Interface to outside
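// makeClustersAsync drives the whole chain on the given stream: copy the FED words to the device,
// run RawToDigi_kernel, calibrate the digis, count modules, find clusters, apply the cluster
// charge cut and fill the per-module hit offsets.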
void SiPixelRawToClusterGPUKernel::makeClustersAsync(const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
PixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
bool useQualityInfo,
bool includeErrors,
bool debug,
cuda::stream_t<> &stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << pixelgpudetails::MAX_FED_WORDS << std::endl;
#endif
digis_d = SiPixelDigisCUDA(pixelgpudetails::MAX_FED_WORDS, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(pixelgpudetails::MAX_FED_WORDS, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::MaxNumModules, stream);
nModules_Clusters_h = cudautils::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cudautils::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cudautils::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(hipMemcpyAsync(
word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), hipMemcpyDefault, stream.id()));
cudaCheck(hipMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, hipMemcpyDefault, stream.id()));
// Launch rawToDigi kernel
hipLaunchKernelGGL(( RawToDigi_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(),
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(::max(int(wordCounter), int(gpuClustering::MaxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( gpuCalibPixel::calibDigis), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(), digis_d.moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
hipLaunchKernelGGL(( countModules), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(),
digis_d.c_moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(hipGetLastError());
// read the number of modules into a data member, used by getProduct()
cudaCheck(hipMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), hipMemcpyDefault, stream.id()));
threadsPerBlock = 256;
blocks = MaxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
hipLaunchKernelGGL(( findClus), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(), digis_d.c_moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// apply charge cut
hipLaunchKernelGGL(( clusterChargeCut), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(), digis_d.moduleInd(),
digis_d.c_adc(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.c_moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
hipLaunchKernelGGL(( fillHitsModuleStart), dim3(1), dim3(1024), 0, stream.id(), clusters_d.c_clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(hipMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::MaxNumModules,
sizeof(uint32_t),
hipMemcpyDefault,
stream.id()));
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
| 7399fe1862226d32be8de833422ed04a59e99136.cu | /* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finally the output of RawToDigi is passed to the pixelClusterizer
*
**/
// C++ includes
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
// CUDA includes
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
// cub includes
#include <cub/cub.cuh>
// CMSSW includes
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"
#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelFedCablingMapGPU.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
namespace pixelgpudetails {
// number of words for all the FEDs
constexpr uint32_t MAX_FED_WORDS = pixelgpudetails::MAX_FED * pixelgpudetails::MAX_WORD;
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender() {
word_ = cudautils::make_host_noncached_unique<unsigned int[]>(MAX_FED_WORDS, cudaHostAllocWriteCombined);
fedId_ = cudautils::make_host_noncached_unique<unsigned char[]>(MAX_FED_WORDS, cudaHostAllocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const cms_uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(cms_uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - 1200, length / 2);
}
////////////////////
__device__ uint32_t getLink(uint32_t ww) {
return ((ww >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask);
}
__device__ uint32_t getRoc(uint32_t ww) { return ((ww >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask); }
__device__ uint32_t getADC(uint32_t ww) { return ((ww >> pixelgpudetails::ADC_shift) & pixelgpudetails::ADC_mask); }
__device__ bool isBarrel(uint32_t rawId) { return (1 == ((rawId >> 25) & 0x7)); }
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelFedCablingMapGPU *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->RawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
//printf("Inside frameConversion row: %u, column: %u\n", gRow, gCol);
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
// debug = true;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
uint32_t numRowsInRoc = 80;
uint32_t numColsInRoc = 52;
/// row and column in ROC representation
return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
__device__ uint8_t checkROC(
uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) {
uint8_t errorType = (errorWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> pixelgpudetails::OMIT_ERR_shift) & pixelgpudetails::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int StateMatch_bits = 4;
int StateMatch_shift = 8;
uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits);
int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask;
if (StateMatch != 1 && StateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (StateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelFedCablingMapGPU *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
//set dummy values for cabling just to get detId from link
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
// set dummy values for cabling just to get detId from link if in Barrel
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 37:
case 38: {
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = (errWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
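// One thread per 32-bit word (grid-stride loop): each word is decoded into link/ROC, converted to
// global pixel coordinates and written to the xx/yy/adc/pdigi/rawIdArr/moduleId arrays; decoding
// problems are appended to the err vector when includeErrors is set.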
__global__ void RawToDigi_kernel(const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
GPU::SimpleVector<PixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many continue statements below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = 9999;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = getLink(ww); // Extract link
uint32_t roc = getRoc(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(PixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.RawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0; //, ladder =0;
int side = 0, panel = 0, module = 0; //disk = 0, blade = 0
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
//disk = (rawId >> diskStartBit_) & diskMask_;
side = (panel == 1) ? -1 : 1;
//blade = (rawId >> bladeStartBit_) & bladeMask_;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = (ww >> pixelgpudetails::COL_shift) & pixelgpudetails::COL_mask;
uint32_t row = (ww >> pixelgpudetails::ROW_shift) & pixelgpudetails::ROW_mask;
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = (ww >> pixelgpudetails::DCOL_shift) & pixelgpudetails::DCOL_mask;
uint32_t pxid = (ww >> pixelgpudetails::PXID_shift) & pixelgpudetails::PXID_mask;
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
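// Turn per-module cluster counts into cumulative offsets: clamp each count to maxHitsInModule(),
// run two block-wide prefix scans (1024 entries each) to cover all modules, then cap the running
// totals at MaxNumClusters.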
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::MaxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to MaxHitsInModule;
for (int i = first, iend = gpuClustering::MaxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = std::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::MaxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = std::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::MaxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::MaxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
// avoid overflow
constexpr auto MAX_HITS = gpuClustering::MaxNumClusters;
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (moduleStart[i] > MAX_HITS)
moduleStart[i] = MAX_HITS;
}
}
// Interface to outside
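// makeClustersAsync drives the whole chain on the given stream: copy the FED words to the device,
// run RawToDigi_kernel, calibrate the digis, count modules, find clusters, apply the cluster
// charge cut and fill the per-module hit offsets.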
void SiPixelRawToClusterGPUKernel::makeClustersAsync(const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
PixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
bool useQualityInfo,
bool includeErrors,
bool debug,
cuda::stream_t<> &stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << pixelgpudetails::MAX_FED_WORDS << std::endl;
#endif
digis_d = SiPixelDigisCUDA(pixelgpudetails::MAX_FED_WORDS, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(pixelgpudetails::MAX_FED_WORDS, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::MaxNumModules, stream);
nModules_Clusters_h = cudautils::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cudautils::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cudautils::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(cudaMemcpyAsync(
word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), cudaMemcpyDefault, stream.id()));
cudaCheck(cudaMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, cudaMemcpyDefault, stream.id()));
// Launch rawToDigi kernel
RawToDigi_kernel<<<blocks, threadsPerBlock, 0, stream.id()>>>(
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(std::max(int(wordCounter), int(gpuClustering::MaxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
gpuCalibPixel::calibDigis<<<blocks, threadsPerBlock, 0, stream.id()>>>(digis_d.moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
countModules<<<blocks, threadsPerBlock, 0, stream.id()>>>(
digis_d.c_moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(cudaGetLastError());
// read the number of modules into a data member, used by getProduct()
cudaCheck(cudaMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), cudaMemcpyDefault, stream.id()));
threadsPerBlock = 256;
blocks = MaxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
findClus<<<blocks, threadsPerBlock, 0, stream.id()>>>(digis_d.c_moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// apply charge cut
clusterChargeCut<<<blocks, threadsPerBlock, 0, stream.id()>>>(digis_d.moduleInd(),
digis_d.c_adc(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.c_moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
fillHitsModuleStart<<<1, 1024, 0, stream.id()>>>(clusters_d.c_clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(cudaMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::MaxNumModules,
sizeof(uint32_t),
cudaMemcpyDefault,
stream.id()));
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
|
bde10af92faef251e63e28475e40e57924f47b12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CImg-2.5.0/CImg.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <cmath>
#include <time.h>
#include <windows.h>
#include <stddef.h>
#define cimg_use_jpeg 1
static void HandleError(hipError_t err,
const char *file,
int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define WRITE_FLATTEN_MATRIX false
#define GET_FLATTEN_MATRIX true
#include "device_launch_parameters.h"
using namespace cimg_library;
using namespace std;
void filter(double *matrix, double *copy, double *mask, int width, int height, int channels, int maskLength, double normalize);
void writeOrGetMatrix(CImg<double> &image, double *matrix, bool type);
double getNormalize(double *mask, int size);
void blow(double *mask, int size);
void sharpen(double *mask);
void prominence(double *mask);
void gradient(double *mask);
int init();
int getMaskLength(int filterType);
void setFilter(double *mask, int filterType);
void laplace(double *mask);
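// Convolution kernel: one thread per (pixel, channel) element; border pixels are left unchanged,
// interior pixels are multiplied by the mask, normalized by the mask sum (if non-zero) and clamped
// to [0, 1]. Note that divMaskLength is (maskLength / 2) - 1 here, so the sampled window is shifted
// relative to the pixel, unlike the centred window used by the CPU filter().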
__global__ void dev_filter(double *dev_matrix, double *dev_kopia, double *dev_mask, int width, int height, int channels, int maskLength, double *dev_sample, double normalize){
int x = blockIdx.x*blockDim.x + threadIdx.x; // map the current thread to its x coordinate
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = x + y * gridDim.x; // linear (1D) index of the thread
int sampleX = 0;
int sampleY = 0;
double result = 0;
int divMaskLength = (maskLength / 2)-1; // mask size measured from the centre, i.e. the pixel being processed; needed by the loop that multiplies the mask and the sample
if ((x != 0) && (y != 0) && (x != width - 1) && (y != height - 1)) { // main loop; pixels on the outer border of the image are not processed
for (int xx = -divMaskLength; xx < (maskLength - divMaskLength); xx++) { // build a sample the size of the mask, then multiply it element by element
for (int yy = -divMaskLength; yy < (maskLength - divMaskLength); yy++) {
dev_sample[sampleY + sampleX * maskLength] = dev_kopia[offset + yy + xx * width];
result += dev_sample[sampleY + sampleX * maskLength] * dev_mask[sampleY + sampleX * maskLength];
sampleY++;
}
sampleX++;
sampleY = 0;
}
if (normalize != 0) result /= normalize; // normalization
if (result >= 1) result = 1;
if (result <= 0) result = 0;
dev_matrix[offset] = result;
__syncthreads();
}
}
int main(void) {
int filterType = init();
int maskLength = getMaskLength(filterType);
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
int originalColor = 7;
int CPUcolor = 9;
int GPUcolor = 2;
time_t start;
time_t stop;
double timeResult = 0;
CImg<double> image("biedronka.jpg");
CImgDisplay display(image, "Oryginal");
int width = image.width();
int height = image.height();
int channels = image.spectrum();
CImg<double> output(width, height, 1, channels);
CImg<double> outputCPU(width, height, 1, channels);
double *matrix = new double[width * height * channels];
double *dev_matrix;
double *kopia = new double[width * height * channels];
double *dev_kopia;
double *mask = new double[maskLength * maskLength];
double *dev_mask;
double *sample = new double[maskLength * maskLength];
double *dev_sample;
writeOrGetMatrix(image, matrix, WRITE_FLATTEN_MATRIX);
writeOrGetMatrix(image, kopia, WRITE_FLATTEN_MATRIX);
setFilter(mask, filterType);
double normalize = getNormalize(mask, maskLength);
// ********************* CPU *************************
// ===================================================
start = clock();
filter(matrix, kopia, mask, width, height, channels, maskLength, normalize);
stop = clock();
timeResult = (double)(stop - start) / CLOCKS_PER_SEC;
SetConsoleTextAttribute(hConsole, CPUcolor);
cout << " CZAS CPU: ";
SetConsoleTextAttribute(hConsole, originalColor);
cout << timeResult << "s";
cout << endl << endl;
writeOrGetMatrix(outputCPU, matrix, GET_FLATTEN_MATRIX);
CImgDisplay display2(outputCPU, "CPU");
writeOrGetMatrix(image, matrix, WRITE_FLATTEN_MATRIX);
writeOrGetMatrix(image, kopia, WRITE_FLATTEN_MATRIX);
// ********************* GPU *************************
// ===================================================
int sizeX = width;
int sizeY = height * channels;
int TILE = 1;
dim3 block(TILE, TILE);
int grid_x = sizeX;
int grid_y = sizeY;
dim3 grid(grid_x, grid_y);
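// With TILE = 1 every block holds a single thread and the grid is width x (height * channels):
// simple to index (offset = x + y * gridDim.x) but it leaves most of each warp idle.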
// ************* GPU MEMORY ALLOCATION *************
// ===================================================
HANDLE_ERROR(hipMalloc((void**)&dev_matrix, width * height * channels * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_kopia, width * height * channels * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_mask, maskLength * maskLength * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&dev_sample, maskLength * maskLength * sizeof(double)));
// ************* COPY MEMORY TO THE GPU *************
// =====================================================
HANDLE_ERROR(hipMemcpy(dev_matrix, matrix, width * height * channels * sizeof(double), hipMemcpyHostToDevice)); // copy to GPU
HANDLE_ERROR(hipMemcpy(dev_kopia, matrix, width * height * channels * sizeof(double), hipMemcpyHostToDevice)); // copy to GPU
HANDLE_ERROR(hipMemcpy(dev_mask, mask, maskLength * maskLength * sizeof(double), hipMemcpyHostToDevice)); // copy to GPU
HANDLE_ERROR(hipMemcpy(dev_sample, sample, maskLength * maskLength * sizeof(double), hipMemcpyHostToDevice)); // copy to GPU
// **************** KERNEL LAUNCH ******************
// =====================================================
start = clock();
hipLaunchKernelGGL(( dev_filter) , dim3(grid), dim3(block) , 0, 0, dev_matrix, dev_kopia, dev_mask, width, height, channels, maskLength, dev_sample, normalize);
stop = clock();
timeResult = (double)(stop - start) / CLOCKS_PER_SEC;
SetConsoleTextAttribute(hConsole, GPUcolor);
cout << " CZAS GPU: ";
SetConsoleTextAttribute(hConsole, originalColor);
cout << timeResult << "s";
cout << endl << endl;
// ************* COPY MEMORY BACK TO THE CPU *************
// =====================================================
HANDLE_ERROR(hipMemcpy(matrix, dev_matrix, width * height * channels * sizeof(double), hipMemcpyDeviceToHost)); // copy from GPU to CPU
writeOrGetMatrix(output, matrix, GET_FLATTEN_MATRIX);
CImgDisplay display3(output, "GPU");
while (!(display.is_closed() && display2.is_closed() && display3.is_closed())){
display.wait();
display2.wait();
display3.wait();
}
// **************** FREE MEMORY *****************
// =====================================================
delete[] mask;
delete[] matrix;
delete[] kopia;
delete[] sample;
hipFree(dev_matrix);
hipFree(dev_kopia);
hipFree(dev_mask);
return 0;
}
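// Copies the image into a flat [channel][row][col] double array scaled to [0, 1]
// (type == WRITE_FLATTEN_MATRIX) or writes the flat array back into the image
// (type == GET_FLATTEN_MATRIX).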
void writeOrGetMatrix(CImg<double> &image, double *matrix, bool type) {
int width = image.width();
int height = image.height();
int channels = image.spectrum();
for (int c = 0; c < channels; c++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
if(!type) matrix[x + y * width + c * width * height] = image(x, y, c)/255;
else image(x, y, c) = matrix[x + y * width + c * width * height];
}
}
}
}
double getNormalize(double *mask, int size) {
double result = 0;
for (int i = 0; i < size * size; i++) {
result += mask[i];
}
return result;
}
void blow(double *mask, int size) {
for (int i = 0; i < size * size; i++) {
mask[i] = 1;
}
}
void sharpen(double *mask) {
mask[0] = 0;
mask[1] = -1;
mask[2] = 0;
mask[3] = -1;
mask[4] = 5;
mask[5] = -1;
mask[6] = 0;
mask[7] = -1;
mask[8] = 0;
}
void prominence(double *mask) {
mask[0] = -1;
mask[1] = 0;
mask[2] = 1;
mask[3] = -1;
mask[4] = 1;
mask[5] = 1;
mask[6] = -1;
mask[7] = 0;
mask[8] = 1;
}
void gradient(double *mask) {
mask[0] = 0;
mask[1] = 0;
mask[2] = 0;
mask[3] = -1;
mask[4] = 1;
mask[5] = 0;
mask[6] = 0;
mask[7] = 0;
mask[8] = 0;
}
void laplace(double *mask) {
mask[0] = -1;
mask[1] = -1;
mask[2] = -1;
mask[3] = -1;
mask[4] = 8;
mask[5] = -1;
mask[6] = -1;
mask[7] = -1;
mask[8] = -1;
}
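// CPU reference implementation of the same convolution: loops over the three colour channels,
// rows and columns, skips a border of floor(maskLength / 2) pixels and clamps the normalized
// result to [0, 1].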
void filter(double *matrix, double *copy, double *mask, int width, int height, int channels, int maskLength, double normalize) {
double result = 0;
int sampleX = 0;
int sampleY = 0;
double divMaskLength = floor(maskLength / 2);
int index = 0;
double *sample = new double[maskLength * maskLength];
int cnt = 0;
double mnozenie = 0;
for (int k = 0; k < 3; k++) {
for (int i = divMaskLength; i < (height - divMaskLength); i++) {
for (int j = divMaskLength; j < (width - divMaskLength); j++) {
index = j + (i * width) + (k * width * height);
for (int x = -divMaskLength; x < (maskLength - divMaskLength); x++) {
for (int y = -divMaskLength; y < (maskLength - divMaskLength); y++) {
sample[sampleY + sampleX * maskLength] = copy[index + y + x * width];
mnozenie = (sample[sampleY + sampleX * maskLength] * mask[sampleY + sampleX * maskLength]);
result = result + mnozenie;
sampleY++;
}
sampleX++;
sampleY = 0;
}
if(normalize != 0) result = result / normalize;
if (result >= 1) result = 1;
if (result <= 0) result = 0;
matrix[index] = result;
result = 0;
sampleX = 0;
sampleY = 0;
}
}
}
delete[] sample;
}
int init() {
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
int availableColor = 16;
int originalColor = 7;
int setColor = 14;
int errorColor = 12;
int maskLength;
string text;
int number;
int filtersCount = 5;
string filtres[5] = {
" * (1) ROZMYCIE",
" * (2) WYOSTRZENIE",
" * (3) UWYDATNIENIE",
" * (4) GRADIENT",
" * (5) LAPLACE"
};
do {
system("cls");
SetConsoleTextAttribute(hConsole, availableColor);
cout << " Dostepne filtry: " << endl;
SetConsoleTextAttribute(hConsole, originalColor);
cout << "---------------------------" << endl;
cout << filtres[0] << endl;
cout << filtres[1] << endl;
cout << filtres[2] << endl;
cout << filtres[3] << endl;
cout << filtres[4] << endl;
cout << "---------------------------";
cout << endl << endl;
cout << " Wprowadz numer: ";
cin >> text;
number = atoi(text.c_str());
number -= 1;
if (!(number > -1 && number < filtersCount)) {
SetConsoleTextAttribute(hConsole, errorColor);
cout << " error!";
SetConsoleTextAttribute(hConsole, originalColor);
Sleep(1000);
}
else {
Sleep(250);
}
} while (!(number > -1 && number < filtersCount));
cout << "---------------------------" << endl;
cout << endl << endl;
system("cls");
SetConsoleTextAttribute(hConsole, availableColor);
cout << " Dostepne filtry: " << endl;
SetConsoleTextAttribute(hConsole, originalColor);
cout << "---------------------------" << endl;
for (int i = 0; i < filtersCount; i++) {
if (i == number) {
SetConsoleTextAttribute(hConsole, setColor);
cout << filtres[i] << endl;
}
else {
SetConsoleTextAttribute(hConsole, originalColor);
cout << filtres[i] << endl;
}
}
SetConsoleTextAttribute(hConsole, originalColor);
cout << "---------------------------";
cout << endl << endl;
return number + 1;
}
int getMaskLength(int filterType) {
switch (filterType) {
case 1:
return 5;
break;
default:
return 3;
break;
}
}
void setFilter(double *mask, int filterType) {
switch (filterType) {
case 1:
blow(mask, 5);
break;
case 2:
sharpen(mask);
break;
case 3:
prominence(mask);
break;
case 4:
gradient(mask);
break;
case 5:
laplace(mask);
break;
}
}
| bde10af92faef251e63e28475e40e57924f47b12.cu |
#include "cuda_runtime.h"
#include "CImg-2.5.0/CImg.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <cmath>
#include <time.h>
#include <windows.h>
#include <stddef.h>
#define cimg_use_jpeg 1
static void HandleError(cudaError_t err,
const char *file,
int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define WRITE_FLATTEN_MATRIX false
#define GET_FLATTEN_MATRIX true
#include "device_launch_parameters.h"
using namespace cimg_library;
using namespace std;
void filter(double *matrix, double *copy, double *mask, int width, int height, int channels, int maskLength, double normalize);
void writeOrGetMatrix(CImg<double> &image, double *matrix, bool type);
double getNormalize(double *mask, int size);
void blow(double *mask, int size);
void sharpen(double *mask);
void prominence(double *mask);
void gradient(double *mask);
int init();
int getMaskLength(int filterType);
void setFilter(double *mask, int filterType);
void laplace(double *mask);
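// Convolution kernel: one thread per (pixel, channel) element; border pixels are left unchanged,
// interior pixels are multiplied by the mask, normalized by the mask sum (if non-zero) and clamped
// to [0, 1]. Note that divMaskLength is (maskLength / 2) - 1 here, so the sampled window is shifted
// relative to the pixel, unlike the centred window used by the CPU filter().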
__global__ void dev_filter(double *dev_matrix, double *dev_kopia, double *dev_mask, int width, int height, int channels, int maskLength, double *dev_sample, double normalize){
int x = blockIdx.x*blockDim.x + threadIdx.x; // map the current thread to its x coordinate
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = x + y * gridDim.x; // linear (1D) index of the thread
int sampleX = 0;
int sampleY = 0;
double result = 0;
int divMaskLength = (maskLength / 2)-1; // mask size measured from the centre, i.e. the pixel being processed; needed by the loop that multiplies the mask and the sample
if ((x != 0) && (y != 0) && (x != width - 1) && (y != height - 1)) { // main loop; pixels on the outer border of the image are not processed
for (int xx = -divMaskLength; xx < (maskLength - divMaskLength); xx++) { // build a sample the size of the mask, then multiply it element by element
for (int yy = -divMaskLength; yy < (maskLength - divMaskLength); yy++) {
dev_sample[sampleY + sampleX * maskLength] = dev_kopia[offset + yy + xx * width];
result += dev_sample[sampleY + sampleX * maskLength] * dev_mask[sampleY + sampleX * maskLength];
sampleY++;
}
sampleX++;
sampleY = 0;
}
if (normalize != 0) result /= normalize; // normalization
if (result >= 1) result = 1;
if (result <= 0) result = 0;
dev_matrix[offset] = result;
__syncthreads();
}
}
int main(void) {
int filterType = init();
int maskLength = getMaskLength(filterType);
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
int originalColor = 7;
int CPUcolor = 9;
int GPUcolor = 2;
time_t start;
time_t stop;
double timeResult = 0;
CImg<double> image("biedronka.jpg");
CImgDisplay display(image, "Oryginal");
int width = image.width();
int height = image.height();
int channels = image.spectrum();
CImg<double> output(width, height, 1, channels);
CImg<double> outputCPU(width, height, 1, channels);
double *matrix = new double[width * height * channels];
double *dev_matrix;
double *kopia = new double[width * height * channels];
double *dev_kopia;
double *mask = new double[maskLength * maskLength];
double *dev_mask;
double *sample = new double[maskLength * maskLength];
double *dev_sample;
writeOrGetMatrix(image, matrix, WRITE_FLATTEN_MATRIX);
writeOrGetMatrix(image, kopia, WRITE_FLATTEN_MATRIX);
setFilter(mask, filterType);
double normalize = getNormalize(mask, maskLength);
// ********************* CPU *************************
// ===================================================
start = clock();
filter(matrix, kopia, mask, width, height, channels, maskLength, normalize);
stop = clock();
timeResult = (double)(stop - start) / CLOCKS_PER_SEC;
SetConsoleTextAttribute(hConsole, CPUcolor);
cout << " CZAS CPU: ";
SetConsoleTextAttribute(hConsole, originalColor);
cout << timeResult << "s";
cout << endl << endl;
writeOrGetMatrix(outputCPU, matrix, GET_FLATTEN_MATRIX);
CImgDisplay display2(outputCPU, "CPU");
writeOrGetMatrix(image, matrix, WRITE_FLATTEN_MATRIX);
writeOrGetMatrix(image, kopia, WRITE_FLATTEN_MATRIX);
// ********************* GPU *************************
// ===================================================
int sizeX = width;
int sizeY = height * channels;
int TILE = 1;
dim3 block(TILE, TILE);
int grid_x = sizeX;
int grid_y = sizeY;
dim3 grid(grid_x, grid_y);
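	// With TILE = 1 each block holds a single thread, so the grid spans width x (height*channels)
	// blocks and gridDim.x == width, which is what makes offset = x + y*gridDim.x address the
	// flattened image correctly inside dev_filter; raising TILE would require revisiting that index math.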
	// ************* GPU MEMORY ALLOCATION ***************
// ===================================================
HANDLE_ERROR(cudaMalloc((void**)&dev_matrix, width * height * channels * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_kopia, width * height * channels * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_mask, maskLength * maskLength * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&dev_sample, maskLength * maskLength * sizeof(double)));
	// ************* COPYING MEMORY TO THE GPU *************
// =====================================================
	HANDLE_ERROR(cudaMemcpy(dev_matrix, matrix, width * height * channels * sizeof(double), cudaMemcpyHostToDevice)); // copy to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_kopia, matrix, width * height * channels * sizeof(double), cudaMemcpyHostToDevice)); // copy to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_mask, mask, maskLength * maskLength * sizeof(double), cudaMemcpyHostToDevice)); // copy to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_sample, sample, maskLength * maskLength * sizeof(double), cudaMemcpyHostToDevice)); // copy to the GPU
	// **************** KERNEL LAUNCH **********************
// =====================================================
start = clock();
dev_filter <<<grid, block >>> (dev_matrix, dev_kopia, dev_mask, width, height, channels, maskLength, dev_sample, normalize);
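	// Kernel launches are asynchronous, so the device has to be synchronized before reading the
	// clock again; otherwise only the launch overhead would be measured.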
	HANDLE_ERROR(cudaDeviceSynchronize()); // wait for the kernel to finish before stopping the timer
	stop = clock();
timeResult = (double)(stop - start) / CLOCKS_PER_SEC;
SetConsoleTextAttribute(hConsole, GPUcolor);
cout << " CZAS GPU: ";
SetConsoleTextAttribute(hConsole, originalColor);
cout << timeResult << "s";
cout << endl << endl;
	// ************* COPYING MEMORY BACK TO THE CPU ********
// =====================================================
	HANDLE_ERROR(cudaMemcpy(matrix, dev_matrix, width * height * channels * sizeof(double), cudaMemcpyDeviceToHost)); // copy from the GPU back to the CPU
writeOrGetMatrix(output, matrix, GET_FLATTEN_MATRIX);
CImgDisplay display3(output, "GPU");
while (!(display.is_closed() && display2.is_closed() && display3.is_closed())){
display.wait();
display2.wait();
display3.wait();
}
	// **************** FREEING MEMORY *********************
// =====================================================
delete[] mask;
delete[] matrix;
delete[] kopia;
delete[] sample;
cudaFree(dev_matrix);
cudaFree(dev_kopia);
	cudaFree(dev_mask);
	cudaFree(dev_sample); // dev_sample was also allocated on the device and must be released
return 0;
}
void writeOrGetMatrix(CImg<double> &image, double *matrix, bool type) {
int width = image.width();
int height = image.height();
int channels = image.spectrum();
for (int c = 0; c < channels; c++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
if(!type) matrix[x + y * width + c * width * height] = image(x, y, c)/255;
else image(x, y, c) = matrix[x + y * width + c * width * height];
}
}
}
}
double getNormalize(double *mask, int size) {
double result = 0;
for (int i = 0; i < size * size; i++) {
result += mask[i];
}
return result;
}
void blow(double *mask, int size) {
for (int i = 0; i < size * size; i++) {
mask[i] = 1;
}
}
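// The helpers below fill fixed 3x3 masks (stored as flat arrays of 9 coefficients):
// sharpen, prominence (emboss-like), a horizontal gradient and a Laplacian. blow() above is
// the exception -- it fills an all-ones averaging mask of the requested size (5x5 for filter type 1).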
void sharpen(double *mask) {
mask[0] = 0;
mask[1] = -1;
mask[2] = 0;
mask[3] = -1;
mask[4] = 5;
mask[5] = -1;
mask[6] = 0;
mask[7] = -1;
mask[8] = 0;
}
void prominence(double *mask) {
mask[0] = -1;
mask[1] = 0;
mask[2] = 1;
mask[3] = -1;
mask[4] = 1;
mask[5] = 1;
mask[6] = -1;
mask[7] = 0;
mask[8] = 1;
}
void gradient(double *mask) {
mask[0] = 0;
mask[1] = 0;
mask[2] = 0;
mask[3] = -1;
mask[4] = 1;
mask[5] = 0;
mask[6] = 0;
mask[7] = 0;
mask[8] = 0;
}
void laplace(double *mask) {
mask[0] = -1;
mask[1] = -1;
mask[2] = -1;
mask[3] = -1;
mask[4] = 8;
mask[5] = -1;
mask[6] = -1;
mask[7] = -1;
mask[8] = -1;
}
void filter(double *matrix, double *copy, double *mask, int width, int height, int channels, int maskLength, double normalize) {
double result = 0;
int sampleX = 0;
int sampleY = 0;
double divMaskLength = floor(maskLength / 2);
int index = 0;
double *sample = new double[maskLength * maskLength];
int cnt = 0;
double mnozenie = 0;
for (int k = 0; k < 3; k++) {
for (int i = divMaskLength; i < (height - divMaskLength); i++) {
for (int j = divMaskLength; j < (width - divMaskLength); j++) {
index = j + (i * width) + (k * width * height);
for (int x = -divMaskLength; x < (maskLength - divMaskLength); x++) {
for (int y = -divMaskLength; y < (maskLength - divMaskLength); y++) {
sample[sampleY + sampleX * maskLength] = copy[index + y + x * width];
mnozenie = (sample[sampleY + sampleX * maskLength] * mask[sampleY + sampleX * maskLength]);
result = result + mnozenie;
sampleY++;
}
sampleX++;
sampleY = 0;
}
if(normalize != 0) result = result / normalize;
if (result >= 1) result = 1;
if (result <= 0) result = 0;
matrix[index] = result;
result = 0;
sampleX = 0;
sampleY = 0;
}
}
}
delete[] sample;
}
int init() {
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
int availableColor = 16;
int originalColor = 7;
int setColor = 14;
int errorColor = 12;
int maskLength;
string text;
int number;
int filtersCount = 5;
string filtres[5] = {
" * (1) ROZMYCIE",
" * (2) WYOSTRZENIE",
" * (3) UWYDATNIENIE",
" * (4) GRADIENT",
" * (5) LAPLACE"
};
do {
system("cls");
SetConsoleTextAttribute(hConsole, availableColor);
cout << " Dostepne filtry: " << endl;
SetConsoleTextAttribute(hConsole, originalColor);
cout << "---------------------------" << endl;
cout << filtres[0] << endl;
cout << filtres[1] << endl;
cout << filtres[2] << endl;
cout << filtres[3] << endl;
cout << filtres[4] << endl;
cout << "---------------------------";
cout << endl << endl;
cout << " Wprowadz numer: ";
cin >> text;
number = atoi(text.c_str());
number -= 1;
if (!(number > -1 && number < filtersCount)) {
SetConsoleTextAttribute(hConsole, errorColor);
cout << " error!";
SetConsoleTextAttribute(hConsole, originalColor);
Sleep(1000);
}
else {
Sleep(250);
}
} while (!(number > -1 && number < filtersCount));
cout << "---------------------------" << endl;
cout << endl << endl;
system("cls");
SetConsoleTextAttribute(hConsole, availableColor);
cout << " Dostepne filtry: " << endl;
SetConsoleTextAttribute(hConsole, originalColor);
cout << "---------------------------" << endl;
for (int i = 0; i < filtersCount; i++) {
if (i == number) {
SetConsoleTextAttribute(hConsole, setColor);
cout << filtres[i] << endl;
}
else {
SetConsoleTextAttribute(hConsole, originalColor);
cout << filtres[i] << endl;
}
}
SetConsoleTextAttribute(hConsole, originalColor);
cout << "---------------------------";
cout << endl << endl;
return number + 1;
}
int getMaskLength(int filterType) {
switch (filterType) {
case 1:
return 5;
break;
default:
return 3;
break;
}
}
void setFilter(double *mask, int filterType) {
switch (filterType) {
case 1:
blow(mask, 5);
break;
case 2:
sharpen(mask);
break;
case 3:
prominence(mask);
break;
case 4:
gradient(mask);
break;
case 5:
laplace(mask);
break;
}
}
|
429e12cc8809769ac054fae0455a23662ebc7815.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipfft.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "debug.h"
#include "timer.h"
#include "utils_cuda.h"
#include "utils_file.h"
#include "params.h"
#define WARP 32
int device=0;
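// Get_W_value returns the twiddle factor W_N^m = exp(-2*pi*i*m/N): because -cosf(x - pi) = cosf(x)
// and sinf(x - pi) = -sinf(x), the shifted arguments below evaluate to (cos(2*pi*m/N), -sin(2*pi*m/N)).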
__device__ __inline__ float2 Get_W_value(int N, int m){
float2 ctemp;
ctemp.x=-cosf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f );
ctemp.y=sinf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f );
return(ctemp);
}
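// do_FFT computes an in-place radix-2 transform of FFT_LENGTH points held in shared memory,
// using FFT_EXP stages (both macros presumably come from params.h, with FFT_LENGTH = 2^FFT_EXP).
// Each thread owns two butterflies (four complex values) per stage; the first and last stages
// are peeled out of the main loop, and __syncthreads() separates the read and write phases.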
__device__ void do_FFT(float2 *s_input){ // in-place
float2 DFT_value_even[2], DFT_value_odd[2], ftemp2, ftemp;
float2 W;
int r, j[2], k0, k1, PoT, PoTm1, A_index, B_index, Nhalf;
Nhalf=FFT_LENGTH>>1;
//-----> FFT
//-->
PoT=1;
PoTm1=0;
// --------------------------------------------------------------------------------------------------------
// First iteration where we do not actually need to calculate the twiddle factors r=1 k0=0;
PoTm1=PoT;
PoT=PoT<<1;
j[0]=threadIdx.x;
j[1]=(threadIdx.x+blockDim.x);
W.x=1;
W.y=0;
// first two elements of this thread
A_index=j[0]*PoTm1;
B_index=j[0]*PoTm1 + Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[0].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[0].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[0].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[0].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
// second two elements of the thread
A_index=j[1]*PoTm1;
B_index=j[1]*PoTm1 + Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[1].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[1].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[1].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[1].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
__syncthreads();
s_input[j[0]*PoT]=DFT_value_even[0];
s_input[j[0]*PoT + PoTm1]=DFT_value_odd[0];
s_input[j[1]*PoT]=DFT_value_even[1];
s_input[j[1]*PoT + PoTm1]=DFT_value_odd[1];
__syncthreads();
// First iteration
// --------------------------------------------------------------------------------------------------------
for(r=2;r<=(FFT_EXP-1);r++){
PoTm1=PoT;
PoT=PoT<<1;
j[0]=threadIdx.x>>(r-1);
j[1]=(threadIdx.x+blockDim.x)>>(r-1);
k0=threadIdx.x & (PoTm1-1);
W=Get_W_value(PoT,k0);
// first two elements of this thread
A_index=j[0]*PoTm1+k0;
B_index=j[0]*PoTm1+k0+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[0].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[0].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[0].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[0].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
// second two elements of the thread
A_index=j[1]*PoTm1+k0;
B_index=j[1]*PoTm1+k0+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[1].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[1].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[1].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[1].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
__syncthreads();
s_input[j[0]*PoT + k0]=DFT_value_even[0];
s_input[j[0]*PoT + k0 + PoTm1]=DFT_value_odd[0];
s_input[j[1]*PoT + k0]=DFT_value_even[1];
s_input[j[1]*PoT + k0 + PoTm1]=DFT_value_odd[1];
__syncthreads();
}
// --------------------------------------------------------------------------------------------------------
// Last iteration
PoTm1=PoT;
PoT=PoT<<1;
j[0]=threadIdx.x>>(r-1);
j[1]=(threadIdx.x+blockDim.x)>>(r-1);
k0=threadIdx.x & (PoTm1-1);
k1=(threadIdx.x+blockDim.x) & (PoTm1-1);
// first two elements of this thread
W=Get_W_value(PoT,k0);
A_index=j[0]*PoTm1+k0;
B_index=j[0]*PoTm1+k0+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[0].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[0].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[0].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[0].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
// second two elements of the thread
W=Get_W_value(PoT,k1);
A_index=j[1]*PoTm1+k1;
B_index=j[1]*PoTm1+k1+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[1].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[1].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[1].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[1].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
__syncthreads();
s_input[j[0]*PoT + k0]=DFT_value_even[0];
s_input[j[0]*PoT + k0 + PoTm1]=DFT_value_odd[0];
s_input[j[1]*PoT + k1]=DFT_value_even[1];
s_input[j[1]*PoT + k1 + PoTm1]=DFT_value_odd[1];
__syncthreads();
// Last iteration
// --------------------------------------------------------------------------------------------------------
//-------> END
}
__global__ void FFT_GPU_external(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
#pragma unroll
for(int f=0; f<4; f++){
s_input[threadIdx.x + f*(FFT_LENGTH/4)]=d_input[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH];
}
__syncthreads();
do_FFT(s_input);
__syncthreads();
#pragma unroll
for(int f=0; f<4; f++){
d_output[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + f*(FFT_LENGTH/4)];
}
}
__global__ void FFT_GPU_multiple(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
#pragma unroll
for(int f=0; f<4; f++){
s_input[threadIdx.x + f*(FFT_LENGTH/4)]=d_input[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH];
}
__syncthreads();
for(int f=0;f<100;f++){
do_FFT(s_input);
}
__syncthreads();
#pragma unroll
for(int f=0; f<4; f++){
d_output[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + f*(FFT_LENGTH/4)];
}
}
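// Estimates how many columns (spectra) of nSamples float2 elements fit in the currently free
// device memory, assuming one input and one output buffer (hence the factor of 2), capped at
// the device's maximum grid dimension and reduced to 90% as a safety margin.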
int Max_columns_in_memory_shared(int nSamples, int nSpectra) {
long int nColumns,maxgrid_x;
size_t free_mem,total_mem;
hipDeviceProp_t devProp;
checkCudaErrors(hipSetDevice(device));
checkCudaErrors(hipGetDeviceProperties(&devProp,device));
maxgrid_x = devProp.maxGridSize[0];
hipMemGetInfo(&free_mem,&total_mem);
nColumns=((long int) free_mem)/(2.0*sizeof(float2)*nSamples);
if(nColumns>maxgrid_x) nColumns=maxgrid_x;
nColumns=(int) nColumns*0.9;
return(nColumns);
}
void FFT_init(){
//---------> Specific nVidia stuff
hipDeviceSetCacheConfig(hipFuncCachePreferEqual);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
}
void FFT_external_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
int nCUDAblocks_x=nSpectra;
int nCUDAblocks_y=1;
dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1);
dim3 blockSize(nSamples/4, 1, 1);
//---------> FIR filter part
timer.Start();
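	// The launch requests nSamples*8 bytes of dynamic shared memory, i.e. one 8-byte float2 per
	// FFT point, backing the extern __shared__ s_input[] array inside the kernel.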
hipLaunchKernelGGL(( FFT_GPU_external), dim3(gridSize), dim3(blockSize),nSamples*8, 0, d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_multiple_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
dim3 gridSize_multiple(1000, 1, 1);
dim3 blockSize(nSamples/4, 1, 1);
//---------> FIR filter part
timer.Start();
hipLaunchKernelGGL(( FFT_GPU_multiple), dim3(gridSize_multiple), dim3(blockSize),nSamples*8, 0, d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
int GPU_FFT(float2 *h_input, float2 *h_output, int nSamples, int nSpectra, int nRuns){
//---------> Initial nVidia stuff
int devCount;
size_t free_mem,total_mem;
checkCudaErrors(hipGetDeviceCount(&devCount));
checkCudaErrors(hipSetDevice(device));
hipMemGetInfo(&free_mem,&total_mem);
if(DEBUG) printf("\nDevice has %ld MB of total memory, which %ld MB is available.\n", (long int) total_mem/(1000*1000), (long int) free_mem/(1000*1000));
//---------> Checking memory
int nElements=nSamples*nSpectra;
int input_size=nElements;
int output_size=nElements;
float free_memory = (float) free_mem/(1024.0*1024.0);
float memory_required=((2*input_size + 2*output_size)*sizeof(float))/(1024.0*1024.0);
if(DEBUG) printf("DEBUG: Device has %0.3f MB of total memory, which %0.3f MB is available. Memory required %0.3f MB\n", (float) total_mem/(1024.0*1024.0), free_memory ,memory_required);
if(memory_required>free_memory) {printf("\n \n Array is too big for the device! \n \n"); return(-3);}
//---------> Measurements
double transfer_in, transfer_out, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time,cuFFT_time,FFT_multiple_reuse_registers_time;
double FFT_multiple_time_total, FFT_external_time_total;
GpuTimer timer;
//------------------------------------------------------------------------------
//---------> Shared memory kernel
transfer_in=0.0; transfer_out=0.0; FFT_time=0.0; FFT_external_time=0.0; FFT_multiple_time=0.0; FFT_multiple_reuse_time=0.0; cuFFT_time=0.0; FFT_multiple_reuse_registers_time=0.0;
FFT_multiple_time_total = 0; FFT_external_time_total = 0;
//---------> Memory allocation
if (DEBUG) printf("Device memory allocation...: \t\t");
float2 *d_output;
float2 *d_input;
timer.Start();
checkCudaErrors(hipMalloc((void **) &d_input, sizeof(float2)*input_size));
checkCudaErrors(hipMalloc((void **) &d_output, sizeof(float2)*output_size));
timer.Stop();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//---------> FFT calculation
if (DEBUG) printf("Transferring data to device...: \t");
timer.Start();
checkCudaErrors(hipMemcpy(d_input, h_input, input_size*sizeof(float2), hipMemcpyHostToDevice));
timer.Stop();
transfer_in+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//-----> Compute FFT on the chunk
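	// The optional library path below (hipFFT here, cuFFT in the original) builds a batched
	// complex-to-complex plan -- nSpectra transforms of nSamples points each -- and times a single
	// forward execution as a reference for the custom kernels.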
if(CUFFT){
//---------> FFT
hipfftHandle plan;
hipfftResult error;
error = hipfftPlan1d(&plan, nSamples, HIPFFT_C2C, nSpectra);
if (HIPFFT_SUCCESS != error){
printf("CUFFT error: %d", error);
}
timer.Start();
hipfftExecC2C(plan, (hipfftComplex *)d_input, (hipfftComplex *)d_output, HIPFFT_FORWARD);
timer.Stop();
cuFFT_time += timer.Elapsed();
hipfftDestroy(plan);
}
if(MULTIPLE){
if (DEBUG) printf("Multiple FFT...: \t\t\t");
FFT_init();
FFT_multiple_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(hipMemcpy(d_input, h_input, input_size*sizeof(float2), hipMemcpyHostToDevice));
FFT_multiple_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_multiple_time_total);
}
FFT_multiple_time = FFT_multiple_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time);
}
if(EXTERNAL){
if (DEBUG) printf("FFT...: \t\t\t\t");
FFT_init();
FFT_external_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(hipMemcpy(d_input, h_input, input_size*sizeof(float2), hipMemcpyHostToDevice));
FFT_external_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_external_time_total);
}
FFT_external_time = FFT_external_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_external_time);
}
//-----> Copy chunk of output data to host
if (DEBUG) printf("Transferring data to host...: \t\t");
timer.Start();
checkCudaErrors(hipMemcpy( h_output, d_output, output_size*sizeof(float2), hipMemcpyDeviceToHost));
timer.Stop();
transfer_out+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//---------> error check -----
checkCudaErrors(hipGetLastError());
//---------> Feeing allocated resources
checkCudaErrors(hipFree(d_input));
checkCudaErrors(hipFree(d_output));
if (DEBUG || WRITE) printf("nSpectra:%d; nSamples:%d cuFFT:%0.3f ms; FFT:%0.3f ms; FFT external:%0.3f ms; FFT multiple:%0.3f ms;\n",nSpectra,nSamples,cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time);
if (WRITE){
char str[200];
sprintf(str,"GPU-FFT-Stockham.dat");
if (DEBUG) printf("\n Write results into file...\t");
save_time(str, nSpectra,nSamples, cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out);
if (DEBUG) printf("\t done.\n-------------------------------------\n");
}
return(1);
}
| 429e12cc8809769ac054fae0455a23662ebc7815.cu | #include <cufft.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "debug.h"
#include "timer.h"
#include "utils_cuda.h"
#include "utils_file.h"
#include "params.h"
#define WARP 32
int device=0;
__device__ __inline__ float2 Get_W_value(int N, int m){
float2 ctemp;
ctemp.x=-cosf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f );
ctemp.y=sinf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f );
return(ctemp);
}
__device__ void do_FFT(float2 *s_input){ // in-place
float2 DFT_value_even[2], DFT_value_odd[2], ftemp2, ftemp;
float2 W;
int r, j[2], k0, k1, PoT, PoTm1, A_index, B_index, Nhalf;
Nhalf=FFT_LENGTH>>1;
//-----> FFT
//-->
PoT=1;
PoTm1=0;
// --------------------------------------------------------------------------------------------------------
// First iteration where we do not actually need to calculate the twiddle factors r=1 k0=0;
PoTm1=PoT;
PoT=PoT<<1;
j[0]=threadIdx.x;
j[1]=(threadIdx.x+blockDim.x);
W.x=1;
W.y=0;
// first two elements of this thread
A_index=j[0]*PoTm1;
B_index=j[0]*PoTm1 + Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[0].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[0].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[0].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[0].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
// second two elements of the thread
A_index=j[1]*PoTm1;
B_index=j[1]*PoTm1 + Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[1].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[1].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[1].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[1].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
__syncthreads();
s_input[j[0]*PoT]=DFT_value_even[0];
s_input[j[0]*PoT + PoTm1]=DFT_value_odd[0];
s_input[j[1]*PoT]=DFT_value_even[1];
s_input[j[1]*PoT + PoTm1]=DFT_value_odd[1];
__syncthreads();
// First iteration
// --------------------------------------------------------------------------------------------------------
for(r=2;r<=(FFT_EXP-1);r++){
PoTm1=PoT;
PoT=PoT<<1;
j[0]=threadIdx.x>>(r-1);
j[1]=(threadIdx.x+blockDim.x)>>(r-1);
k0=threadIdx.x & (PoTm1-1);
W=Get_W_value(PoT,k0);
// first two elements of this thread
A_index=j[0]*PoTm1+k0;
B_index=j[0]*PoTm1+k0+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[0].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[0].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[0].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[0].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
// second two elements of the thread
A_index=j[1]*PoTm1+k0;
B_index=j[1]*PoTm1+k0+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[1].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[1].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[1].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[1].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
__syncthreads();
s_input[j[0]*PoT + k0]=DFT_value_even[0];
s_input[j[0]*PoT + k0 + PoTm1]=DFT_value_odd[0];
s_input[j[1]*PoT + k0]=DFT_value_even[1];
s_input[j[1]*PoT + k0 + PoTm1]=DFT_value_odd[1];
__syncthreads();
}
// --------------------------------------------------------------------------------------------------------
// Last iteration
PoTm1=PoT;
PoT=PoT<<1;
j[0]=threadIdx.x>>(r-1);
j[1]=(threadIdx.x+blockDim.x)>>(r-1);
k0=threadIdx.x & (PoTm1-1);
k1=(threadIdx.x+blockDim.x) & (PoTm1-1);
// first two elements of this thread
W=Get_W_value(PoT,k0);
A_index=j[0]*PoTm1+k0;
B_index=j[0]*PoTm1+k0+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[0].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[0].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[0].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[0].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
// second two elements of the thread
W=Get_W_value(PoT,k1);
A_index=j[1]*PoTm1+k1;
B_index=j[1]*PoTm1+k1+Nhalf;
ftemp2=s_input[B_index];
ftemp=s_input[A_index];
DFT_value_even[1].x=ftemp.x + W.x*ftemp2.x - W.y*ftemp2.y;
DFT_value_even[1].y=ftemp.y + W.x*ftemp2.y + W.y*ftemp2.x;
DFT_value_odd[1].x=ftemp.x - W.x*ftemp2.x + W.y*ftemp2.y;
DFT_value_odd[1].y=ftemp.y - W.x*ftemp2.y - W.y*ftemp2.x;
__syncthreads();
s_input[j[0]*PoT + k0]=DFT_value_even[0];
s_input[j[0]*PoT + k0 + PoTm1]=DFT_value_odd[0];
s_input[j[1]*PoT + k1]=DFT_value_even[1];
s_input[j[1]*PoT + k1 + PoTm1]=DFT_value_odd[1];
__syncthreads();
// Last iteration
// --------------------------------------------------------------------------------------------------------
//-------> END
}
__global__ void FFT_GPU_external(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
#pragma unroll
for(int f=0; f<4; f++){
s_input[threadIdx.x + f*(FFT_LENGTH/4)]=d_input[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH];
}
__syncthreads();
do_FFT(s_input);
__syncthreads();
#pragma unroll
for(int f=0; f<4; f++){
d_output[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + f*(FFT_LENGTH/4)];
}
}
__global__ void FFT_GPU_multiple(float2 *d_input, float2* d_output) {
extern __shared__ float2 s_input[];
#pragma unroll
for(int f=0; f<4; f++){
s_input[threadIdx.x + f*(FFT_LENGTH/4)]=d_input[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH];
}
__syncthreads();
for(int f=0;f<100;f++){
do_FFT(s_input);
}
__syncthreads();
#pragma unroll
for(int f=0; f<4; f++){
d_output[threadIdx.x + f*(FFT_LENGTH/4) + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + f*(FFT_LENGTH/4)];
}
}
int Max_columns_in_memory_shared(int nSamples, int nSpectra) {
long int nColumns,maxgrid_x;
size_t free_mem,total_mem;
cudaDeviceProp devProp;
checkCudaErrors(cudaSetDevice(device));
checkCudaErrors(cudaGetDeviceProperties(&devProp,device));
maxgrid_x = devProp.maxGridSize[0];
cudaMemGetInfo(&free_mem,&total_mem);
nColumns=((long int) free_mem)/(2.0*sizeof(float2)*nSamples);
if(nColumns>maxgrid_x) nColumns=maxgrid_x;
nColumns=(int) nColumns*0.9;
return(nColumns);
}
void FFT_init(){
//---------> Specific nVidia stuff
cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
}
void FFT_external_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
int nCUDAblocks_x=nSpectra;
int nCUDAblocks_y=1;
dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1);
dim3 blockSize(nSamples/4, 1, 1);
//---------> FIR filter part
timer.Start();
FFT_GPU_external<<<gridSize, blockSize,nSamples*8>>>( d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
void FFT_multiple_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){
GpuTimer timer;
//---------> CUDA block and CUDA grid parameters
dim3 gridSize_multiple(1000, 1, 1);
dim3 blockSize(nSamples/4, 1, 1);
//---------> FIR filter part
timer.Start();
FFT_GPU_multiple<<<gridSize_multiple, blockSize,nSamples*8>>>( d_input, d_output);
timer.Stop();
*FFT_time += timer.Elapsed();
}
int GPU_FFT(float2 *h_input, float2 *h_output, int nSamples, int nSpectra, int nRuns){
//---------> Initial nVidia stuff
int devCount;
size_t free_mem,total_mem;
checkCudaErrors(cudaGetDeviceCount(&devCount));
checkCudaErrors(cudaSetDevice(device));
cudaMemGetInfo(&free_mem,&total_mem);
if(DEBUG) printf("\nDevice has %ld MB of total memory, which %ld MB is available.\n", (long int) total_mem/(1000*1000), (long int) free_mem/(1000*1000));
//---------> Checking memory
int nElements=nSamples*nSpectra;
int input_size=nElements;
int output_size=nElements;
float free_memory = (float) free_mem/(1024.0*1024.0);
float memory_required=((2*input_size + 2*output_size)*sizeof(float))/(1024.0*1024.0);
if(DEBUG) printf("DEBUG: Device has %0.3f MB of total memory, which %0.3f MB is available. Memory required %0.3f MB\n", (float) total_mem/(1024.0*1024.0), free_memory ,memory_required);
if(memory_required>free_memory) {printf("\n \n Array is too big for the device! \n \n"); return(-3);}
//---------> Measurements
double transfer_in, transfer_out, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time,cuFFT_time,FFT_multiple_reuse_registers_time;
double FFT_multiple_time_total, FFT_external_time_total;
GpuTimer timer;
//------------------------------------------------------------------------------
//---------> Shared memory kernel
transfer_in=0.0; transfer_out=0.0; FFT_time=0.0; FFT_external_time=0.0; FFT_multiple_time=0.0; FFT_multiple_reuse_time=0.0; cuFFT_time=0.0; FFT_multiple_reuse_registers_time=0.0;
FFT_multiple_time_total = 0; FFT_external_time_total = 0;
//---------> Memory allocation
if (DEBUG) printf("Device memory allocation...: \t\t");
float2 *d_output;
float2 *d_input;
timer.Start();
checkCudaErrors(cudaMalloc((void **) &d_input, sizeof(float2)*input_size));
checkCudaErrors(cudaMalloc((void **) &d_output, sizeof(float2)*output_size));
timer.Stop();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//---------> FFT calculation
if (DEBUG) printf("Transferring data to device...: \t");
timer.Start();
checkCudaErrors(cudaMemcpy(d_input, h_input, input_size*sizeof(float2), cudaMemcpyHostToDevice));
timer.Stop();
transfer_in+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//-----> Compute FFT on the chunk
if(CUFFT){
//---------> FFT
cufftHandle plan;
cufftResult error;
error = cufftPlan1d(&plan, nSamples, CUFFT_C2C, nSpectra);
if (CUFFT_SUCCESS != error){
printf("CUFFT error: %d", error);
}
timer.Start();
cufftExecC2C(plan, (cufftComplex *)d_input, (cufftComplex *)d_output, CUFFT_FORWARD);
timer.Stop();
cuFFT_time += timer.Elapsed();
cufftDestroy(plan);
}
if(MULTIPLE){
if (DEBUG) printf("Multiple FFT...: \t\t\t");
FFT_init();
FFT_multiple_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(cudaMemcpy(d_input, h_input, input_size*sizeof(float2), cudaMemcpyHostToDevice));
FFT_multiple_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_multiple_time_total);
}
FFT_multiple_time = FFT_multiple_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time);
}
if(EXTERNAL){
if (DEBUG) printf("FFT...: \t\t\t\t");
FFT_init();
FFT_external_time_total = 0;
for(int f=0; f<nRuns; f++){
checkCudaErrors(cudaMemcpy(d_input, h_input, input_size*sizeof(float2), cudaMemcpyHostToDevice));
FFT_external_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_external_time_total);
}
FFT_external_time = FFT_external_time_total/nRuns;
if (DEBUG) printf("done in %g ms.\n", FFT_external_time);
}
//-----> Copy chunk of output data to host
if (DEBUG) printf("Transferring data to host...: \t\t");
timer.Start();
checkCudaErrors(cudaMemcpy( h_output, d_output, output_size*sizeof(float2), cudaMemcpyDeviceToHost));
timer.Stop();
transfer_out+=timer.Elapsed();
if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
//---------> error check -----
checkCudaErrors(cudaGetLastError());
//---------> Feeing allocated resources
checkCudaErrors(cudaFree(d_input));
checkCudaErrors(cudaFree(d_output));
if (DEBUG || WRITE) printf("nSpectra:%d; nSamples:%d cuFFT:%0.3f ms; FFT:%0.3f ms; FFT external:%0.3f ms; FFT multiple:%0.3f ms;\n",nSpectra,nSamples,cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time);
if (WRITE){
char str[200];
sprintf(str,"GPU-FFT-Stockham.dat");
if (DEBUG) printf("\n Write results into file...\t");
save_time(str, nSpectra,nSamples, cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out);
if (DEBUG) printf("\t done.\n-------------------------------------\n");
}
return(1);
}
|
fe3db98e16bf46b2db098377ad900c3f56a00423.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
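// The remainder of this file is machine-generated by Theano's GPU backend for the node
// GpuElemwise{tanh,no_inplace} on a 3D CudaNdarray: four kernels specialised for a
// C-contiguous, 1-, 2- or 3-dimensional layout, followed by host glue that collapses
// dimensions, launches the right kernel and exposes the thunk to Python via instantiate().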
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
o0_i = tanh(ii_i0_data[0]);
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
o0_i = tanh(ii_i0_data[0]);
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_3(unsigned int numEls
, const int dim0, const int dim1, const int dim2
, const float * i0_data, int i0_str_0, int i0_str_1, int i0_str_2
, float * o0_data, int o0_str_0, int o0_str_1, int o0_str_2
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
float * ii_o0_data = o0_data;
int pos2 = ii % dim2;
ii = ii / dim2;
ii_i0_data += pos2 * i0_str_2;
ii_o0_data += pos2 * o0_str_2;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
o0_i = tanh(ii_i0_data[0]);
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
o0_i = tanh(i0_data[i]);
o0_data[i] = o0_i;
}
}
static void can_collapse_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd
collapse[i]=1;
}else collapse[i]=0;
}
}
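// callkernel_... first drops broadcast dimensions and merges adjacent dimensions whose strides
// are contiguous (using can_collapse_... above), so the element-wise loop runs over as few
// dimensions as possible; the switch near the end then dispatches to the contiguous, 1-, 2- or
// 3-D kernel with up to 30 blocks of at most NUM_VECTOR_OP_THREADS_PER_BLOCK threads each.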
static int callkernel_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*dims[2]*1;
int local_dims[3];
int local_str[1][3];
int local_ostr[1][3];
int nd_collapse = 3;
for(int i=0;i<3;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<3;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<3;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<1;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<1;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[3] = {1,1,1};
int nd_collapse_0[3] = {1,1,1};
can_collapse_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(3,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
case 3: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_3), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], local_dims[2], i0_data, local_str[0][0], local_str[0][1], local_str[0][2], o0_data, local_ostr[0][0], local_ostr[0][1], local_ostr[0][2]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], local_dims[2], i0_data, local_str[0][0], local_str[0][1], local_str[0][2], o0_data, local_ostr[0][0], local_ostr[0][1], local_ostr[0][2])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V1;
__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_6:
double __DUMMY_6;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE tanh START\n";
//standard elemwise size checks
int dims[3] = {1,1,1};
int broadcasts_V3[3] = {0, 0, 0};
//std::cerr << "C_CODE tanh checking input V3\n";
if (3 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 3 dims, not %i", V3->nd);
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
for (int i = 0; i< 3; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE tanh checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
}
for (int i = 0; (i< 3) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
Py_DECREF(V1);
V1 = NULL;
}
}
if (V1 && !CudaNdarray_is_c_contiguous(V1))
{
Py_XDECREF(V1);
V1 = NULL;
}
if (NULL == V1)
{
V1 = (CudaNdarray*)CudaNdarray_New();
if (!V1)
{
//error string already set
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
if (CudaNdarray_alloc_contiguous(V1, 3, dims))
{
//error string already set
Py_DECREF(V1);
V1 = NULL;
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
else // no error
{
}
}
//std::cerr << "C_CODE tanh END\n";
__label_5:
double __DUMMY_5;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_executor(__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d* self) {
return self->run();
}
static void __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (3 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 3, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d* struct_ptr = new __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_executor), struct_ptr, __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC inite10d3b1ffe3ec2cd900e40b60df0376d(void){
(void) Py_InitModule("e10d3b1ffe3ec2cd900e40b60df0376d", MyMethods);
}
| fe3db98e16bf46b2db098377ad900c3f56a00423.cu | #include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
o0_i = tanh(ii_i0_data[0]);
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
o0_i = tanh(ii_i0_data[0]);
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_3(unsigned int numEls
, const int dim0, const int dim1, const int dim2
, const float * i0_data, int i0_str_0, int i0_str_1, int i0_str_2
, float * o0_data, int o0_str_0, int o0_str_1, int o0_str_2
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
float * ii_o0_data = o0_data;
int pos2 = ii % dim2;
ii = ii / dim2;
ii_i0_data += pos2 * i0_str_2;
ii_o0_data += pos2 * o0_str_2;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
o0_i = tanh(ii_i0_data[0]);
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{tanh,no_inplace}
// node.op.destroy_map={}
// Input 0 CudaNdarrayType(float32, 3D)
// Output 0 CudaNdarrayType(float32, 3D)
static __global__ void kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
o0_i = tanh(i0_data[i]);
o0_data[i] = o0_i;
}
}
static void can_collapse_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd
collapse[i]=1;
}else collapse[i]=0;
}
}
static int callkernel_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*dims[2]*1;
int local_dims[3];
int local_str[1][3];
int local_ostr[1][3];
int nd_collapse = 3;
for(int i=0;i<3;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<3;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<3;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<1;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<1;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[3] = {1,1,1};
int nd_collapse_0[3] = {1,1,1};
can_collapse_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(3,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
case 3: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_3<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], local_dims[2], i0_data, local_str[0][0], local_str[0][1], local_str[0][2], o0_data, local_ostr[0][0], local_ostr[0][1], local_ostr[0][2]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_e10d3b1ffe3ec2cd900e40b60df0376d_0 Tanh", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Tanh_node_e10d3b1ffe3ec2cd900e40b60df0376d_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], local_dims[2], i0_data, local_str[0][0], local_str[0][1], local_str[0][2], o0_data, local_ostr[0][0], local_ostr[0][1], local_ostr[0][2])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V1;
__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_6:
double __DUMMY_6;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE tanh START\n";
//standard elemwise size checks
int dims[3] = {1,1,1};
int broadcasts_V3[3] = {0, 0, 0};
//std::cerr << "C_CODE tanh checking input V3\n";
if (3 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 3 dims, not %i", V3->nd);
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
for (int i = 0; i< 3; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE tanh checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
}
for (int i = 0; (i< 3) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
Py_DECREF(V1);
V1 = NULL;
}
}
if (V1 && !CudaNdarray_is_c_contiguous(V1))
{
Py_XDECREF(V1);
V1 = NULL;
}
if (NULL == V1)
{
V1 = (CudaNdarray*)CudaNdarray_New();
if (!V1)
{
//error string already set
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
if (CudaNdarray_alloc_contiguous(V1, 3, dims))
{
//error string already set
Py_DECREF(V1);
V1 = NULL;
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_e10d3b1ffe3ec2cd900e40b60df0376d_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 5;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_5;};
}
else // no error
{
}
}
//std::cerr << "C_CODE tanh END\n";
__label_5:
double __DUMMY_5;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_executor(__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d* self) {
return self->run();
}
static void __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (3 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 3, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d* struct_ptr = new __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_executor), struct_ptr, __struct_compiled_op_e10d3b1ffe3ec2cd900e40b60df0376d_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC inite10d3b1ffe3ec2cd900e40b60df0376d(void){
(void) Py_InitModule("e10d3b1ffe3ec2cd900e40b60df0376d", MyMethods);
}
|
805bebb9d3d0c5c71dd8cdcb7aab022eb19292cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
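// Note (illustrative, not used below): because of the grid-stride loop above, the kernel is
// correct for any launch shape; even hipLaunchKernelGGL((add), dim3(1), dim3(256), 0, 0, N, x, y)
// would still cover all N elements, just with less parallelism than the launch chosen in main().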
void printPtrInfo(char c, hipPointerAttribute_t attributes) {
printf("\n\ninfo of %c\n", c);
printf(" Memory type %i\n",attributes.memoryType);
printf(" Type %i\n",attributes.type);
printf(" Device %d\n",attributes.device);
printf(" isManaged %d\n",attributes.isManaged);
}
int main(void)
{
int N = 1<<30;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMemAdvise(x, N*sizeof(float), hipMemAdviseSetAccessedBy, hipCpuDeviceId);
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// get device properties
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("0 Unregistered\n1 Host\n2 Device\n3 Managed\n");
hipPointerAttribute_t attributes;
printf("Pointer info\n");
hipPointerGetAttributes(&attributes, x);
printPtrInfo('x', attributes);
hipPointerGetAttributes(&attributes, y);
printPtrInfo('y', attributes);
// Launch kernel on the N (1<<30) elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 805bebb9d3d0c5c71dd8cdcb7aab022eb19292cc.cu | #include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
void printPtrInfo(char c, cudaPointerAttributes attributes) {
printf("\n\ninfo of %c\n", c);
printf(" Memory type %i\n",attributes.memoryType);
printf(" Type %i\n",attributes.type);
printf(" Device %d\n",attributes.device);
printf(" isManaged %d\n",attributes.isManaged);
}
int main(void)
{
int N = 1<<30;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMemAdvise(x, N*sizeof(float), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// get device properties
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("0 Unregistered\n1 Host\n2 Device\n3 Managed\n");
cudaPointerAttributes attributes;
printf("Pointer info\n");
cudaPointerGetAttributes(&attributes, x);
printPtrInfo('x', attributes);
cudaPointerGetAttributes(&attributes, y);
printPtrInfo('y', attributes);
// Launch kernel on the N (1<<30) elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
cda70946280cd813f3c0d093c6ebde7d95109be7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "input.h"
#include "knn_functions.h"
#include "check.h"
#include "hipError_t.h"
#include "utility.h"
int main(int argc, char* argv[]) {
// whether or not to save the result to a file
bool saveData = true;
bool checkresult = false;
if((argc -1) != 2){
printf("Errore non sono stati specificati correttamente i file del dataset!\n");
exit(EXIT_FAILURE);
}
//device
int deviceIndex = 0;
if (K > N){
printf("Errore il numero di vicini non pu essere superiore al numero di sample!\n");
exit(EXIT_FAILURE);
}
if (K % 2 == 0){
printf("Inserire un numero di vicini dispari!\n");
exit(EXIT_FAILURE);
}
const char * trainFile = argv[1];
const char * testFile = argv[2];
//number of GPUs present
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) );
//check that the requested device is available
if(deviceIndex < count)
{
HANDLE_ERROR(hipSetDevice(deviceIndex));
}
else
{
printf("Device non disponbile!\n");
exit(EXIT_FAILURE);
}
// properties of the GPU
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, deviceIndex));
//printf("M : %d Max threads per block: %d\n",M, prop.maxThreadsPerBlock );
//printf("Max thread dimensions: (%d, %d, %d)\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
//printf("Max grid dimensions: (%d, %d, %d)\n",prop.maxGridSize[0], prop.maxGridSize[1],prop.maxGridSize[2] );
//printf("\n" );
/*
int maxthread;
hipDeviceGetAttribute(&maxthread, maxThreadsPerBlock);
//Check that the block does not exceed the maximum number of threads per block
if (BLOCK_SIZE * BLOCK_SIZE > maxthread){
printf("Errore, superato massimo numero di thread per blocco!\n");
exit(EXIT_FAILURE);
}
*/
// measure the execution time
hipEvent_t start, stop, stopRead, stopSendData, primoStep, secondoStep;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventCreate( &stopRead ) );
HANDLE_ERROR( hipEventCreate( &stopSendData ) );
HANDLE_ERROR( hipEventCreate( &primoStep ) );
HANDLE_ERROR( hipEventCreate( &secondoStep ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
float * trainingData= (float *) malloc(N* M * sizeof(float));
float * testingData= (float *) malloc(P* M * sizeof(float));
//HANDLE_ERROR( hipHostMalloc( (void**)&trainingData, N*M * sizeof( *trainingData ), hipHostMallocDefault ) );
//HANDLE_ERROR( hipHostMalloc( (void**)&testingData, P*M * sizeof( *testingData ), hipHostMallocDefault ) );
int * classesTraining = (int*) malloc(N *sizeof(int));
int * classesTesting = (int*) malloc(P *sizeof(int));
float * dist = (float *) malloc(P* N * sizeof(float));
//HANDLE_ERROR( hipHostMalloc( (void**)&dist, P*M * sizeof( *dist ), hipHostMallocDefault ) );
if(trainingData == NULL || testingData == NULL || classesTesting == NULL || classesTraining == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
//reading data from file
read_file(trainFile, N, M, trainingData, classesTraining);
read_file(testFile, P, M, testingData, classesTesting);
//printf("nome file %s \n", trainFile);
//printf("nome file test %s \n", testFile);
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stopRead, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stopRead ) );
float elapsedTimeRead;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, stopRead ) );
//printf( "Lettura dati eseguita in: %f \n", elapsedTimeRead/1000 );
// pointers to the data on the device
float* dev_train;
float* dev_test;
float* dev_dist;
int* dev_label;
// allocate global memory on the GPU for the dataset
HANDLE_ERROR( hipMalloc( (void**)&dev_train, N * M * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_test, P * M * sizeof(float) ) );
//allocate the distance matrix and the corresponding labels
HANDLE_ERROR( hipMalloc( (void**)&dev_dist, P* N * sizeof(float) ) );
//HANDLE_ERROR( hipMalloc( (void**)&dev_label, P * N * sizeof(int) ) );
// copy the dataset elements
HANDLE_ERROR( hipMemcpy( dev_train, trainingData, N * M * sizeof(float), hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_test, testingData, P * M * sizeof(float), hipMemcpyHostToDevice ) );
//HANDLE_ERROR( hipMemcpy( dev_dist, dist, N * P * sizeof(float), hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipEventRecord( stopSendData, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stopSendData ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, stopSendData ) );
//printf( "Copia dati su GPU eseguita dopo : %f secondi\n", elapsedTimeRead/1000 );
//HANDLE_ERROR( hipMemcpy( dev_label, label, N * P * sizeof(int), hipMemcpyHostToDevice ) );
// create blocks of BLOCK_SIZE * BLOCK_SIZE threads
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
//Number of blocks (ceiling division)
int dim_row = (P % BLOCK_SIZE == 0) ? P / BLOCK_SIZE : P / BLOCK_SIZE + 1;
int dim_col = (N % BLOCK_SIZE == 0) ? N / BLOCK_SIZE : N / BLOCK_SIZE + 1;
dim3 grid(dim_col, dim_row, 1); // a grid of CUDA thread blocks
//printf("Numero di blocchi %d %d da %d \n", dim_row, dim_col, BLOCK_SIZE);
//hipFuncSetCacheConfig(computeDist_kernel, hipFuncCachePreferL1);
// compute the Euclidean distance between train and test points
hipLaunchKernelGGL(( computeDist_kernel), dim3(grid), dim3(block), 0, 0, dev_train, dev_test, dev_dist);//, dev_label);
int * label = (int*) malloc(P * K *sizeof(int));
int* countsLabel = (int*) malloc(sizeof(int)* LABELS);
int* confusionMatrix = (int*) malloc(sizeof(int)* LABELS * LABELS);
if(confusionMatrix ==NULL || countsLabel == NULL || label == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// initialize the confusion matrix to zero
initilizeArray(confusionMatrix, LABELS*LABELS, 0);
// barrier to make sure all distances have been computed
hipDeviceSynchronize();
HANDLE_ERROR( hipEventRecord( primoStep, 0 ) );
HANDLE_ERROR( hipEventSynchronize( primoStep ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, primoStep ) );
//printf( "Distanze calcolate dopo : %f secondi\n", elapsedTimeRead/1000 );
//free the device copy of the dataset, which is no longer needed
HANDLE_ERROR( hipFree(dev_train) );
HANDLE_ERROR( hipFree(dev_test) );
//hipDeviceSynchronize();
HANDLE_ERROR( hipMalloc( (void**)&dev_label, P * K * sizeof(int) ) );
//HANDLE_ERROR( hipMemcpy( dev_label, label, P*K * sizeof(int), hipMemcpyHostToDevice ) );
dim3 blockSort(BLOCK_SIZE, 1, 1);
dim3 gridSort(dim_row, 1, 1);
//printf("Numero di blocchi per il sort %d da %d \n", dim_row, BLOCK_SIZE);
hipLaunchKernelGGL(( sort_kernel), dim3(gridSort), dim3(blockSort), 0, 0, dev_dist, dev_label);
// barrier to make sure all rows have been sorted
hipDeviceSynchronize();
//retrieve the results from the GPU
//HANDLE_ERROR(hipMemcpy(dist , dev_dist, P * N * sizeof(float), hipMemcpyDeviceToHost ) );
HANDLE_ERROR(hipMemcpy(label , dev_label, P * K * sizeof(int), hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipEventRecord( secondoStep, 0 ) );
HANDLE_ERROR( hipEventSynchronize( secondoStep ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, secondoStep ) );
//printf( "Ordinate e ricevute dopo : %f secondi\n", elapsedTimeRead/1000 );
/*printf("Dopoooooooo\n");
for(int i=0; i < P; i++){
for(int j=0; j < K; j++)
printf(" %d ", label[i*K +j]);
printf("\n\n");
}
*/
// number of classification errors made by the KNN algorithm
int error = 0;
//the computation of the final confusion matrix is left to the CPU
for (int i=0; i<P; i++){
initilizeArray(countsLabel, LABELS, 0);
int bestLabel = 0;
for(int j=0; j<K; j++){
int indice = label[i*K+j];
int classe = classesTraining[indice];
countsLabel[classe] = countsLabel[classe] + 1;
if(countsLabel[classe] > countsLabel[bestLabel])
bestLabel = classe;
}
int realLabel = classesTesting[i];
if (realLabel != bestLabel){
error = error + 1;
}
//update confusion matrix
confusionMatrix[realLabel * LABELS + bestLabel] = confusionMatrix[realLabel * LABELS + bestLabel] +1;
}
//print the confusion matrix
//printConfusionMatrix(confusionMatrix);
//printf("Errori totali: %d\n", error);
//printf("Record corretti: %d accuratezza (%.2f%%); ", P - error, 100 - ((float) error / P) * 100);
// check the result against the serial implementation
if(checkresult == true){
checkResultKNN(trainingData, testingData, classesTraining, classesTesting, confusionMatrix);
}
// free CPU memory
//HANDLE_ERROR( hipHostFree( trainingData) );
//HANDLE_ERROR( hipHostFree( testingData ) );
//HANDLE_ERROR( hipHostFree( dist ) );
free(trainingData); trainingData = NULL;
free(testingData); testingData = NULL;
free(dist); dist=NULL;
free(classesTraining); classesTraining = NULL;
free(classesTesting); classesTesting = NULL;
free(confusionMatrix); confusionMatrix=NULL;
free(label); label=NULL;
free(countsLabel); countsLabel= NULL;
//free GPU memory
//HANDLE_ERROR( hipFree(dev_train) );
//HANDLE_ERROR( hipFree(dev_test) );
HANDLE_ERROR( hipFree(dev_label ) );
HANDLE_ERROR( hipFree(dev_dist ) );
// total execution time
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
//printf( "Total time: %f \n", elapsedTime/1000 );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
//HANDLE_ERROR( hipEventDestroy( stopRead ) );
//save on file
if(saveData == true)
saveResultsOnFile(elapsedTime/1000);
return 0;
}
| cda70946280cd813f3c0d093c6ebde7d95109be7.cu | #include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "input.h"
#include "knn_functions.h"
#include "check.h"
#include "cudaError.h"
#include "utility.h"
int main(int argc, char* argv[]) {
// whether or not to save the result to a file
bool saveData = true;
bool checkresult = false;
if((argc -1) != 2){
printf("Errore non sono stati specificati correttamente i file del dataset!\n");
exit(EXIT_FAILURE);
}
//device
int deviceIndex = 0;
if (K > N){
printf("Errore il numero di vicini non può essere superiore al numero di sample!\n");
exit(EXIT_FAILURE);
}
if (K % 2 == 0){
printf("Inserire un numero di vicini dispari!\n");
exit(EXIT_FAILURE);
}
const char * trainFile = argv[1];
const char * testFile = argv[2];
//number of GPUs present
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) );
//check that the requested device is available
if(deviceIndex < count)
{
HANDLE_ERROR(cudaSetDevice(deviceIndex));
}
else
{
printf("Device non disponbile!\n");
exit(EXIT_FAILURE);
}
// properties of the GPU
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, deviceIndex));
//printf("M : %d Max threads per block: %d\n",M, prop.maxThreadsPerBlock );
//printf("Max thread dimensions: (%d, %d, %d)\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
//printf("Max grid dimensions: (%d, %d, %d)\n",prop.maxGridSize[0], prop.maxGridSize[1],prop.maxGridSize[2] );
//printf("\n" );
/*
int maxthread;
cudaDeviceGetAttribute(&maxthread, maxThreadsPerBlock);
//Check that the block does not exceed the maximum number of threads per block
if (BLOCK_SIZE * BLOCK_SIZE > maxthread){
printf("Errore, superato massimo numero di thread per blocco!\n");
exit(EXIT_FAILURE);
}
*/
// measure the execution time
cudaEvent_t start, stop, stopRead, stopSendData, primoStep, secondoStep;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventCreate( &stopRead ) );
HANDLE_ERROR( cudaEventCreate( &stopSendData ) );
HANDLE_ERROR( cudaEventCreate( &primoStep ) );
HANDLE_ERROR( cudaEventCreate( &secondoStep ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
float * trainingData= (float *) malloc(N* M * sizeof(float));
float * testingData= (float *) malloc(P* M * sizeof(float));
//HANDLE_ERROR( cudaHostAlloc( (void**)&trainingData, N*M * sizeof( *trainingData ), cudaHostAllocDefault ) );
//HANDLE_ERROR( cudaHostAlloc( (void**)&testingData, P*M * sizeof( *testingData ), cudaHostAllocDefault ) );
int * classesTraining = (int*) malloc(N *sizeof(int));
int * classesTesting = (int*) malloc(P *sizeof(int));
float * dist = (float *) malloc(P* N * sizeof(float));
//HANDLE_ERROR( cudaHostAlloc( (void**)&dist, P*M * sizeof( *dist ), cudaHostAllocDefault ) );
if(trainingData == NULL || testingData == NULL || classesTesting == NULL || classesTraining == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
//reading data from file
read_file(trainFile, N, M, trainingData, classesTraining);
read_file(testFile, P, M, testingData, classesTesting);
//printf("nome file %s \n", trainFile);
//printf("nome file test %s \n", testFile);
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stopRead, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stopRead ) );
float elapsedTimeRead;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, stopRead ) );
//printf( "Lettura dati eseguita in: %f \n", elapsedTimeRead/1000 );
// pointers to the data on the device
float* dev_train;
float* dev_test;
float* dev_dist;
int* dev_label;
// allocate global memory on the GPU for the dataset
HANDLE_ERROR( cudaMalloc( (void**)&dev_train, N * M * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_test, P * M * sizeof(float) ) );
//allocate the distance matrix and the corresponding labels
HANDLE_ERROR( cudaMalloc( (void**)&dev_dist, P* N * sizeof(float) ) );
//HANDLE_ERROR( cudaMalloc( (void**)&dev_label, P * N * sizeof(int) ) );
// copy the dataset elements
HANDLE_ERROR( cudaMemcpy( dev_train, trainingData, N * M * sizeof(float), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_test, testingData, P * M * sizeof(float), cudaMemcpyHostToDevice ) );
//HANDLE_ERROR( cudaMemcpy( dev_dist, dist, N * P * sizeof(float), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaEventRecord( stopSendData, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stopSendData ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, stopSendData ) );
//printf( "Copia dati su GPU eseguita dopo : %f secondi\n", elapsedTimeRead/1000 );
//HANDLE_ERROR( cudaMemcpy( dev_label, label, N * P * sizeof(int), cudaMemcpyHostToDevice ) );
// create blocks of BLOCK_SIZE * BLOCK_SIZE threads
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
//Number of blocks (ceiling division)
int dim_row = (P % BLOCK_SIZE == 0) ? P / BLOCK_SIZE : P / BLOCK_SIZE + 1;
int dim_col = (N % BLOCK_SIZE == 0) ? N / BLOCK_SIZE : N / BLOCK_SIZE + 1;
dim3 grid(dim_col, dim_row, 1); // a grid of CUDA thread blocks
//printf("Numero di blocchi %d %d da %d \n", dim_row, dim_col, BLOCK_SIZE);
//cudaFuncSetCacheConfig(computeDist_kernel, cudaFuncCachePreferL1);
// compute the Euclidean distance between train and test points
computeDist_kernel<<<grid, block>>>(dev_train, dev_test, dev_dist);//, dev_label);
int * label = (int*) malloc(P * K *sizeof(int));
int* countsLabel = (int*) malloc(sizeof(int)* LABELS);
int* confusionMatrix = (int*) malloc(sizeof(int)* LABELS * LABELS);
if(confusionMatrix ==NULL || countsLabel == NULL || label == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// initialize the confusion matrix to zero
initilizeArray(confusionMatrix, LABELS*LABELS, 0);
// barrier to make sure all distances have been computed
cudaDeviceSynchronize();
HANDLE_ERROR( cudaEventRecord( primoStep, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( primoStep ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, primoStep ) );
//printf( "Distanze calcolate dopo : %f secondi\n", elapsedTimeRead/1000 );
//free the device copy of the dataset, which is no longer needed
HANDLE_ERROR( cudaFree(dev_train) );
HANDLE_ERROR( cudaFree(dev_test) );
//cudaDeviceSynchronize();
HANDLE_ERROR( cudaMalloc( (void**)&dev_label, P * K * sizeof(int) ) );
//HANDLE_ERROR( cudaMemcpy( dev_label, label, P*K * sizeof(int), cudaMemcpyHostToDevice ) );
dim3 blockSort(BLOCK_SIZE, 1, 1);
dim3 gridSort(dim_row, 1, 1);
//printf("Numero di blocchi per il sort %d da %d \n", dim_row, BLOCK_SIZE);
sort_kernel<<<gridSort, blockSort>>>(dev_dist, dev_label);
// barrier to make sure all rows have been sorted
cudaDeviceSynchronize();
//retrieve the results from the GPU
//HANDLE_ERROR(cudaMemcpy(dist , dev_dist, P * N * sizeof(float), cudaMemcpyDeviceToHost ) );
HANDLE_ERROR(cudaMemcpy(label , dev_label, P * K * sizeof(int), cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaEventRecord( secondoStep, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( secondoStep ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, secondoStep ) );
//printf( "Ordinate e ricevute dopo : %f secondi\n", elapsedTimeRead/1000 );
/*printf("Dopoooooooo\n");
for(int i=0; i < P; i++){
for(int j=0; j < K; j++)
printf(" %d ", label[i*K +j]);
printf("\n\n");
}
*/
// number of classification errors made by the KNN algorithm
int error = 0;
//the computation of the final confusion matrix is left to the CPU
for (int i=0; i<P; i++){
initilizeArray(countsLabel, LABELS, 0);
int bestLabel = 0;
for(int j=0; j<K; j++){
int indice = label[i*K+j];
int classe = classesTraining[indice];
countsLabel[classe] = countsLabel[classe] + 1;
if(countsLabel[classe] > countsLabel[bestLabel])
bestLabel = classe;
}
int realLabel = classesTesting[i];
if (realLabel != bestLabel){
error = error + 1;
}
//update confusion matrix
confusionMatrix[realLabel * LABELS + bestLabel] = confusionMatrix[realLabel * LABELS + bestLabel] +1;
}
//print the confusion matrix
//printConfusionMatrix(confusionMatrix);
//printf("Errori totali: %d\n", error);
//printf("Record corretti: %d accuratezza (%.2f%%); ", P - error, 100 - ((float) error / P) * 100);
// check the result against the serial implementation
if(checkresult == true){
checkResultKNN(trainingData, testingData, classesTraining, classesTesting, confusionMatrix);
}
// free CPU memory
//HANDLE_ERROR( cudaFreeHost( trainingData) );
//HANDLE_ERROR( cudaFreeHost( testingData ) );
//HANDLE_ERROR( cudaFreeHost( dist ) );
free(trainingData); trainingData = NULL;
free(testingData); testingData = NULL;
free(dist); dist=NULL;
free(classesTraining); classesTraining = NULL;
free(classesTesting); classesTesting = NULL;
free(confusionMatrix); confusionMatrix=NULL;
free(label); label=NULL;
free(countsLabel); countsLabel= NULL;
//free GPU memory
//HANDLE_ERROR( cudaFree(dev_train) );
//HANDLE_ERROR( cudaFree(dev_test) );
HANDLE_ERROR( cudaFree(dev_label ) );
HANDLE_ERROR( cudaFree(dev_dist ) );
// total execution time
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
//printf( "Total time: %f \n", elapsedTime/1000 );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
//HANDLE_ERROR( cudaEventDestroy( stopRead ) );
//save on file
if(saveData == true)
saveResultsOnFile(elapsedTime/1000);
return 0;
}
|
874cc8391f0e6bc5e01e2c09288dadf3a53d498c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cusitf_function_H.h"
using namespace cv;
#define MESSAGE 1
texture<float, 1, hipReadModeElementType> texRef;
/***********
//This is an adjustable option which controls the Gaussian kernel size.
//When the kernel size is at most 32*2+1, i.e. the kernel radius is at most 32, setting ROW_HALO_STEP
//to 1 and COLUMNS_HALO_STEPS to 2 gives good performance. But when the kernel radius is more
//than 32 and less than 64, ROW_HALO_STEP should be set to 2 and COLUMNS_HALO_STEPS to 4.
//ROW_HALO_STEP is set to 1 and COLUMNS_HALO_STEPS to 2 by default.
***********/
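// A minimal guard sketch for the option described above (the macro is an illustration and is
// not referenced elsewhere in this file): with the default ROW_HALO_STEP = 1 and a 32-wide
// row block, the largest supported Gaussian radius is 32, i.e. ksize <= 65.
#define MAX_SUPPORTED_GAUSS_RADIUS 32 /* == ROW_HALO_STEP * ROW_BLOCK_DIM_X by default */
// A host wrapper could check, e.g.: assert((ksize - 1) / 2 <= MAX_SUPPORTED_GAUSS_RADIUS);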
///////////////////////
/// \brief GaussianBlurKernelRow
/// \param d_data
/// \param out
/// \param w
/// \param h
/// \param ksize
/// \param pitch
/// Only supports kernel sizes up to 32*2+1 (ROW_HALO_STEP*ROW_BLOCK_DIM_X(32) is the maximum radius).
/// Based on the CUDA sample 'convolutionSeparable'.
/// The boundary is padded with 0, which differs from OpenCV. The boundary handling is simplified
/// because the SIFT descriptor does not use the image border, which is filtered out anyway.
/// Setting ROW_HALO_STEP to 2 also works if a larger radius is needed.
//////////////////////
#define ROW_BLOCK_DIM_X 32
#define ROW_BLOCK_DIM_Y 8
#define ROW_UNROLL_STEPS 4
#define ROW_HALO_STEP 1
__global__ void GaussianBlurKernelRow(
float *d_data,
float *out,
int w,
int h,
int ksize,
int pitch
)
{
__shared__ float s[ROW_BLOCK_DIM_Y][ROW_BLOCK_DIM_X*(ROW_UNROLL_STEPS+ROW_HALO_STEP*2)];
//base shared memory coordinate
int baseX = (blockIdx.x*ROW_UNROLL_STEPS-ROW_HALO_STEP)*blockDim.x + threadIdx.x;
int baseY = blockIdx.y*blockDim.y+threadIdx.y;
//offset the data pointers to this block's base coordinate
d_data += baseY * pitch + baseX;
out += baseY * pitch + baseX;
//Load main data
#pragma unroll
for(int i = ROW_HALO_STEP;i<ROW_UNROLL_STEPS+ROW_HALO_STEP;i++)
s[threadIdx.y][threadIdx.x+ i * ROW_BLOCK_DIM_X] = (baseX + ROW_BLOCK_DIM_X * i < w ) ? d_data[ROW_BLOCK_DIM_X * i] : 0;
//Load left halo
//the left halo is loaded only when these threads fall inside the image patch; otherwise it is zero-padded.
#pragma unroll
for (int i = 0; i < ROW_HALO_STEP; i++)
{
s[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X] = (baseX >= -i * ROW_BLOCK_DIM_X ) ? d_data[i * ROW_BLOCK_DIM_X] : 0;
}
//Load right halo
//the right halo is loaded only when these threads fall inside the image patch; otherwise it is zero-padded.
#pragma unroll
for (int i = ROW_HALO_STEP + ROW_UNROLL_STEPS; i < ROW_HALO_STEP + ROW_UNROLL_STEPS + ROW_HALO_STEP; i++)
{
s[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X] = (w - baseX > i * ROW_BLOCK_DIM_X) ? d_data[i * ROW_BLOCK_DIM_X] : 0;
}
__syncthreads();
int b = (ksize -1) /2;
for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP + ROW_UNROLL_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -b; j <= b; j++)
{
sum += coeffGaussKernel[b-j] * s[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X + j];
}
out[i * ROW_BLOCK_DIM_X] = sum;
}
//old version
// int b = (ksize -1) /2;
// if(x>=b && x<w-b && y>=0 && y<h){
// #pragma unroll
// float sum = 0;
// for(int i = -b;i<=b;i++){
// sum += d_data[y*pitch+x+i]*coeffGaussKernel[i+b];
// }
// out[y*pitch+x] = sum;
// }
}
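// Illustrative host-side launch for the row pass (a sketch only: the wrapper name is mine,
// `pitch` is assumed to be in floats, and h is assumed to be a multiple of ROW_BLOCK_DIM_Y
// because the kernel does not clamp rows):
static inline void launchGaussianBlurRow(float *d_src, float *d_dst,
                                         int w, int h, int ksize, int pitch)
{
    // each block filters ROW_UNROLL_STEPS * ROW_BLOCK_DIM_X pixels of one row strip
    dim3 block(ROW_BLOCK_DIM_X, ROW_BLOCK_DIM_Y);
    dim3 grid((w + ROW_BLOCK_DIM_X * ROW_UNROLL_STEPS - 1) / (ROW_BLOCK_DIM_X * ROW_UNROLL_STEPS),
              h / ROW_BLOCK_DIM_Y);
    hipLaunchKernelGGL((GaussianBlurKernelRow), grid, block, 0, 0,
                       d_src, d_dst, w, h, ksize, pitch);
}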
///////////////////////////////////
/// \brief GaussianBlurKernelCol
/// \param d_data
/// \param out
/// \param w
/// \param h
/// \param ksize
/// \param pitch
/// Unlike the row pass, there is no pitch padding in the y direction to keep all threads
/// inside the image area, so the bottom rows need the dedicated branch below.
/// Based on the CUDA sample 'convolutionSeparable'.
/// The boundary is padded with 0, which differs from OpenCV. The boundary handling is simplified
/// because the SIFT descriptor does not use the image border, which is filtered out anyway.
/// The minimum image height is 64 (COLUMNS_BLOCKDIM_Y*COLUMNS_RESULT_STEPS).
//////////////////////////////////
#define COLUMNS_BLOCKDIM_X 32
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_RESULT_STEPS 4
#define COLUMNS_HALO_STEPS 2
__global__ void GaussianBlurKernelCol(
float *d_data,
float *out,
int w,
int h,
int ksize,
int pitch
)
{
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_data += baseY * pitch + baseX;
out += baseY * pitch + baseX;
int b = (ksize -1) /2;
//fill the shared memory without considering the upper halo, so the minimum image height is limited to 64 (COLUMNS_BLOCKDIM_Y*COLUMNS_RESULT_STEPS)
if(baseY + (COLUMNS_RESULT_STEPS+COLUMNS_HALO_STEPS)*COLUMNS_BLOCKDIM_Y >= h && baseY + COLUMNS_HALO_STEPS*COLUMNS_BLOCKDIM_Y < h)
{
//Main data and lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS*2 ; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y < h) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
__syncthreads();
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -b ; j <= b; j++)
{
sum += coeffGaussKernel[b - j]* s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
if(baseY + i * COLUMNS_BLOCKDIM_Y < h) {
out[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
return;
}
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_data[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (h - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -b ; j <= b; j++)
{
sum += coeffGaussKernel[b - j]* s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
out[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
#if 0
if(y>=b && y<h-b && x>=0 && x<w){
#pragma unroll
for(int i = 0;i<ksize;i++){
if(i<b){
out[y*pitch+x] += d_data[(y-b+i)*pitch+x]*coeffGaussKernel[i];
}
else{
out[y*pitch+x] += d_data[(y+i-b)*pitch+x]*coeffGaussKernel[i];
}
}
}
#else
// if(y>=b && y<h-b && x>=0 && x<w){
// #pragma unroll
// float sum = 0;
// for(int i = -b;i<=b;i++){
// sum += d_data[(y+i)*pitch+x]*coeffGaussKernel[i+b];
// }
// out[y*pitch+x] = sum;
// }
#endif
}
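// Illustrative host-side launch for the column pass (a sketch only: the wrapper name is mine
// and the pitched allocation is assumed to cover the padded width). A full separable blur is
// then d_tmp = row(d_src) followed by d_dst = col(d_tmp), sharing one scratch image d_tmp.
static inline void launchGaussianBlurCol(float *d_src, float *d_dst,
                                         int w, int h, int ksize, int pitch)
{
    // each block filters COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y rows of one column strip
    dim3 block(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
    dim3 grid((w + COLUMNS_BLOCKDIM_X - 1) / COLUMNS_BLOCKDIM_X,
              (h + COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y - 1) / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
    hipLaunchKernelGGL((GaussianBlurKernelCol), grid, block, 0, 0,
                       d_src, d_dst, w, h, ksize, pitch);
}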
__global__ void GaussianBlurKernelRTex(float *out,int w,int h,int ksize)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int b = (ksize -1) /2;
if(x>=b && x<w-b && y>=0 && y<h){
#pragma unroll
for(int i = 0;i<ksize;i++){
if(i<b){
out[y*w+x] += tex1Dfetch(texRef,y*w+x-b+i)*coeffGaussKernel[i];
}
else{
out[y*w+x] += tex1Dfetch(texRef,y*w+x+i-b)*coeffGaussKernel[i];
}
}
}
}
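// Note on the texture path above (illustrative; the actual binding is done by the caller):
// texRef has to be bound to the linear source buffer before launching, e.g.
//   hipBindTexture(0, texRef, d_src, w * h * sizeof(float));
// and released afterwards with hipUnbindTexture(texRef); otherwise tex1Dfetch() returns
// undefined data. This kernel only performs the row filtering, mirroring the non-texture path.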
__global__ void differenceImg(float *d_Octave0,float *d_Octave1,float *d_diffOctave,int pitch,int height){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int index = y * pitch + x;
if(y<height)
d_diffOctave[index] = (d_Octave1[index] - d_Octave0[index]);
}
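// Illustrative use of differenceImg (a sketch; the wrapper name and the 32x8 launch shape are
// mine, and pitch -- in floats -- is assumed to be a multiple of 32 as returned by the
// pitched allocator). One DoG level is the difference of two adjacent Gaussian levels:
static inline void launchDoGLevel(float *d_gaussLower, float *d_gaussUpper, float *d_dog,
                                  int pitch, int height)
{
    dim3 block(32, 8);
    dim3 grid(pitch / block.x, (height + block.y - 1) / block.y);
    // d_dog = d_gaussUpper - d_gaussLower, matching the kernel's argument order
    hipLaunchKernelGGL((differenceImg), grid, block, 0, 0,
                       d_gaussLower, d_gaussUpper, d_dog, pitch, height);
}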
__global__ void findScaleSpaceExtrema(float *prev,float *img,float *next,float *d_point,int width ,int pitch ,int height)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// if(x<1 || y<1 || x>=width-1 || y>=height-1)
// return;
const int BLOCKDIMX = 32;
const int BLOCKDIMY = 8;
__shared__ float Mem0[BLOCKDIMY+2][BLOCKDIMX+2];
__shared__ float Mem1[BLOCKDIMY+2][BLOCKDIMX+2];
__shared__ float Mem2[BLOCKDIMY+2][BLOCKDIMX+2];
//the count of the extrema points in current block;
__shared__ unsigned int cnt;
//points storage in shared memory
__shared__ unsigned short points[96];
// float *ptr0 = prev[y * pitch + x];
// float *ptr1 = img[y * pitch + x];
// float *ptr2 = next[y * pitch + x];
prev += ( y-1 ) * pitch + x - 1;
img += ( y-1 ) * pitch + x - 1;
next += ( y-1 ) * pitch + x - 1;
Mem0[ty][tx] = (x<0||y<0)? 0:prev[0];
Mem1[ty][tx] = (x<0||y<0)? 0:img[0];
Mem2[ty][tx] = (x<0||y<0)? 0:next[0];
// Mem1[ty][32] = -400;
// Mem1[8][tx] = -400;
// Mem1[8][32] = -400;
//prev[0] = 250;
if(tx == 0 && ty == 0){
#pragma unroll
for(int i = BLOCKDIMY;i<BLOCKDIMY + 2;i++)
#pragma unroll
for(int j = 0;j<BLOCKDIMX+2;j++){
Mem0[i][j] = (x<width||y<height)? prev[i*pitch + j]:0;
Mem1[i][j] = (x<width||y<height)? img[i*pitch + j]:0;
Mem2[i][j] = (x<width||y<height)? next[i*pitch + j]:0;
}
#pragma unroll
for(int i = 0;i<BLOCKDIMY;i++)
#pragma unroll
for(int j = BLOCKDIMX;j<2+BLOCKDIMX;j++){
Mem0[i][j] = (x<width||y<height)? prev[i*pitch + j]:0;
Mem1[i][j] = (x<width||y<height)? img[i*pitch + j]:0;
Mem2[i][j] = (x<width||y<height)? next[i*pitch + j]:0;
}
cnt = 0;
//for points count synchronism
}
__syncthreads();
prev += pitch + 1;
img += pitch + 1;
next += pitch + 1;
// prev[0] = Mem0[ty+1][tx+1] + 200;
// img[0] = Mem1[ty+1][tx+1] + 200;
// next[0] = Mem2[ty+1][tx+1] + 200 ;
//next[0] = Mem2[ty+1][tx+1]*50 ;
const int threshold = int(0.5 * 0.04 / 3 * 255);
float val = img[0];
int c = 0;
int step = pitch;
float *currptr = img;
float *nextptr = next;
float *prevptr = prev;
if( std::abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
int pos = atomicInc(&cnt, 31);
points[3*pos+0] = x;
points[3*pos+1] = y;
//points[3*pos+2] = scale;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>=2000 ? 2000-1 : idx);
d_point[idx*2] = x;
d_point[idx*2+1] = y;
printf("cnt : %d , x = %d , y = %d,asd: %f \n",idx,x,y,d_point[idx*2]);
}
}
__device__ void addpoint(){
}
//////
/// \brief findScaleSpaceExtrema
/// \param d_point
/// \param s
/// \param width
/// \param pitch
/// \param height
/// \param threshold
/// \param nOctaveLayers
/// \param maxNum
////////////
/// s is the index in dog
__global__ void findScaleSpaceExtrema(float *d_point,int s, int width ,int pitch ,int height,const int threshold,const int nOctaveLayers,const int maxNum){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
//avoid extracting the unstable border points
if(y >= height - SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER || x<SIFT_IMG_BORDER || y<SIFT_IMG_BORDER)
return;
float *currptr = pd[s] +y*pitch+x;
float *prevptr = pd[s-1]+y*pitch+x;
float *nextptr = pd[s+1]+y*pitch+x;
int o = s/(nOctaveLayers+2);
float val = *currptr;
int step = pitch;
int c = 0;
if( abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
/*adjustLocalExtrema*/
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float Vs=0, Vx=0, Vy=0, contr=0;
float dx,dy,ds,dxx,dyy,dxy;
int j = 0,layer;
//get the x,y,s,Vs,Vx,Vy or return
for( ; j < SIFT_MAX_INTERP_STEPS; j++ )
{
currptr = pd[s] +y*pitch+x;
prevptr = pd[s-1]+y*pitch+x;
nextptr = pd[s+1]+y*pitch+x;
//the first derivative of x,y and scale
dx = (currptr[1] - currptr[-1])*deriv_scale;
dy = (currptr[pitch] - currptr[-pitch])*deriv_scale;;
ds = (nextptr[0] - prevptr[0])*deriv_scale;
float v2 = currptr[0]*2;
//the second derivative of x,y,scale
dxx = (currptr[1] + currptr[-1] - v2)*second_deriv_scale;
dyy = (currptr[pitch] + currptr[-pitch] - v2)*second_deriv_scale;
float dss = (nextptr[0] + prevptr[0] - v2)*second_deriv_scale;
dxy = (currptr[pitch+1] - currptr[1-pitch] -
currptr[-1+pitch] + currptr[-pitch-1])*cross_deriv_scale;
float dxs = (nextptr[1] - nextptr[-1] -
prevptr[1] + prevptr[-1])*cross_deriv_scale;
float dys = (nextptr[pitch] - nextptr[-pitch] -
prevptr[pitch] + prevptr[-pitch])*cross_deriv_scale;
//algebraic cofactors of the Hessian (entries of its adjugate)
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
//idet is the reciprocal of the Hessian's determinant
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
////////////////////////
/// A(dxx, dxy, dxs,
/// dxy, dyy, dys,
/// dxs, dys, dss);
///
/// A*(idxx, idxy, idxs,
/// idxy, idyy, idys,
/// idxs, idys, idss);
///
/// B(dx,dy,dz)
/////////////////////////
//dX = (A^-1)*B
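// i.e. this solves the Newton step of the DoG Taylor expansion: the id** terms above are the
// cofactors of the 3x3 Hessian A, idet = 1/det(A), so A^{-1} = adj(A)*idet and
// pdx, pdy, pds below are the components of A^{-1}*B; the sub-pixel offset is their
// negative (Vx, Vy, Vs).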
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
Vx = -pdx;
Vy = -pdy;
Vs = -pds;
//because the convergence test runs before the values are updated, this
//iteration finally yields integer x,y,s and offsets Vx,Vy,Vs with magnitude < 0.5.
//The refined extremum location is (x+Vx, y+Vy).
if( abs(Vs) < 0.5f && abs(Vx) < 0.5f && abs(Vy) < 0.5f )
break;
//round to the nearest integer
x += int(Vx > 0 ? ( Vx + 0.5 ) : (Vx - 0.5));
y += int(Vy > 0 ? ( Vy + 0.5 ) : (Vy - 0.5));
s += int(Vs > 0 ? ( Vs + 0.5 ) : (Vs - 0.5));
layer = s - o*(nOctaveLayers+2);
if( layer < 1 || layer > nOctaveLayers ||
y < SIFT_IMG_BORDER || y >= height - SIFT_IMG_BORDER ||
x < SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER )
return;
}//for
if( j >= SIFT_MAX_INTERP_STEPS )
return;
//After the iterative,get the x,y,s,(Vx,Vy,Vs)(<0.5).
{
//discard low-contrast points, which are easily influenced by image noise
float t = dx*Vx + dy*Vy + ds*Vs;
contr = currptr[0]*img_scale + t * 0.5f;
if( abs( contr ) * nOctaveLayers < 0.04 )
return;
// principal curvatures are computed using the trace and det of Hessian
float tr = dxx + dyy;
float det = dxx*dyy-dxy*dxy;
if( det <= 0 || tr*tr*10 >= (10 + 1)*(10 + 1)*det )
return;
}
layer = s - o*(nOctaveLayers+2);
#if 1
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
#else
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
#endif
/******************calOrientationHist*****************/
{
//currptr is the DoG image that contains the current extremum.
//x,y,s is the current location in the DoG images.
//Note: s is the absolute scale index, while 'layer' is the relative
//position inside the octave, ranging from 1 to nOctaveLayers.
//The orientation is computed on the Gaussian pyramid, so currptr is re-pointed:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
//sigma*2^(layer/S): the scale (sigma) relative to the octave.
float scl_octv = size*0.5f/(1 << o);
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
//float hist[n];
//process a square neighborhood of (2*radius+1)^2 samples around the point
int k, len = (radius*2+1)*(radius*2+1);
//Gaussian weighting coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = new float[len*4 + n+4 + n];
//buf holds the temporary arrays: the first len floats are shared by X and Mag,
//the next len by Y, then Ori and the Gaussian weights W (len each),
//followed by 2 floats of padding, the padded temphist (n+2) and the smoothed hist (n).
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
//gradient orientation histogram
float* temphist = W + len + 2,*hist = temphist+n+2;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
// the '<='/'>=' comparisons also exclude the border row, so the i-1/i+1 and j-1/j+1 reads below stay inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the gradient's y axis points up while image rows increase downward,
//so dy = I(y-1) - I(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
X[k] = dx;
Y[k] = dy;
//the weight omits the Gaussian normalization constant because only the
//relative weights matter for the orientation histogram.
W[k] = __expf((i*i + j*j)*expf_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
//cvRound((ori/pi+180)/360*36)
float tembin = __fdividef(__fdividef(Ori[k]*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
// if(k == 0)
// printf("temphist[%d]: %f , Mag[k] : %f , Y[k] : %f \n",bin,temphist[bin],Mag[k],Y[k]);
//printf("bin : %d , Mag[k]: %f, W[k]: %f ,temphist[bin] %f \n",bin,Mag[k],W[k],temphist[bin]);
//printf("Mag[k] : %f, X[k] : %f , Y[k] : %f \n",Mag[k],X[k],Y[k]);
k++;
}
}
//printf("pixel : %f \n",currptr[0]);
// for(int i = 0;i<len;i++)
// {
// Ori[i] = atan2f(Y[i],X[i]);
// Mag[i] = sqrtf(Y[i]*Y[i]+X[i]*X[i]);
// }
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
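//every local histogram peak within SIFT_ORI_PEAK_RATIO (0.8 in OpenCV's SIFT)
//of the global maximum becomes a keypoint orientation; the peak position is
//refined below by parabolic interpolation over its two neighbouring bins.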
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
// kpt.angle = 360.f - (float)((360.f/n) * bin);
// if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
// kpt.angle = 0.f;
//addpoint;
#if 1
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
// kpt.pt.x = (c + xc) * (1 << octv);
// kpt.pt.y = (r + xr) * (1 << octv);
// kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
// //why '*2'
// kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
// kpt.response = std::abs(contr);
#else
#endif
}
}
delete []buf;
}//orientation
}//extrema
}
__device__ void calcDescriptors(float* currptr,int x,int y,float scl_octv,int pitch,int width,int height,float ori,float* d_decriptor,int index)
{
//description array
//calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
//x,y,360-angle,scl,d,n
//static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
//x,y are coordinates in the Gaussian-pyramid image
//scl_octv is the scale relative to the octave
//x,y,scl_octv were computed by the caller
/******************calcDescriptor*****************/
int d = SIFT_DESCR_WIDTH,n = SIFT_DESCR_HIST_BINS;
ori = 360.f - ori;
if(std::abs(ori - 360.f) < FLT_EPSILON)
ori = 0.f;
float cos_t = cosf(ori*(float)(CV_PI/180));
float sin_t = sinf(ori*(float)(CV_PI/180));
//n=8
float bins_per_rad = n / 360.f;
float exp_scale = -1.f/(d * d * 0.5f);
//3*scale,normalized 3*scale to 1
float hist_width = SIFT_DESCR_SCL_FCTR * scl_octv;
int radius = int(hist_width * 1.4142135623730951f * (d + 1) * 0.5f+0.5);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = min(radius, (int) sqrt(((double) width)*width + ((double) height)*height));
cos_t /= hist_width;
sin_t /= hist_width;
//len: number of samples in the window; histlen: padded histogram size (d+2)*(d+2)*(n+2)
int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2);
float dst[SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS];
int rows = height, cols = width;
float *buf = new float[len*6 + histlen];
//Memory arrangement inside buf (Mag aliases Y):
//  X | Y/Mag | Ori | W | RBin | CBin | hist
float *X = buf, *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len;
float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len;
//zero-initialize hist because the loops below accumulate with '+='
for( i = 0; i < d+2; i++ )
{
for( j = 0; j < d+2; j++ )
for( k = 0; k < n+2; k++ )
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
}
//traverse the bounding square and compute, for every sample,
//1. dx,dy,weight,orientation and magnitude in image coordinates
//2. its (row,col) position in the rotated d x d descriptor grid
for( i = -radius, k = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
// Calculate sample's histogram array coords rotated relative to ori.
// Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
// r_rot = 1.5) have full weight placed in row 1 after interpolation.
float c_rot = j * cos_t - i * sin_t;
float r_rot = j * sin_t + i * cos_t;
float rbin = r_rot + d/2 - 0.5f;
float cbin = c_rot + d/2 - 0.5f;
int r = y + i, c = x + j;
//d = 4
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
{
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the gradient's y axis points up while image rows increase downward,
//so dy = I(y-1) - I(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
// float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
// float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
// W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
W[k] = __expf((c_rot * c_rot + r_rot * r_rot)*exp_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
k++;
}
}
k = 0;
for( ; k < len; k++ )
{
float rbin = RBin[k], cbin = CBin[k];
float obin = (Ori[k] - ori)*bins_per_rad;
float mag = Mag[k]*W[k];
//floor to the containing bin and keep the fractional part for interpolation
int r0 = (int)floorf(rbin);
int c0 = (int)floorf(cbin);
int o0 = (int)floorf(obin);
rbin -= r0;
cbin -= c0;
obin -= o0;
if( o0 < 0 )
o0 += n;
if( o0 >= n )
o0 -= n;
// histogram update using tri-linear interpolation
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
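//the eight v_rco*** weights split mag across the 2x2x2 neighbouring
//(row, column, orientation) bins according to the fractional parts
//rbin, cbin and obin.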
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
hist[idx] += v_rco000;
hist[idx+1] += v_rco001;
hist[idx+(n+2)] += v_rco010;
hist[idx+(n+3)] += v_rco011;
hist[idx+(d+2)*(n+2)] += v_rco100;
hist[idx+(d+2)*(n+2)+1] += v_rco101;
hist[idx+(d+3)*(n+2)] += v_rco110;
hist[idx+(d+3)*(n+2)+1] += v_rco111;
}
// finalize histogram, since the orientation histograms are circular
for( i = 0; i < d; i++ )
for( j = 0; j < d; j++ )
{
int idx = ((i+1)*(d+2) + (j+1))*(n+2);
hist[idx] += hist[idx+n];
hist[idx+1] += hist[idx+n+1];
for( k = 0; k < n; k++ )
dst[(i*d + j)*n + k] = hist[idx+k];
}
// copy histogram to the descriptor,
// apply hysteresis thresholding
// and scale the result, so that it can be easily converted
// to byte array
float nrm2 = 0;
len = d*d*n;
k = 0;
for( ; k < len; k++ )
nrm2 += dst[k]*dst[k];
float thr = sqrtf(nrm2)*SIFT_DESCR_MAG_THR;
i = 0, nrm2 = 0;
for( ; i < len; i++ )
{
float val = min(dst[i], thr);
dst[i] = val;
nrm2 += val*val;
}
nrm2 = SIFT_INT_DESCR_FCTR/max(sqrtf(nrm2), FLT_EPSILON);
//reset k: it reached len in the norm accumulation above
k = 0;
for( ; k < len; k++ )
{
//dst[k] = (uchar)(dst[k]*nrm2);
d_decriptor[index*len + k] = (uchar)(dst[k]*nrm2);
}
delete []buf;
}
__global__ void calcPerOctaveLayers(float *d_point,float* d_decriptor,int s, int width ,int pitch ,int height,const int threshold,const int nOctaveLayers,const int maxNum){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
//skip unstable candidates too close to the image border
if(y >= height - SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER || x<SIFT_IMG_BORDER || y<SIFT_IMG_BORDER)
return;
float *currptr = pd[s] +y*pitch+x;
float *prevptr = pd[s-1]+y*pitch+x;
float *nextptr = pd[s+1]+y*pitch+x;
int o = s/(nOctaveLayers+2);
float val = *currptr;
int step = pitch;
int c = 0;
if( abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
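//the candidate is an extremum of its 3x3x3 DoG neighbourhood; refine its
//position below with up to SIFT_MAX_INTERP_STEPS Newton iterations and
//discard it if the fit fails to converge or drifts out of the valid range.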
/*adjustLocalExtrema*/
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float Vs=0, Vx=0, Vy=0, contr=0;
float dx,dy,ds,dxx,dyy,dxy;
int j = 0,layer;
//get the x,y,s,Vs,Vx,Vy or return
for( ; j < SIFT_MAX_INTERP_STEPS; j++ )
{
currptr = pd[s] +y*pitch+x;
prevptr = pd[s-1]+y*pitch+x;
nextptr = pd[s+1]+y*pitch+x;
//the first derivative of x,y and scale
dx = (currptr[1] - currptr[-1])*deriv_scale;
dy = (currptr[pitch] - currptr[-pitch])*deriv_scale;
ds = (nextptr[0] - prevptr[0])*deriv_scale;
float v2 = currptr[0]*2;
//the second derivative of x,y,scale
dxx = (currptr[1] + currptr[-1] - v2)*second_deriv_scale;
dyy = (currptr[pitch] + currptr[-pitch] - v2)*second_deriv_scale;
float dss = (nextptr[0] + prevptr[0] - v2)*second_deriv_scale;
dxy = (currptr[pitch+1] - currptr[1-pitch] -
currptr[-1+pitch] + currptr[-pitch-1])*cross_deriv_scale;
float dxs = (nextptr[1] - nextptr[-1] -
prevptr[1] + prevptr[-1])*cross_deriv_scale;
float dys = (nextptr[pitch] - nextptr[-pitch] -
prevptr[pitch] + prevptr[-pitch])*cross_deriv_scale;
//cofactor (adjugate) entries of the 3x3 Hessian
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
//idet is the reciprocal of the Hessian determinant
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
////////////////////////
/// A(dxx, dxy, dxs,
/// dxy, dyy, dys,
/// dxs, dys, dss);
///
/// A*(idxx, idxy, idxs,
/// idxy, idyy, idys,
/// idxs, idys, idss);
///
/// B(dx,dy,dz)
/////////////////////////
//dX = (A^-1)*B
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
Vx = -pdx;
Vy = -pdy;
Vs = -pds;
//the convergence test runs before the position update, so when the loop
//exits here x,y,s are integers and |Vx|,|Vy|,|Vs| < 0.5;
//the refined extremum location is (x+Vx, y+Vy).
if( abs(Vs) < 0.5f && abs(Vx) < 0.5f && abs(Vy) < 0.5f )
break;
//round to the nearest integer for the next iteration
x += int(Vx > 0 ? ( Vx + 0.5 ) : (Vx - 0.5));
y += int(Vy > 0 ? ( Vy + 0.5 ) : (Vy - 0.5));
s += int(Vs > 0 ? ( Vs + 0.5 ) : (Vs - 0.5));
layer = s - o*(nOctaveLayers+2);
if( layer < 1 || layer > nOctaveLayers ||
y < SIFT_IMG_BORDER || y >= height - SIFT_IMG_BORDER ||
x < SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER )
return;
}//for
if( j >= SIFT_MAX_INTERP_STEPS )
return;
//after the iteration we have integer x,y,s and sub-pixel offsets (Vx,Vy,Vs), each below 0.5
{
//reject low-contrast points that are easily influenced by image noise
float t = dx*Vx + dy*Vy + ds*Vs;
contr = currptr[0]*img_scale + t * 0.5f;
if( abs( contr ) * nOctaveLayers < 0.04 )
return;
// principal curvatures are computed using the trace and det of Hessian
float tr = dxx + dyy;
float det = dxx*dyy-dxy*dxy;
if( det <= 0 || tr*tr*10 >= (10 + 1)*(10 + 1)*det )
return;
}
layer = s - o*(nOctaveLayers+2);
#if 1
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
#else
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
#endif
float ori = 0.;
float scl_octv = size*0.5f/(1 << o);
unsigned int idx_arr[2];
float ori_arr[2];
int num_idx = 0;
/******************calOrientationHist*****************/
{
//currptr pointed into the DoG image containing the extremum;
//x,y,s are its integer coordinates in the DoG pyramid.
//Note: s is the absolute scale index, while 'layer' is the position
//inside the octave and ranges from 1 to nOctaveLayers.
//The orientation is computed on the Gaussian pyramid, so re-base currptr:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
//scl_octv (computed above) is sigma*2^s/S, the scale relative to the octave.
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
//float hist[n];
//process a square neighborhood of (2*radius+1)^2 samples around the point
int k, len = (radius*2+1)*(radius*2+1);
//Gaussian weighting coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = new float[len*4 + n+4 + n];
//buf holds the temporary arrays: the first len floats are shared by X and Mag,
//the next len by Y, then Ori and the Gaussian weights W (len each),
//followed by 2 floats of padding, the padded temphist (n+2) and the smoothed hist (n).
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
//gradient orientation histogram
float* temphist = W + len + 2,*hist = temphist+n+2;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
// the '<='/'>=' comparisons also exclude the border row, so the i-1/i+1 and j-1/j+1 reads below stay inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the gradient's y axis points up while image rows increase downward,
//so dy = I(y-1) - I(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
X[k] = dx;
Y[k] = dy;
//the weight omits the Gaussian normalization constant because only the
//relative weights matter for the orientation histogram.
W[k] = __expf((i*i + j*j)*expf_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
//cvRound((ori/pi+180)/360*36)
float tembin = __fdividef(__fdividef(Ori[k]*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
// if(k == 0)
// printf("temphist[%d]: %f , Mag[k] : %f , Y[k] : %f \n",bin,temphist[bin],Mag[k],Y[k]);
//printf("bin : %d , Mag[k]: %f, W[k]: %f ,temphist[bin] %f \n",bin,Mag[k],W[k],temphist[bin]);
//printf("Mag[k] : %f, X[k] : %f , Y[k] : %f \n",Mag[k],X[k],Y[k]);
k++;
}
}
//printf("pixel : %f \n",currptr[0]);
// for(int i = 0;i<len;i++)
// {
// Ori[i] = atan2f(Y[i],X[i]);
// Mag[i] = sqrtf(Y[i]*Y[i]+X[i]*X[i]);
// }
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
// kpt.angle = 360.f - (float)((360.f/n) * bin);
// if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
// kpt.angle = 0.f;
//addpoint;
#if 1
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
ori = 360.f - (float)((360.f/n) * bin);
if(abs(ori - 360.f) < FLT_EPSILON)
ori = 0.f;
d_point[idx*KEYPOINTS_SIZE+5] = ori;
// kpt.pt.x = (c + xc) * (1 << octv);
// kpt.pt.y = (r + xr) * (1 << octv);
// kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
// //why '*2'
// kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
// kpt.response = std::abs(contr);
if(num_idx < 2){
//keep at most two orientations so ori_arr/idx_arr (size 2) cannot overflow
ori_arr[num_idx] = ori;
idx_arr[num_idx] = idx;
num_idx++;
}
#else
#endif
}
}
delete []buf;
}//orientation
num_idx = min(num_idx,2);
for(int i = 0;i<num_idx;i++)
//use the i-th orientation and keypoint index, not num_idx
calcDescriptors(currptr,x,y,scl_octv,pitch,width,height,ori_arr[i],d_decriptor,idx_arr[i]);
}//extrema
}
__global__ void findScaleSpaceExtrema_gpu(float *d_point,int s, int width ,int pitch ,int height,const int threshold,const int nOctaveLayers,const int maxNum){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
//skip unstable candidates too close to the image border
if(y >= height - SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER || x<SIFT_IMG_BORDER || y<SIFT_IMG_BORDER)
return;
float *currptr = pd[s] +y*pitch+x;
float *prevptr = pd[s-1]+y*pitch+x;
float *nextptr = pd[s+1]+y*pitch+x;
int o = s/(nOctaveLayers+2);
float val = *currptr;
int step = pitch;
int c = 0;
if( abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
/*adjustLocalExtrema*/
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float Vs=0, Vx=0, Vy=0, contr=0;
float dx,dy,ds,dxx,dyy,dxy;
int j = 0,layer;
//get the x,y,s,Vs,Vx,Vy or return
for( ; j < SIFT_MAX_INTERP_STEPS; j++ )
{
currptr = pd[s] +y*pitch+x;
prevptr = pd[s-1]+y*pitch+x;
nextptr = pd[s+1]+y*pitch+x;
//the first derivative of x,y and scale
dx = (currptr[1] - currptr[-1])*deriv_scale;
dy = (currptr[pitch] - currptr[-pitch])*deriv_scale;
ds = (nextptr[0] - prevptr[0])*deriv_scale;
float v2 = currptr[0]*2;
//the second derivative of x,y,scale
dxx = (currptr[1] + currptr[-1] - v2)*second_deriv_scale;
dyy = (currptr[pitch] + currptr[-pitch] - v2)*second_deriv_scale;
float dss = (nextptr[0] + prevptr[0] - v2)*second_deriv_scale;
dxy = (currptr[pitch+1] - currptr[1-pitch] -
currptr[-1+pitch] + currptr[-pitch-1])*cross_deriv_scale;
float dxs = (nextptr[1] - nextptr[-1] -
prevptr[1] + prevptr[-1])*cross_deriv_scale;
float dys = (nextptr[pitch] - nextptr[-pitch] -
prevptr[pitch] + prevptr[-pitch])*cross_deriv_scale;
//cofactor (adjugate) entries of the 3x3 Hessian
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
//idet is the reciprocal of the Hessian determinant
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
////////////////////////
/// A(dxx, dxy, dxs,
/// dxy, dyy, dys,
/// dxs, dys, dss);
///
/// A*(idxx, idxy, idxs,
/// idxy, idyy, idys,
/// idxs, idys, idss);
///
/// B(dx,dy,dz)
/////////////////////////
//dX = (A^-1)*B
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
Vx = -pdx;
Vy = -pdy;
Vs = -pds;
//the convergence test runs before the position update, so when the loop
//exits here x,y,s are integers and |Vx|,|Vy|,|Vs| < 0.5;
//the refined extremum location is (x+Vx, y+Vy).
if( abs(Vs) < 0.5f && abs(Vx) < 0.5f && abs(Vy) < 0.5f )
break;
//round to the nearest integer for the next iteration
x += int(Vx > 0 ? ( Vx + 0.5 ) : (Vx - 0.5));
y += int(Vy > 0 ? ( Vy + 0.5 ) : (Vy - 0.5));
s += int(Vs > 0 ? ( Vs + 0.5 ) : (Vs - 0.5));
layer = s - o*(nOctaveLayers+2);
if( layer < 1 || layer > nOctaveLayers ||
y < SIFT_IMG_BORDER || y >= height - SIFT_IMG_BORDER ||
x < SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER )
return;
}//for
if( j >= SIFT_MAX_INTERP_STEPS )
return;
//after the iteration we have integer x,y,s and sub-pixel offsets (Vx,Vy,Vs), each below 0.5
{
//reject low-contrast points that are easily influenced by image noise
float t = dx*Vx + dy*Vy + ds*Vs;
contr = currptr[0]*img_scale + t * 0.5f;
if( abs( contr ) * nOctaveLayers < 0.04 )
return;
// principal curvatures are computed using the trace and det of Hessian
float tr = dxx + dyy;
float det = dxx*dyy-dxy*dxy;
if( det <= 0 || tr*tr*10 >= (10 + 1)*(10 + 1)*det )
return;
}
layer = s - o*(nOctaveLayers+2);
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
d_point[idx*KEYPOINTS_SIZE+6] = s;
d_point[idx*KEYPOINTS_SIZE+7] = x;
d_point[idx*KEYPOINTS_SIZE+8] = y;
//temsize+=size*0.5f/(1 << o)*SIFT_ORI_RADIUS+0.5;
float scl_octv = size*0.5f/(1 << o);
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5;
//process a square neighborhood of (2*radius+1)^2 samples around the point
int len = (radius*2+1)*(radius*2+1);
//int temBuffSize = len*4+2*SIFT_ORI_HIST_BINS+2;
atomicMax(&temsize,len);
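//temsize records the largest orientation window over all keypoints so the
//host can size the per-keypoint scratch buffer before launching the
//orientation kernel.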
}
}
__global__ void calcOrientationHist_gpu(float *d_point,float* temdata,const int buffSize,const int pointsNum,const int maxNum,const int nOctaveLayers)
{
//int x = blockIdx.x*blockDim.x+threadIdx.x;
int pointIndex = blockIdx.x*blockDim.x+threadIdx.x;
if(pointIndex>=pointsNum)
return;
#define SHAREMEMORY
#ifdef SHAREMEMORY
__shared__ float s_point[BLOCK_SIZE_ONE_DIM*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE] =d_point[pointIndex*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE+1] =d_point[pointIndex*KEYPOINTS_SIZE+1];
s_point[threadIdx.x*KEYPOINTS_SIZE+2] =d_point[pointIndex*KEYPOINTS_SIZE+2];
s_point[threadIdx.x*KEYPOINTS_SIZE+3] =d_point[pointIndex*KEYPOINTS_SIZE+3];
s_point[threadIdx.x*KEYPOINTS_SIZE+4] =d_point[pointIndex*KEYPOINTS_SIZE+4];
s_point[threadIdx.x*KEYPOINTS_SIZE+5] =d_point[pointIndex*KEYPOINTS_SIZE+5];
s_point[threadIdx.x*KEYPOINTS_SIZE+6] =d_point[pointIndex*KEYPOINTS_SIZE+6];
s_point[threadIdx.x*KEYPOINTS_SIZE+7] =d_point[pointIndex*KEYPOINTS_SIZE+7];
s_point[threadIdx.x*KEYPOINTS_SIZE+8] =d_point[pointIndex*KEYPOINTS_SIZE+8];
__syncthreads();
float size =s_point[threadIdx.x*KEYPOINTS_SIZE+3];
int s = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
int x = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
int y = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
#else
float size =d_point[pointIndex*KEYPOINTS_SIZE+3];
int s = d_point[pointIndex*KEYPOINTS_SIZE+6];
int x = d_point[pointIndex*KEYPOINTS_SIZE+7];
int y = d_point[pointIndex*KEYPOINTS_SIZE+8];
#endif
int o = s/(nOctaveLayers+2);
int layer = s - o*(nOctaveLayers+2);
int width = d_oIndex[o*3];
int height = d_oIndex[o*3+1];
int pitch = d_oIndex[o*3+2];
float* currptr;
//currptr pointed into the DoG image containing the extremum;
//x,y,s are its integer coordinates in the DoG pyramid.
//Note: s is the absolute scale index, while 'layer' is the position
//inside the octave and ranges from 1 to nOctaveLayers.
//The orientation is computed on the Gaussian pyramid, so re-base currptr:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
//sigma*2^s/S: the scale of the keypoint relative to its octave.
float scl_octv = size*0.5f/(1 << o);
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
float* hists = new float[2*n+4];
//process a square neighborhood of (2*radius+1)^2 samples around the point
int len = (radius*2+1)*(radius*2+1);
//Gaussian weighting coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = temdata+pointIndex*buffSize;
//float *buf = (float *)malloc((len*4 + n+4 + n)*sizeof(float));
//buf points into the preallocated per-keypoint scratch buffer: X (aliased by
//Mag), Y, Ori and W, each len floats; the padded temphist and the smoothed
//hist live in the small heap array hists allocated above.
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
//gradient orientation histogram
float* temphist = hists + 2;
float* hist = temphist + 2+n;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
// if(radius > 16)
// printf("radius: %d, point index : %d\n",radius,pointIndex);
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
// the '<='/'>=' comparisons also exclude the border row, so the i-1/i+1 and j-1/j+1 reads below stay inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the gradient's y axis points up while image rows increase downward,
//so dy = I(y-1) - I(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
X[k] = dx;
Y[k] = dy;
//the weight omits the Gaussian normalization constant because only the
//relative weights matter for the orientation histogram.
W[k] = __expf((i*i + j*j)*expf_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
//cvRound((ori/pi+180)/360*36)
float tembin = __fdividef(__fdividef(Ori[k]*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
k++;
}
}
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
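//the dominant peak overwrites the angle of the existing keypoint in place;
//every secondary peak above mag_thr spawns a duplicate keypoint that differs
//only in its orientation.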
#ifdef SHAREMEMORY
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = s_point[threadIdx.x*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = s_point[threadIdx.x*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = s_point[threadIdx.x*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = s_point[threadIdx.x*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = s_point[threadIdx.x*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
d_point[idx*KEYPOINTS_SIZE+6] = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
d_point[idx*KEYPOINTS_SIZE+7] = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
d_point[idx*KEYPOINTS_SIZE+8] = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
}
#else
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = d_point[pointIndex*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = d_point[pointIndex*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = d_point[pointIndex*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = d_point[pointIndex*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = d_point[pointIndex*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
}
#endif
}
}
delete []hists;
}
__global__ void calcOrientationHist_gpu1(float *d_point,float* temdata,const int buffSize,const int pointsNum,const int maxNum,const int nOctaveLayers)
{
//int x = blockIdx.x*blockDim.x+threadIdx.x;
int pointIndex = blockIdx.x*blockDim.x+threadIdx.x;
if(pointIndex>=pointsNum)
return;
#define SHAREMEMORY
#ifdef SHAREMEMORY
__shared__ float s_point[BLOCK_SIZE_ONE_DIM*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE] =d_point[pointIndex*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE+1] =d_point[pointIndex*KEYPOINTS_SIZE+1];
s_point[threadIdx.x*KEYPOINTS_SIZE+2] =d_point[pointIndex*KEYPOINTS_SIZE+2];
s_point[threadIdx.x*KEYPOINTS_SIZE+3] =d_point[pointIndex*KEYPOINTS_SIZE+3];
s_point[threadIdx.x*KEYPOINTS_SIZE+4] =d_point[pointIndex*KEYPOINTS_SIZE+4];
s_point[threadIdx.x*KEYPOINTS_SIZE+5] =d_point[pointIndex*KEYPOINTS_SIZE+5];
s_point[threadIdx.x*KEYPOINTS_SIZE+6] =d_point[pointIndex*KEYPOINTS_SIZE+6];
s_point[threadIdx.x*KEYPOINTS_SIZE+7] =d_point[pointIndex*KEYPOINTS_SIZE+7];
s_point[threadIdx.x*KEYPOINTS_SIZE+8] =d_point[pointIndex*KEYPOINTS_SIZE+8];
__syncthreads();
float size =s_point[threadIdx.x*KEYPOINTS_SIZE+3];
int s = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
int x = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
int y = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
#else
float size =d_point[pointIndex*KEYPOINTS_SIZE+3];
int s = d_point[pointIndex*KEYPOINTS_SIZE+6];
int x = d_point[pointIndex*KEYPOINTS_SIZE+7];
int y = d_point[pointIndex*KEYPOINTS_SIZE+8];
#endif
int o = s/(nOctaveLayers+2);
int layer = s - o*(nOctaveLayers+2);
int width = d_oIndex[o*3];
int height = d_oIndex[o*3+1];
int pitch = d_oIndex[o*3+2];
float* currptr;
//currptr pointed into the DoG image containing the extremum;
//x,y,s are its integer coordinates in the DoG pyramid.
//Note: s is the absolute scale index, while 'layer' is the position
//inside the octave and ranges from 1 to nOctaveLayers.
//The orientation is computed on the Gaussian pyramid, so re-base currptr:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
//sigma*2^s/S: the scale of the keypoint relative to its octave.
float scl_octv = size*0.5f/(1 << o);
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
float* hists = new float[2*n+4];
//process a square neighborhood of (2*radius+1)^2 samples around the point
int len = (radius*2+1)*(radius*2+1);
//Gaussian weighting coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = temdata+pointIndex*buffSize;
//float *buf = (float *)malloc((len*4 + n+4 + n)*sizeof(float));
//buf points into the preallocated per-keypoint scratch buffer: X (aliased by
//Mag), Y, Ori and W, each len floats; the padded temphist and the smoothed
//hist live in the small heap array hists allocated above.
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
//gradient orientation histogram
float* temphist = hists + 2;
float* hist = temphist + 2+n;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
// if(radius > 16)
// printf("radius: %d, point index : %d\n",radius,pointIndex);
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
// the '<='/'>=' comparisons also exclude the border row, so the i-1/i+1 and j-1/j+1 reads below stay inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the gradient's y axis points up while image rows increase downward,
//so dy = I(y-1) - I(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
//X[k] = dx;
//Y[k] = dy;
//the weight omits the Gaussian normalization constant because only the
//relative weights matter for the orientation histogram.
float wk,ok,mk;
// W[k] = __expf((i*i + j*j)*expf_scale);
// Ori[k] = atan2f(dy,dx);
// Mag[k] = sqrtf(dy*dy+dx*dx);
wk = __expf((i*i + j*j)*expf_scale);
ok = atan2f(dy,dx);
mk = sqrtf(dy*dy+dx*dx);
//cvRound((ori/pi+180)/360*36)
float tembin = __fdividef(__fdividef(ok*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += wk*mk;
k++;
}
}
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
#ifdef SHAREMEMORY
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = s_point[threadIdx.x*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = s_point[threadIdx.x*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = s_point[threadIdx.x*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = s_point[threadIdx.x*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = s_point[threadIdx.x*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
d_point[idx*KEYPOINTS_SIZE+6] = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
d_point[idx*KEYPOINTS_SIZE+7] = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
d_point[idx*KEYPOINTS_SIZE+8] = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
}
#else
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx >= maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = d_point[pointIndex*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = d_point[pointIndex*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = d_point[pointIndex*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = d_point[pointIndex*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = d_point[pointIndex*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
}
#endif
}
}
delete []hists;
}
__global__ void calcSIFTDescriptor_gpu(float *d_point,float* d_decriptor,int pointsNum,int nOctaveLayers)
{
//float* currptr,int x,int y,float scl_octv,int pitch,int width,int height,float ori,float* d_decriptor,int index
//description array
//calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
//x,y,360-angle,scl,d,n
//static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
//x,y are coordinates in the Gaussian-pyramid image
//scl_octv is the scale relative to the octave
//x,y,scl_octv were computed by the caller
/******************calcDescriptor*****************/
int pointIndex = blockIdx.x*blockDim.x+threadIdx.x;
if(pointIndex>=pointsNum)
return;
#define SHAREMEMORY
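//note: the '#ifdef SHAREMEMORYa' below does not match this macro, so the
//global-memory (#else) branch is the one that is actually compiled here.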
#ifdef SHAREMEMORYa
__shared__ float s_point[BLOCK_SIZE_ONE_DIM*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE] =d_point[pointIndex*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE+1] =d_point[pointIndex*KEYPOINTS_SIZE+1];
s_point[threadIdx.x*KEYPOINTS_SIZE+2] =d_point[pointIndex*KEYPOINTS_SIZE+2];
s_point[threadIdx.x*KEYPOINTS_SIZE+3] =d_point[pointIndex*KEYPOINTS_SIZE+3];
s_point[threadIdx.x*KEYPOINTS_SIZE+4] =d_point[pointIndex*KEYPOINTS_SIZE+4];
s_point[threadIdx.x*KEYPOINTS_SIZE+5] =d_point[pointIndex*KEYPOINTS_SIZE+5];
s_point[threadIdx.x*KEYPOINTS_SIZE+6] =d_point[pointIndex*KEYPOINTS_SIZE+6];
s_point[threadIdx.x*KEYPOINTS_SIZE+7] =d_point[pointIndex*KEYPOINTS_SIZE+7];
s_point[threadIdx.x*KEYPOINTS_SIZE+8] =d_point[pointIndex*KEYPOINTS_SIZE+8];
__syncthreads();
float size = s_point[threadIdx.x*KEYPOINTS_SIZE+3];
float ori = s_point[threadIdx.x*KEYPOINTS_SIZE+5];
int s = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
int x = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
int y = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
#else
float size =d_point[pointIndex*KEYPOINTS_SIZE+3];
float ori = d_point[pointIndex*KEYPOINTS_SIZE+5];
int s = d_point[pointIndex*KEYPOINTS_SIZE+6];
int x = d_point[pointIndex*KEYPOINTS_SIZE+7];
int y = d_point[pointIndex*KEYPOINTS_SIZE+8];
#endif
int o = s/(nOctaveLayers+2);
int layer = s - o*(nOctaveLayers+2);
float scl_octv = size/((1 << o)*2);
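//size was stored as 2*scl_octv*(1 << o), so dividing by (1 << o)*2 recovers
//the scale relative to the octave.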
int width = d_oIndex[o*3];
int height = d_oIndex[o*3+1];
int pitch = d_oIndex[o*3+2];
float *currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
int d = SIFT_DESCR_WIDTH,n = SIFT_DESCR_HIST_BINS;
ori = 360.f - ori;
if(std::abs(ori - 360.f) < FLT_EPSILON)
ori = 0.f;
//printf(" %d,%d,%f,%f,%f ",x,y,*currptr,ori,scl_octv);
//printf(" %d,%d,%f ",x,y,*(pgpyr[o*(nOctaveLayers+3) + layer]+1));
float cos_t = cosf(ori*(float)(CV_PI/180));
float sin_t = sinf(ori*(float)(CV_PI/180));
//n=8
float bins_per_rad = n / 360.f;
float exp_scale = -1.f/(d * d * 0.5f);
//3*scale,normalized 3*scale to 1
float hist_width = SIFT_DESCR_SCL_FCTR * scl_octv;
int radius = int(hist_width * 1.4142135623730951f * (d + 1) * 0.5f+0.5);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = min(radius, (int) sqrt(((double) width)*width + ((double) height)*height));
cos_t /= hist_width;
sin_t /= hist_width;
//len: number of samples in the window; histlen: padded histogram size (d+2)*(d+2)*(n+2)
int i, j, k, len = (radius*2+1)*(radius*2+1);
__shared__ float dst1[SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS*BLOCK_SIZE_ONE_DIM];
float* dst = dst1+threadIdx.x*d*d*n;
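//dst1 lives in shared memory; each thread accumulates into its own d*d*n
//slice (128 floats with the usual d = 4, n = 8), so no synchronization is
//needed while building the descriptor.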
//float dst[SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS];
int rows = height, cols = width;
//float *buf = new float[len*6 + histlen];
const int histlen = (SIFT_DESCR_WIDTH+2)*(SIFT_DESCR_WIDTH+2)*(SIFT_DESCR_HIST_BINS+2);
float hist[histlen];
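//hist is a per-thread array; histlen = (d+2)*(d+2)*(n+2), i.e. 360 floats
//with the usual d = 4, n = 8, padded by one bin on each side of the spatial
//grid and of the circular orientation dimension.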
//__shared__ float hist[histlen*BLOCK_SIZE_ONE_DIM];
//zero-initialize hist because the loops below accumulate with '+='
for( i = 0; i < d+2; i++ )
{
for( j = 0; j < d+2; j++ )
for( k = 0; k < n+2; k++ )
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
}
//traverse the bounding square and compute, for every sample,
//1. dx,dy,weight,orientation and magnitude in image coordinates
//2. its (row,col) position in the rotated d x d descriptor grid
for( i = -radius, k = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
// Calculate sample's histogram array coords rotated relative to ori.
// Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
// r_rot = 1.5) have full weight placed in row 1 after interpolation.
float c_rot = j * cos_t - i * sin_t;
float r_rot = j * sin_t + i * cos_t;
float rbin = r_rot + d/2 - 0.5f;
float cbin = c_rot + d/2 - 0.5f;
int r = y + i, c = x + j;
//d = 4
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
{
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the gradient's y axis points up while image rows increase downward,
//so dy = I(y-1) - I(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
// float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
// float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
//X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
// W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
float wk,ok,mk;
wk = __expf((c_rot * c_rot + r_rot * r_rot)*exp_scale);
ok = atan2f(dy,dx);
ok = (ok*180/CV_PI);
ok = ok<0? ok+360:ok;
mk = sqrtf(dy*dy+dx*dx);
//float rbin = RBin[k], cbin = CBin[k];
float obin = (ok - ori)*bins_per_rad;
float mag = mk*wk;
int r0 = floor(rbin);
int c0 = floor(cbin);
int o0 = floor(obin);
rbin -= r0;
cbin -= c0;
obin -= o0;
if( o0 < 0 )
o0 += n;
if( o0 >= n )
o0 -= n;
// if(x == 1936 && y ==744 ){
// printf("k: %d,rbin: %f cbin: %f obin: %f mag: %f ok: %f\n",k,rbin,cbin,obin,mag,ok);
// }
// histogram update using tri-linear interpolation
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
hist[idx] += v_rco000;
hist[idx+1] += v_rco001;
hist[idx+(n+2)] += v_rco010;
hist[idx+(n+3)] += v_rco011;
hist[idx+(d+2)*(n+2)] += v_rco100;
hist[idx+(d+2)*(n+2)+1] += v_rco101;
hist[idx+(d+3)*(n+2)] += v_rco110;
hist[idx+(d+3)*(n+2)+1] += v_rco111;
k++;
}
}
// if(x == 1936 && y ==744 ){
// for(int i =0;i<360;i++)
// printf(" %f ",hist[i]);
// printf("k: %d",k);
// }
// finalize histogram, since the orientation histograms are circular
for( i = 0; i < d; i++ )
for( j = 0; j < d; j++ )
{
int idx = ((i+1)*(d+2) + (j+1))*(n+2);
hist[idx] += hist[idx+n];
hist[idx+1] += hist[idx+n+1];
for( k = 0; k < n; k++ )
dst[(i*d + j)*n + k] = hist[idx+k];
}
// copy histogram to the descriptor,
// apply hysteresis thresholding
// and scale the result, so that it can be easily converted
// to byte array
float nrm2 = 0;
len = d*d*n;
k = 0;
for( ; k < len; k++ )
nrm2 += dst[k]*dst[k];
float thr = sqrtf(nrm2)*SIFT_DESCR_MAG_THR;
i = 0, nrm2 = 0;
for( ; i < len; i++ )
{
float val = min(dst[i], thr);
dst[i] = val;
nrm2 += val*val;
}
__syncthreads();
nrm2 = SIFT_INT_DESCR_FCTR/max(sqrtf(nrm2), FLT_EPSILON);
k = 0;
for( ; k < len; k++ )
{
//dst[k] = (uchar)(dst[k]*nrm2);
d_decriptor[pointIndex*len + k] = (uchar)(dst[k]*nrm2);
// if(x == 21 && y ==257 ){
// printf("k: %d,%f \n",k,d_decriptor[pointIndex*len + k]);
// }
}
}
// Scale down thread block width
#define SCALEDOWN_W 160
// Scale down thread block height
#define SCALEDOWN_H 16
__constant__ float d_Kernel1[5];
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
const float *k = d_Kernel1;
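// Each block downsamples a SCALEDOWN_W x SCALEDOWN_H tile by two with a
// separable 5-tap Gaussian: every input row is filtered and decimated
// horizontally into one of the five rotating brow buffers, and every second
// row five of those buffers are combined vertically with the same kernel.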
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 1;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx0] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k[2]*brow[tx2] + k[0]*(brow[tx0]+brow[tx4]) + k[1]*(brow[tx1]+brow[tx3]);
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx1] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && !(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
}
__global__ void test()
{
// unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
// printf("cnt : %d \n",d_PointCounter[0]);
}
void testDiffimage(float *d_Octave0,float *d_Octave1,float *d_diffOctave,int pitch,int height){
dim3 Block(32,8);
dim3 Grid(iDivUp(pitch,Block.x),iDivUp(height,Block.y));
hipLaunchKernelGGL(( differenceImg), dim3(Grid),dim3(Block), 0, 0, d_Octave0,d_Octave1,d_diffOctave,pitch,height);
safeCall(hipDeviceSynchronize());
}
//Gaussian-blur a CudaImage in place: on return cuImg.d_data holds the smoothed image
void cuGaussianBlur(CudaImage &cuImg,float sigma)
{
assert(sigma>0);
int kernelSize = 0;
//sigma = sqrtf(sigma * sigma - 0.5 * 0.5 * 4);
//kernel width follows OpenCV's rule for non-8U images:
//ksize.width = cvRound(sigma*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
kernelSize = cvRound(sigma*4*2 + 1)|1;
assert( kernelSize < 32*2+1 );
Mat kx;
kx = getGaussianKernel(kernelSize,sigma,CV_32F);
CHECK(hipMemcpyToSymbol(coeffGaussKernel,(float*)kx.data,sizeof(float)*kernelSize));
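//separable Gaussian: a row pass writes into tmp_data, a column pass into
//tmp_data1, and the result is copied back into cuImg.d_data below.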
dim3 BlockRow(ROW_BLOCK_DIM_X,ROW_BLOCK_DIM_Y);
dim3 GridRow(iDivUp(cuImg.pitch,BlockRow.x*ROW_UNROLL_STEPS),iDivUp(cuImg.height,BlockRow.y));
float *tmp_data,*tmp_data1;
size_t pitch;
safeCall(hipMallocPitch((void**)&tmp_data, (size_t*) &pitch, (size_t) cuImg.width*sizeof(float), (size_t) cuImg.height));
hipLaunchKernelGGL(( GaussianBlurKernelRow), dim3(GridRow),dim3(BlockRow), 0, 0, cuImg.d_data,tmp_data,cuImg.width,cuImg.height,kernelSize,cuImg.pitch);
safeCall(hipDeviceSynchronize());
safeCall(hipMallocPitch((void**)&tmp_data1, (size_t*) &pitch, (size_t) cuImg.width*sizeof(float), (size_t) cuImg.height));
dim3 BlockCol(COLUMNS_BLOCKDIM_X,COLUMNS_BLOCKDIM_Y);
dim3 GridCol(iDivUp(cuImg.pitch,BlockCol.x),iDivUp(cuImg.height,BlockCol.y*COLUMNS_RESULT_STEPS));
hipLaunchKernelGGL(( GaussianBlurKernelCol), dim3(GridCol),dim3(BlockCol), 0, 0, tmp_data,tmp_data1,cuImg.width,cuImg.height,kernelSize,cuImg.pitch);
safeCall(hipDeviceSynchronize());
/*the result stays on the device; it is not copied back to the host here*/
safeCall(hipMemcpy2D(cuImg.d_data,cuImg.pitch*sizeof(float),tmp_data1,cuImg.pitch*sizeof(float),cuImg.width*sizeof(float),(size_t) cuImg.height,hipMemcpyDeviceToDevice));
#if 0
Mat dis(cuImg.height,cuImg.width,CV_32F);
safeCall(hipMemcpy2D(dis.data,cuImg.width*sizeof(float),tmp_data1,cuImg.pitch*sizeof(float),cuImg.width*sizeof(float),(size_t) cuImg.height,hipMemcpyDeviceToHost));
Mat gray;
dis.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey();
#endif
hipFree(tmp_data);
hipFree(tmp_data1);
#if MESSAGE == 0
std::cout<<kernelSize<<std::endl;
for(int i= 0 ;i<kx.rows;i++)
for(int j = 0;j<kx.cols;j++){
std::cout<<kx.at<float>(i,j)<<std::endl;
}
#endif
}
void createInitialImage(const Mat &src, CudaImage &base, float sigma,bool doubleImageSize)
{
int width = src.cols;
int height = src.rows;
if(!src.data){
printf("input none data !");
return;
}
Mat gray, gray_fpt;
if( src.channels() == 3 || src.channels() == 4 )
{
cvtColor(src, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt, DataType<float>::type, 1, 0);
}
else
src.convertTo(gray_fpt, DataType<float>::type, 1, 0);
//incremental blur to apply on top of the input image,
//e.g. sqrt(1.6*1.6 - 0.5*0.5*4) when the image is doubled
float sig_diff;
if( doubleImageSize )
{
sig_diff = sqrtf( ::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
resize(gray_fpt, gray_fpt, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
width = gray_fpt.cols;
height = gray_fpt.rows;
base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
base.Download();
cuGaussianBlur(base,sig_diff);
}
else
{
sig_diff = sqrtf( ::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
base.Download();
cuGaussianBlur(base,sig_diff);
//GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
}
}
double ScaleDown(CudaImage &res, CudaImage &src, float variance)
{
if (res.d_data==NULL || src.d_data==NULL) {
printf("ScaleDown: missing data\n");
return 0.0;
}
// double a = 0.6;
// float h_Kernel[5] = {1.0/4 - a/2.0, 1.0/4, a, 1.0/4, 1.0/4 - a/2.0};
float h_Kernel[5];
float kernelSum = 0.0f;
for (int j=0;j<5;j++) {
h_Kernel[j] = (float)expf(-(double)(j-2)*(j-2)/2.0/variance);
kernelSum += h_Kernel[j];
}
for (int j=0;j<5;j++)
h_Kernel[j] /= kernelSum;
safeCall(hipMemcpyToSymbol(d_Kernel1, h_Kernel, 5*sizeof(float)));
dim3 blocks(iDivUp(src.width, SCALEDOWN_W), iDivUp(src.height, SCALEDOWN_H));
dim3 threads(SCALEDOWN_W + 4);
hipLaunchKernelGGL(( ScaleDown), dim3(blocks), dim3(threads), 0, 0, res.d_data, src.d_data, src.width, src.pitch, src.height, res.pitch);
checkMsg("ScaleDown() execution failed\n");
return 0.0;
}
void buildGaussianPyramid(CudaImage& base, std::vector<CudaImage>& pyr, int nOctaves){
//incremental blur sigmas for the layers within one octave
std::vector<double> sig(nOctaveLayers + 3);
//the pyramid holds nOctaves*(nOctaveLayers+3) images
pyr.resize(nOctaves*(nOctaveLayers + 3));
#define USE_SEPARATION_MEMORY
#ifdef USE_SEPARATION_MEMORY
//allocate a separate buffer for every pyramid image
int w = base.width;
int h = base.height;
for( int o = 0; o < nOctaves; o++ )
{
if(o != 0){
w /= 2;
h /= 2;
}
for( int i = 0; i < nOctaveLayers + 3; i++ ){
pyr[o*(nOctaveLayers + 3) + i].Allocate(w,h,iAlignUp(w, 128),false);
}
}
#else
//optimization: allocate one large buffer and sub-allocate every pyramid image from it
int w = base.width;
int h = base.height;
int pyrDataSize = 0;
for( int o = 0; o < nOctaves; o++ )
{
if(o != 0){
w /= 2;
h /= 2;
}
int p = iAlignUp(w,128);
pyrDataSize += (nOctaveLayers+3)*p*h;
}
float* d_pyrData = NULL;
hipMalloc(&d_pyrData,pyrDataSize*sizeof(float));
//size_t pitch;
//safeCall(hipMallocPitch((void **)&d_pyrData, &pitch, (size_t)4096, (pyrDataSize+4095)/4096));
//safeCall(hipMallocPitch((void **)&d_pyrData, &pitch, (size_t)4096, (pyrDataSize+4095)/4096*sizeof(float)));
int memLocation = 0;
w = base.width;
h = base.height;
for( int o = 0; o < nOctaves; o++ )
{
if(o != 0){
w /= 2;
h /= 2;
}
for( int i = 0; i < nOctaveLayers + 3; i++ ){
int p = iAlignUp(w,128);
pyr[o*(nOctaveLayers + 3) + i].Allocate(w,h,p,false,d_pyrData+memLocation);
//d_pyrData is a float*, so the pointer offset is p*h elements,
//not p*h*sizeof(float) bytes
memLocation += p*h;
}
}
// CudaImage& src = pyr[0*(nOctaveLayers + 3)];
// CudaImage& dst = pyr[0*(nOctaveLayers + 3)+1];
// dst.copyDevice(src,1);
#endif
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double k = ::pow( 2., 1. / nOctaveLayers );
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
double sig_prev = ::pow(k, (double)(i-1))*sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
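//illustrative values, assuming the usual SIFT defaults sigma = 1.6 and
//nOctaveLayers = 3 (so k = 2^(1/3)):
//sig ~= {1.60, 1.23, 1.55, 1.95, 2.45, 3.09}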
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
CudaImage& dst = pyr[o*(nOctaveLayers + 3) + i];
if( o == 0 && i == 0 ){
dst.copyDevice(base,1);
#ifdef SHOW_GAUSSIANPYRAMID
CudaImage &src = dst;
Mat gray,show;
show.create(src.height,src.width,CV_32F);
safeCall(hipMemcpy2D(show.data,src.width*sizeof(float),src.d_data,src.pitch*sizeof(float),src.width*sizeof(float),(size_t) src.height,hipMemcpyDeviceToHost));
show.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey(0);
#endif
}
// base of new octave is halved image from end of previous octave
else if( i == 0 )
{
CudaImage& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
ScaleDown(dst,src,0.5);
}
else
{
CudaImage& src = pyr[o*(nOctaveLayers + 3) + i-1];
dst.copyDevice(src,1);
cuGaussianBlur(dst,sig[i]);
#ifdef SHOW_GAUSSIANPYRAMID
Mat gray,show;
show.create(dst.height,dst.width,CV_32F);
safeCall(hipMemcpy2D(show.data,src.width*sizeof(float),dst.d_data,src.pitch*sizeof(float),src.width*sizeof(float),(size_t) src.height,hipMemcpyDeviceToHost));
show.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey(0);
#endif
}
}
}
}
//could use cuda stream
void buildDoGPyramid( std::vector<CudaImage>& gpyr, std::vector<CudaImage>& dogpyr )
{
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
//could use cuda stream
for(int o = 0;o<nOctaves;o++){
for(int i = 0;i<nOctaveLayers + 2;i++){
CudaImage& prev = gpyr[o*(nOctaveLayers + 3)+i];
CudaImage& next = gpyr[o*(nOctaveLayers + 3)+i+1];
CudaImage& diff = dogpyr[o*(nOctaveLayers + 2)+i];
diff.Allocate(prev.width,prev.height,prev.pitch,false);
dim3 Block(32,8);
dim3 Grid(iDivUp(diff.pitch,Block.x),iDivUp(diff.height,Block.y));
hipLaunchKernelGGL(( differenceImg), dim3(Grid),dim3(Block), 0, 0, prev.d_data,next.d_data,diff.d_data,diff.pitch,diff.height);
safeCall(hipDeviceSynchronize());
#ifdef SHOW_DOGPYRAMID
Mat gray,show;
show.create(diff.height,diff.width,CV_32F);
safeCall(hipMemcpy2D(show.data,diff.width*sizeof(float),diff.d_data,diff.pitch*sizeof(float),diff.width*sizeof(float),(size_t) diff.height,hipMemcpyDeviceToHost));
show.convertTo(gray,DataType<uchar>::type, 30, 200);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey(0);
#endif
}
}
}
int getMaxDescriptorBufSize(int len){
//get the max scl_oct
int radius_ori = (sqrt(len)-1)/2;
//int radius = SIFT_ORI_RADIUS * scl_octv+0.5;
float maxScl_oct = (radius_ori + 1)/SIFT_ORI_RADIUS;
int radius_des = int((SIFT_DESCR_SCL_FCTR * maxScl_oct * 1.4142135623730951f * (SIFT_DESCR_WIDTH + 1) * 0.5f)+0.5);
return int((radius_des*2+1)*(radius_des*2+1));
}
void findScaleSpaceExtrema(std::vector<CudaImage>& gpyr, std::vector<CudaImage>& dogpyr, std::vector<KeyPoint>& keypointss, Mat &descriptors){
float* d_keypoints;
float* h_keypoints;
int totPts = 0;
safeCall(hipMemcpyToSymbol(d_PointCounter, &totPts, sizeof(int)));
hipMalloc(&d_keypoints,sizeof(float)*maxPoints*KEYPOINTS_SIZE);
const int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
//std::cout<<"my threshold = "<<threshold<<std::endl;
#ifdef FIND_DOGERRORTEST
#else
float **h_pd = new float*[dogpyr.size()];
#endif
for(int i = 0;i<dogpyr.size();i++)
h_pd[i] = dogpyr[i].d_data;
safeCall(hipMemcpyToSymbol(pd, h_pd, sizeof(float *)*dogpyr.size()));
float **h_gpyr = new float*[gpyr.size()];
for(int i = 0;i<gpyr.size();i++)
h_gpyr[i] = gpyr[i].d_data;
safeCall(hipMemcpyToSymbol(pgpyr, h_gpyr, sizeof(float *)*gpyr.size()));
    //for every octave, scan its nOctaveLayers middle DoG layers
#if 0
//combine findextrema and oritentation
dim3 Block(32,8);
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
for(int o = 0;o<nOctaves;o++){
for(int i = 0;i<nOctaveLayers;i++){
int index = o*(nOctaveLayers+2)+i+1;
dim3 Grid(iDivUp(dogpyr[index].pitch,Block.x),iDivUp(dogpyr[index].height,Block.y));
hipLaunchKernelGGL(( findScaleSpaceExtrema), dim3(Grid),dim3(Block), 0, 0, d_keypoints,index,dogpyr[index].width,dogpyr[index].pitch,dogpyr[index].height,threshold,nOctaveLayers,maxPoints);
//calcPerOctaveLayers<<<Grid,Block>>>(d_keypoints,d_decriptor,index,dogpyr[index].width,dogpyr[index].pitch,dogpyr[index].height,threshold,nOctaveLayers,maxPoints);
safeCall(hipDeviceSynchronize());
}
}
#else
int temDataSize = 0;
safeCall(hipMemcpyToSymbol(temsize, &temDataSize, sizeof(int)));
dim3 Block(32,8);
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
for(int o = 0;o<nOctaves;o++){
for(int i = 0;i<nOctaveLayers;i++){
int index = o*(nOctaveLayers+2)+i+1;
dim3 Grid(iDivUp(dogpyr[index].pitch,Block.x),iDivUp(dogpyr[index].height,Block.y));
hipLaunchKernelGGL(( findScaleSpaceExtrema_gpu), dim3(Grid),dim3(Block), 0, 0, d_keypoints,index,dogpyr[index].width,dogpyr[index].pitch,dogpyr[index].height,threshold,nOctaveLayers,maxPoints);
safeCall(hipDeviceSynchronize());
}
}
int num0 = 0;
safeCall(hipMemcpyFromSymbol(&num0, d_PointCounter, sizeof(int)));
num0 = (num0>maxPoints)? maxPoints:num0;
printf("cuda sift kepoints num : %d \n",num0);
int* oIndex = new int[33];
for(int i =0;i<nOctaves;i++){
int index = i*(nOctaveLayers+2);
oIndex[i*3] = dogpyr[index].width;
oIndex[i*3+1] = dogpyr[index].height;
oIndex[i*3+2] = dogpyr[index].pitch;
}
safeCall(hipMemcpyToSymbol(d_oIndex, oIndex, sizeof(int)*33));
// int* d_oIndex;
// hipMalloc(&d_oIndex,sizeof(int)*nOctaves*3);
// hipMemcpy(d_oIndex,oIndex,sizeof(int)*nOctaves*3,hipMemcpyHostToDevice);
float* temData;
safeCall(hipMemcpyFromSymbol(&temDataSize, temsize, sizeof(int)));
    //4 working buffers (X/Mag, Y, Ori, W), each of length temDataSize, per keypoint
int buffSize = temDataSize*4;
safeCall(hipMalloc(&temData,sizeof(float)*num0*buffSize));
//std::cout<<"buffSize:"<<buffSize<<std::endl;
int grid =iDivUp(num0,BLOCK_SIZE_ONE_DIM);
//use the global memory
//calcOrientationHist_gpu<<<grid,BLOCK_SIZE_ONE_DIM>>>(d_keypoints,temData,buffSize,num0,maxPoints,nOctaveLayers);
hipLaunchKernelGGL(( calcOrientationHist_gpu1), dim3(grid),dim3(BLOCK_SIZE_ONE_DIM), 0, 0, d_keypoints,temData,buffSize,num0,maxPoints,nOctaveLayers);
safeCall( hipGetLastError() );
safeCall(hipDeviceSynchronize());
hipFree(temData);
int num1 = 0;
safeCall(hipMemcpyFromSymbol(&num1, d_PointCounter, sizeof(int)));
num1 = (num1>maxPoints)? maxPoints:num1;
printf("cuda sift kepoints num : %d \n",num1);
//alloc for d_decriptor
float* d_descriptor;
int despriptorSize = SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
hipMalloc(&d_descriptor,sizeof(float)*num1*despriptorSize);
grid =iDivUp(num1,BLOCK_SIZE_ONE_DIM);
hipLaunchKernelGGL(( calcSIFTDescriptor_gpu), dim3(grid),dim3(BLOCK_SIZE_ONE_DIM), 0, 0, d_keypoints,d_descriptor,num1,nOctaveLayers);
safeCall( hipGetLastError() );
safeCall(hipDeviceSynchronize());
float *h_descriptor;
h_descriptor = (float *)malloc(num1*despriptorSize*sizeof(float));
safeCall(hipMemcpy(h_descriptor,d_descriptor,num1*despriptorSize*sizeof(float),hipMemcpyDeviceToHost));
descriptors.create(num1,128,CV_32FC1);
safeCall(hipMemcpy((float*)descriptors.data,d_descriptor,num1*128*sizeof(float),hipMemcpyDeviceToHost));
#endif
int num = 0;
safeCall(hipMemcpyFromSymbol(&num, d_PointCounter, sizeof(int)));
num = (num>maxPoints)? maxPoints:num;
printf("cuda sift kepoints num : %d \n",num);
h_keypoints = (float *)malloc(num*KEYPOINTS_SIZE*sizeof(float));
safeCall(hipMemcpy(h_keypoints,d_keypoints,num*KEYPOINTS_SIZE*sizeof(float),hipMemcpyDeviceToHost));
hipFree(d_keypoints);
hipFree(d_descriptor);
#ifdef SHOW_KEYPOINT
//std::vector<KeyPoint> keypointss;
keypointss.resize(num);
for(int i = 0;i<keypointss.size();++i)
{
keypointss[i].pt.x = h_keypoints[i*KEYPOINTS_SIZE];
keypointss[i].pt.y = h_keypoints[i*KEYPOINTS_SIZE+1];
keypointss[i].octave = h_keypoints[i*KEYPOINTS_SIZE+2];
keypointss[i].size = h_keypoints[i*KEYPOINTS_SIZE+3];
keypointss[i].response = h_keypoints[i*KEYPOINTS_SIZE+4];
keypointss[i].angle = h_keypoints[i*KEYPOINTS_SIZE+5];
}
// KeyPointsFilter::removeDuplicatedSorted( keypointss );
// printf("my sift kepoints num after clear : %d \n",keypointss.size());
#ifdef NODOUBLEIMAGE
#else
int firstOctave = -1;
if( firstOctave < 0 )
for( size_t i = 0; i < keypointss.size(); i++ )
{
KeyPoint& kpt = keypointss[i];
float scale = 1.f/(float)(1 << -firstOctave);
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
#endif
// Mat kepoint;
//// CudaImage &img = gpyr[0];
//// Mat img_1(img.height,img.width,CV_32F);
//// safeCall(hipMemcpy2D(img_1.data,img.width*sizeof(float),gpyr[0].d_data,gpyr[0].pitch*sizeof(float),gpyr[0].width*sizeof(float),(size_t) gpyr[0].height,hipMemcpyDeviceToHost));
// //char *a ="../data/100_7101.JPG";
// //char *a ="../data/img2.ppm";
// //char *a ="../data/100_7101.JPG";
// char *a ="../data/road.png";
// Mat img_1 = imread(a);
// Mat gray;
// img_1.convertTo(gray,DataType<uchar>::type, 1, 0);
// drawKeypoints(gray,keypointss,kepoint,cv::Scalar::all(-1),4);
// cvNamedWindow("extract_my",CV_WINDOW_NORMAL);
// imshow("extract_my", kepoint);
// waitKey(0);
// for(int i = 0;i < keypointss.size();i++)
// std::cout<<keypointss[i].pt.x<<" ";
// std::cout<<std::endl;
#ifdef COMPARE_VALUE
sort(keypointss.begin(),keypointss.end(),sortx);
int unique_nums;
unique_nums = std::unique(keypointss.begin(),keypointss.end(),uniquex) - keypointss.begin();
for(int i = 0;i < unique_nums;i++)
std::cout<<keypointss[i].response<<" ";
std::cout<<unique_nums<<std::endl;
#endif
#endif
free(h_keypoints);
free(h_descriptor);
}
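//A minimal host-side usage sketch (illustrative only; the Gaussian pyramid is built by
//the pyramid routine above, and buildDoGPyramid/findScaleSpaceExtrema are the functions
//defined in this file):
//
//  std::vector<CudaImage> gpyr, dogpyr;
//  std::vector<KeyPoint> keypoints;
//  Mat descriptors;
//  // ... fill gpyr with nOctaves*(nOctaveLayers+3) blurred images ...
//  buildDoGPyramid(gpyr, dogpyr);
//  findScaleSpaceExtrema(gpyr, dogpyr, keypoints, descriptors);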
void calcDescriptors(std::vector<CudaImage>& gpyr,float* d_keypoints){
}
void displayOctave(std::vector<CudaImage> &Octave)
{
Mat display;
int width = Octave[0].width;
int height = Octave[0].height*Octave.size();
display.create(height,width,CV_32F);
// for(int i = 0 ; i<Octave.size(); i++){
// safeCall(hipMemcpy2D(display.data+width*Octave[0].height*sizeof(float)*i,Octave[0].width*sizeof(float),Octave[0].d_data,Octave[0].pitch*sizeof(float),Octave[0].width*sizeof(float),(size_t) Octave[0].height,hipMemcpyDeviceToHost));
// }
for(int i = 0 ; i<Octave.size(); i++){
safeCall(hipMemcpy2D(display.data+Octave[i].width*Octave[i].height*i*sizeof(float),Octave[i].width*sizeof(float),Octave[i].d_data,Octave[i].pitch*sizeof(float),Octave[i].width*sizeof(float),(size_t) Octave[i].height,hipMemcpyDeviceToHost));
}
Mat gray;
display.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("a",CV_WINDOW_NORMAL);
imshow("a",gray);
waitKey(0);
}
void disMatf(char* name,CudaImage &img){
Mat dis(img.height,img.width,CV_32F);
safeCall(hipMemcpy2D(dis.data,img.width*sizeof(float),img.d_data,img.pitch*sizeof(float),img.width*sizeof(float),(size_t) img.height,hipMemcpyDeviceToHost));
Mat gray;
dis.convertTo(gray,DataType<uchar>::type, 1, 200);
cvNamedWindow(name,CV_WINDOW_NORMAL);
imshow(name,gray);
}
void computePerOctave(CudaImage& base, std::vector<double> &sig, int nOctaveLayers){
std::vector<CudaImage> Octave;
Octave.resize(nOctaveLayers + 3);
Octave[0].copyDevice(base);
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
Octave[i].copyDevice(Octave[i-1]);
cuGaussianBlur(Octave[i],sig[i]);
}
//displayOctave(Octave);
std::vector<CudaImage> diffOctave;
diffOctave.resize(nOctaveLayers+2);
for(int i = 0;i<diffOctave.size();++i)
diffOctave[i].Allocate(Octave[0].width,Octave[0].height,Octave[0].pitch,NULL,NULL);
// float *d_data,pitch;
// safeCall(hipMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*Octave[0].width*5), (size_t)Octave[0].height));
dim3 Block(32,8);
dim3 Grid(iDivUp(Octave[0].pitch,Block.x),iDivUp(Octave[0].height,Block.y));
for(int i = 0;i<diffOctave.size();i++){
hipLaunchKernelGGL(( differenceImg), dim3(Grid),dim3(Block), 0, 0, Octave[i].d_data,Octave[i+1].d_data,diffOctave[i].d_data,Octave[0].pitch,Octave[0].height);
safeCall(hipDeviceSynchronize());
}
#ifdef SHOW
//displayOctave(diffOctave);
#endif
////////////////////
/// findScaleSpaceExtrema
////////////////////
int totPts = 0;
safeCall(hipMemcpyToSymbol(d_PointCounter, &totPts, sizeof(int)));
float *d_point;
hipMalloc(&d_point,sizeof(float)*maxPoints*2);
//for(int i = 0 ; i < diffOctave - 1;i++)
int i = 2;
//findScaleSpaceExtrema<<<Grid,Block>>>(diffOctave[i].d_data,diffOctave[i+1].d_data,diffOctave[i+2].d_data,d_point,Octave[0].width,Octave[0].pitch,Octave[0].height);
//safeCall(hipDeviceSynchronize());
// float *p[3+2];
// float d = 2;
// float *s = &d;
// p[0] = s;
// std::cout<<*(p[0])<<" "<< sizeof(float*) <<std::endl;
// test<<<1,1>>>(p);
float *h_pd[3+2];
for(int i = 0;i<5;i++)
h_pd[i] = diffOctave[i].d_data;
safeCall(hipMemcpyToSymbol(pd, h_pd, sizeof(float *)*5));
int width = Octave[0].width;
int pitch = Octave[0].pitch;
int heigh = Octave[0].height;
//findScaleSpaceExtrema<<<Grid,Block>>>(d_point,3,Octave[0].width,Octave[0].pitch,Octave[0].height);
safeCall(hipDeviceSynchronize());
#ifdef SHOW
disMatf("prve",diffOctave[i]);
disMatf("current",diffOctave[i+1]);
disMatf("next",diffOctave[i+2]);
waitKey(0);
#endif
// test<<<2,23>>>();
int num = 0;
safeCall(hipMemcpyFromSymbol(&num, d_PointCounter, sizeof(int)));
num = (num>2000)? 2000:num;
printf("width : %d , height : %d , num : %d \n",Octave[0].width,Octave[0].height,num);
float *h_points;
h_points = (float *)malloc(num*2*sizeof(float));
//h_points = new float[num*2];
safeCall(hipMemcpy(h_points,d_point,num*2*sizeof(float),hipMemcpyDeviceToHost));
std::vector<KeyPoint> keypoints;
keypoints.resize(num);
for(int i = 0;i<keypoints.size();++i)
{
keypoints[i].pt.x = h_points[i*2];
keypoints[i].pt.y = h_points[i*2+1];
}
#ifdef SHOW
Mat kepoint;
CudaImage &img = diffOctave[i+1];
Mat img_1(img.height,img.width,CV_32F);
safeCall(hipMemcpy2D(img_1.data,img.width*sizeof(float),diffOctave[i+1].d_data,img.pitch*sizeof(float),img.width*sizeof(float),(size_t) img.height,hipMemcpyDeviceToHost));
Mat gray;
img_1.convertTo(gray,DataType<uchar>::type, 1, 200);
drawKeypoints(gray,keypoints,kepoint);
//char *a ="../data/road.png";
//Mat img_1 = imread(a);
//drawKeypoints(img_1,keypoints,kepoint);
cvNamedWindow("extract_my",CV_WINDOW_NORMAL);
imshow("extract_my", kepoint);
waitKey(0);
#endif
}
/*disable*/
void disMatf(CudaImage &cuImg){
Mat dis(cuImg.height,cuImg.width,CV_32F);
for(int i = 0;i<dis.rows;i++)
{
float *p = dis.ptr<float>(i);
for(int j = 0;j<dis.cols;j++){
p[j] = cuImg.h_data[i*dis.cols+j];
//std::cout<<p[j]<<" ";
}
//std::cout<<std::endl;
}
//memcpy(dis.data,cuImg.h_data,cuImg.width*cuImg.height*sizeof(float));
Mat gray;
dis.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ff",CV_WINDOW_NORMAL);
imshow("ff",gray);
waitKey();
}
| 874cc8391f0e6bc5e01e2c09288dadf3a53d498c.cu | #include "cusitf_function_H.h"
using namespace cv;
#define MESSAGE 1
texture<float, 1, cudaReadModeElementType> texRef;
/***********
//These are adjustable options which control the supported Gaussian kernel size.
//When the kernel size is at most 32*2+1 (i.e. the kernel radius is at most 32),
//setting ROW_HALO_STEP to 1 and COLUMNS_HALO_STEPS to 2 gives good performance.
//When the kernel radius is more than 32 but less than 64, ROW_HALO_STEP should be
//set to 2 and COLUMNS_HALO_STEPS to 4.
//ROW_HALO_STEP is set to 1 and COLUMNS_HALO_STEPS to 2 by default.
***********/
///////////////////////
/// \brief GaussianBlurKernelRow
/// \param d_data
/// \param out
/// \param w
/// \param h
/// \param ksize
/// \param pitch
/// Only supports kernel sizes up to 32*2+1 (ROW_HALO_STEP*ROW_BLOCK_DIM_X(32) is the maximum radius).
/// Based on the cuda-sample 'convolutionSeparable'.
/// The boundary is zero-padded, which differs from OpenCV. The boundary handling is simplified
/// because the SIFT descriptor never uses the image border, which is filtered out later.
/// Adjusting ROW_HALO_STEP to 2 also works if a larger radius is needed.
//////////////////////
#define ROW_BLOCK_DIM_X 32
#define ROW_BLOCK_DIM_Y 8
#define ROW_UNROLL_STEPS 4
#define ROW_HALO_STEP 1
__global__ void GaussianBlurKernelRow(
float *d_data,
float *out,
int w,
int h,
int ksize,
int pitch
)
{
__shared__ float s[ROW_BLOCK_DIM_Y][ROW_BLOCK_DIM_X*(ROW_UNROLL_STEPS+ROW_HALO_STEP*2)];
//base shared memory coordinate
int baseX = (blockIdx.x*ROW_UNROLL_STEPS-ROW_HALO_STEP)*blockDim.x + threadIdx.x;
int baseY = blockIdx.y*blockDim.y+threadIdx.y;
//the data basing shared memory coordinate
d_data += baseY * pitch + baseX;
out += baseY * pitch + baseX;
//Load main data
#pragma unroll
for(int i = ROW_HALO_STEP;i<ROW_UNROLL_STEPS+ROW_HALO_STEP;i++)
s[threadIdx.y][threadIdx.x+ i * ROW_BLOCK_DIM_X] = (baseX + ROW_BLOCK_DIM_X * i < w ) ? d_data[ROW_BLOCK_DIM_X * i] : 0;
    //Load left halo
    //the left halo is read from the image only where it lies inside the image, otherwise it is zeroed
#pragma unroll
for (int i = 0; i < ROW_HALO_STEP; i++)
{
s[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X] = (baseX >= -i * ROW_BLOCK_DIM_X ) ? d_data[i * ROW_BLOCK_DIM_X] : 0;
}
    //Load right halo
    //the right halo is read from the image only where it lies inside the image, otherwise it is zeroed
#pragma unroll
for (int i = ROW_HALO_STEP + ROW_UNROLL_STEPS; i < ROW_HALO_STEP + ROW_UNROLL_STEPS + ROW_HALO_STEP; i++)
{
s[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X] = (w - baseX > i * ROW_BLOCK_DIM_X) ? d_data[i * ROW_BLOCK_DIM_X] : 0;
}
__syncthreads();
int b = (ksize -1) /2;
for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP + ROW_UNROLL_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -b; j <= b; j++)
{
sum += coeffGaussKernel[b-j] * s[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X + j];
}
out[i * ROW_BLOCK_DIM_X] = sum;
}
//old version
// int b = (ksize -1) /2;
// if(x>=b && x<w-b && y>=0 && y<h){
// #pragma unroll
// float sum = 0;
// for(int i = -b;i<=b;i++){
// sum += d_data[y*pitch+x+i]*coeffGaussKernel[i+b];
// }
// out[y*pitch+x] = sum;
// }
}
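//A minimal host-side launch sketch for the row pass (an assumed helper, not necessarily
//the wrapper used by cuGaussianBlur). Each block writes ROW_UNROLL_STEPS tiles of
//ROW_BLOCK_DIM_X pixels per row; the grid is sized from the pitch, as the other host
//launches in this file do.
static void launchGaussianBlurRow(float *d_src, float *d_dst,
                                  int width, int height, int ksize, int pitch)
{
    dim3 block(ROW_BLOCK_DIM_X, ROW_BLOCK_DIM_Y);
    dim3 grid((pitch + ROW_BLOCK_DIM_X*ROW_UNROLL_STEPS - 1) / (ROW_BLOCK_DIM_X*ROW_UNROLL_STEPS),
              (height + ROW_BLOCK_DIM_Y - 1) / ROW_BLOCK_DIM_Y);
    GaussianBlurKernelRow<<<grid, block>>>(d_src, d_dst, width, height, ksize, pitch);
}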
///////////////////////////////////
/// \brief GaussianBlurKernelCol
/// \param d_data
/// \param out
/// \param w
/// \param h
/// \param ksize
/// \param pitch
/// The difference from the row pass is that the columns have no pitch padding to keep
/// all threads inside the image area, so the bottom edge is handled by a separate path.
/// Based on the cuda-sample 'convolutionSeparable'.
/// The boundary is zero-padded, which differs from OpenCV. The boundary handling is simplified
/// because the SIFT descriptor never uses the image border, which is filtered out later.
/// The minimum image height is 64 (COLUMNS_BLOCKDIM_Y*COLUMNS_RESULT_STEPS).
//////////////////////////////////
#define COLUMNS_BLOCKDIM_X 32
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_RESULT_STEPS 4
#define COLUMNS_HALO_STEPS 2
__global__ void GaussianBlurKernelCol(
float *d_data,
float *out,
int w,
int h,
int ksize,
int pitch
)
{
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_data += baseY * pitch + baseX;
out += baseY * pitch + baseX;
int b = (ksize -1) /2;
    //bottom-edge tile: load main data and the lower halo with bounds checks; this tiling
    //still requires a minimum image height of 64 (COLUMNS_BLOCKDIM_Y*COLUMNS_RESULT_STEPS)
if(baseY + (COLUMNS_RESULT_STEPS+COLUMNS_HALO_STEPS)*COLUMNS_BLOCKDIM_Y >= h && baseY + COLUMNS_HALO_STEPS*COLUMNS_BLOCKDIM_Y < h)
{
//Main data and lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS*2 ; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y < h) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
__syncthreads();
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -b ; j <= b; j++)
{
sum += coeffGaussKernel[b - j]* s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
if(baseY + i * COLUMNS_BLOCKDIM_Y < h) {
out[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
return;
}
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_data[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (h - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_data[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -b ; j <= b; j++)
{
sum += coeffGaussKernel[b - j]* s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
out[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
#if 0
if(y>=b && y<h-b && x>=0 && x<w){
#pragma unroll
for(int i = 0;i<ksize;i++){
if(i<b){
out[y*pitch+x] += d_data[(y-b+i)*pitch+x]*coeffGaussKernel[i];
}
else{
out[y*pitch+x] += d_data[(y+i-b)*pitch+x]*coeffGaussKernel[i];
}
}
}
#else
// if(y>=b && y<h-b && x>=0 && x<w){
// #pragma unroll
// float sum = 0;
// for(int i = -b;i<=b;i++){
// sum += d_data[(y+i)*pitch+x]*coeffGaussKernel[i+b];
// }
// out[y*pitch+x] = sum;
// }
#endif
}
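//Column-pass launch sketch (an assumption mirroring the row pass): each block covers
//COLUMNS_RESULT_STEPS*COLUMNS_BLOCKDIM_Y rows, so a matching configuration is
//  dim3 block(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
//  dim3 grid(iDivUp(pitch, COLUMNS_BLOCKDIM_X),
//            iDivUp(height, COLUMNS_BLOCKDIM_Y * COLUMNS_RESULT_STEPS));
//  GaussianBlurKernelCol<<<grid, block>>>(d_src, d_dst, width, height, ksize, pitch);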
__global__ void GaussianBlurKernelRTex(float *out,int w,int h,int ksize)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int b = (ksize -1) /2;
if(x>=b && x<w-b && y>=0 && y<h){
#pragma unroll
for(int i = 0;i<ksize;i++){
if(i<b){
out[y*w+x] += tex1Dfetch(texRef,y*w+x-b+i)*coeffGaussKernel[i];
}
else{
out[y*w+x] += tex1Dfetch(texRef,y*w+x+i-b)*coeffGaussKernel[i];
}
}
}
}
__global__ void differenceImg(float *d_Octave0,float *d_Octave1,float *d_diffOctave,int pitch,int height){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int index = y * pitch + x;
if(y<height)
d_diffOctave[index] = (d_Octave1[index] - d_Octave0[index]);
}
__global__ void findScaleSpaceExtrema(float *prev,float *img,float *next,float *d_point,int width ,int pitch ,int height)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// if(x<1 || y<1 || x>=width-1 || y>=height-1)
// return;
const int BLOCKDIMX = 32;
const int BLOCKDIMY = 8;
__shared__ float Mem0[BLOCKDIMY+2][BLOCKDIMX+2];
__shared__ float Mem1[BLOCKDIMY+2][BLOCKDIMX+2];
__shared__ float Mem2[BLOCKDIMY+2][BLOCKDIMX+2];
//the count of the extrema points in current block;
__shared__ unsigned int cnt;
//points storage in shared memory
__shared__ unsigned short points[96];
// float *ptr0 = prev[y * pitch + x];
// float *ptr1 = img[y * pitch + x];
// float *ptr2 = next[y * pitch + x];
prev += ( y-1 ) * pitch + x - 1;
img += ( y-1 ) * pitch + x - 1;
next += ( y-1 ) * pitch + x - 1;
Mem0[ty][tx] = (x<0||y<0)? 0:prev[0];
Mem1[ty][tx] = (x<0||y<0)? 0:img[0];
Mem2[ty][tx] = (x<0||y<0)? 0:next[0];
// Mem1[ty][32] = -400;
// Mem1[8][tx] = -400;
// Mem1[8][32] = -400;
//prev[0] = 250;
if(tx == 0 && ty == 0){
#pragma unroll
for(int i = BLOCKDIMY;i<BLOCKDIMY + 2;i++)
#pragma unroll
for(int j = 0;j<BLOCKDIMX+2;j++){
Mem0[i][j] = (x<width||y<height)? prev[i*pitch + j]:0;
Mem1[i][j] = (x<width||y<height)? img[i*pitch + j]:0;
Mem2[i][j] = (x<width||y<height)? next[i*pitch + j]:0;
}
#pragma unroll
for(int i = 0;i<BLOCKDIMY;i++)
#pragma unroll
for(int j = BLOCKDIMX;j<2+BLOCKDIMX;j++){
Mem0[i][j] = (x<width||y<height)? prev[i*pitch + j]:0;
Mem1[i][j] = (x<width||y<height)? img[i*pitch + j]:0;
Mem2[i][j] = (x<width||y<height)? next[i*pitch + j]:0;
}
cnt = 0;
        //reset the per-block point counter so every thread sees 0 after the __syncthreads() below
}
__syncthreads();
prev += pitch + 1;
img += pitch + 1;
next += pitch + 1;
// prev[0] = Mem0[ty+1][tx+1] + 200;
// img[0] = Mem1[ty+1][tx+1] + 200;
// next[0] = Mem2[ty+1][tx+1] + 200 ;
//next[0] = Mem2[ty+1][tx+1]*50 ;
const int threshold = int(0.5 * 0.04 / 3 * 255);
float val = img[0];
int c = 0;
int step = pitch;
float *currptr = img;
float *nextptr = next;
float *prevptr = prev;
if( std::abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
int pos = atomicInc(&cnt, 31);
points[3*pos+0] = x;
points[3*pos+1] = y;
//points[3*pos+2] = scale;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>=2000 ? 2000-1 : idx);
d_point[idx*2] = x;
d_point[idx*2+1] = y;
printf("cnt : %d , x = %d , y = %d,asd: %f \n",idx,x,y,d_point[idx*2]);
}
}
__device__ void addpoint(){
}
//////
/// \brief findScaleSpaceExtrema
/// \param d_point
/// \param s
/// \param width
/// \param pitch
/// \param height
/// \param threshold
/// \param nOctaveLayers
/// \param maxNum
////////////
/// s is the index in dog
__global__ void findScaleSpaceExtrema(float *d_point,int s, int width ,int pitch ,int height,const int threshold,const int nOctaveLayers,const int maxNum){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
    //skip unstable keypoints near the image border
if(y >= height - SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER || x<SIFT_IMG_BORDER || y<SIFT_IMG_BORDER)
return;
float *currptr = pd[s] +y*pitch+x;
float *prevptr = pd[s-1]+y*pitch+x;
float *nextptr = pd[s+1]+y*pitch+x;
int o = s/(nOctaveLayers+2);
float val = *currptr;
int step = pitch;
int c = 0;
if( abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
/*adjustLocalExtrema*/
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float Vs=0, Vx=0, Vy=0, contr=0;
float dx,dy,ds,dxx,dyy,dxy;
int j = 0,layer;
//get the x,y,s,Vs,Vx,Vy or return
for( ; j < SIFT_MAX_INTERP_STEPS; j++ )
{
currptr = pd[s] +y*pitch+x;
prevptr = pd[s-1]+y*pitch+x;
nextptr = pd[s+1]+y*pitch+x;
//the first derivative of x,y and scale
dx = (currptr[1] - currptr[-1])*deriv_scale;
dy = (currptr[pitch] - currptr[-pitch])*deriv_scale;;
ds = (nextptr[0] - prevptr[0])*deriv_scale;
float v2 = currptr[0]*2;
//the second derivative of x,y,scale
dxx = (currptr[1] + currptr[-1] - v2)*second_deriv_scale;
dyy = (currptr[pitch] + currptr[-pitch] - v2)*second_deriv_scale;
float dss = (nextptr[0] + prevptr[0] - v2)*second_deriv_scale;
dxy = (currptr[pitch+1] - currptr[1-pitch] -
currptr[-1+pitch] + currptr[-pitch-1])*cross_deriv_scale;
float dxs = (nextptr[1] - nextptr[-1] -
prevptr[1] + prevptr[-1])*cross_deriv_scale;
float dys = (nextptr[pitch] - nextptr[-pitch] -
prevptr[pitch] + prevptr[-pitch])*cross_deriv_scale;
            //algebraic cofactors (entries of the adjugate matrix)
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
            //idet is the reciprocal of the matrix determinant
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
////////////////////////
/// A(dxx, dxy, dxs,
/// dxy, dyy, dys,
/// dxs, dys, dss);
///
/// A*(idxx, idxy, idxs,
/// idxy, idyy, idys,
/// idxs, idys, idss);
///
/// B(dx,dy,dz)
/////////////////////////
//dX = (A^-1)*B
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
Vx = -pdx;
Vy = -pdy;
Vs = -pds;
            //because the convergence test happens before the position update, the
            //iteration finally yields integer x,y,s and offsets Vx,Vy,Vs (each < 0.5).
            //The refined extremum location is x+Vx, y+Vy.
if( abs(Vs) < 0.5f && abs(Vx) < 0.5f && abs(Vy) < 0.5f )
break;
            //round to the nearest integer
x += int(Vx > 0 ? ( Vx + 0.5 ) : (Vx - 0.5));
y += int(Vy > 0 ? ( Vy + 0.5 ) : (Vy - 0.5));
s += int(Vs > 0 ? ( Vs + 0.5 ) : (Vs - 0.5));
layer = s - o*(nOctaveLayers+2);
if( layer < 1 || layer > nOctaveLayers ||
y < SIFT_IMG_BORDER || y >= height - SIFT_IMG_BORDER ||
x < SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER )
return;
}//for
if( j >= SIFT_MAX_INTERP_STEPS )
return;
        //After the iterations, x,y,s and the offsets (Vx,Vy,Vs) (each < 0.5) are available.
{
            //reject low-contrast points, which are easily influenced by image noise
float t = dx*Vx + dy*Vy + ds*Vs;
contr = currptr[0]*img_scale + t * 0.5f;
if( abs( contr ) * nOctaveLayers < 0.04 )
return;
// principal curvatures are computed using the trace and det of Hessian
float tr = dxx + dyy;
float det = dxx*dyy-dxy*dxy;
if( det <= 0 || tr*tr*10 >= (10 + 1)*(10 + 1)*det )
return;
}
layer = s - o*(nOctaveLayers+2);
#if 1
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
#else
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
#endif
/******************calOrientationHist*****************/
{
            //currptr points into the DoG image that contains the current extremum.
            //x,y,s is the current location in the DoG pyramid.
            //Note: s is the absolute scale index, while 'layer' is the relative index
            //inside the octave, in the range 1~nOctaveLayers.
            //The orientation is computed on the Gaussian pyramid, so currptr is redirected:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
            //sigma*2^(layer/S): the scale relative to the octave
float scl_octv = size*0.5f/(1 << o);
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
//float hist[n];
            //process a square neighborhood of (2*radius+1)^2 pixels around the keypoint
int k, len = (radius*2+1)*(radius*2+1);
            //Gaussian weighting coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = new float[len*4 + n+4 + n];
            //buf is temporary working storage:
            //the first len entries hold X (reused as Mag, the gradient magnitude), the next len hold Y,
            //then len for Ori and len for the Gaussian weights W; the remaining n+4+n entries hold
            //temphist (with a 2-entry border on each side for the circular wrap) followed by hist.
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
            //gradient orientation histogram
float* temphist = W + len + 2,*hist = temphist+n+2;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
                // the '<='/'>=' bounds keep the yi-1 and yi+1 reads below inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
                    //the positive y direction of the gradient is bottom-to-top, opposite to the
                    //image row order (top-to-bottom), so dy = row(y-1) - row(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
X[k] = dx;
Y[k] = dy;
                    //The weight is not normalized by the Gaussian constant because the orientation
                    //computation only needs relative weights.
W[k] = __expf((i*i + j*j)*expf_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
                    //equivalent to cvRound((Ori[k]*180/CV_PI)/(360/n))
float tembin = __fdividef(__fdividef(Ori[k]*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
// if(k == 0)
// printf("temphist[%d]: %f , Mag[k] : %f , Y[k] : %f \n",bin,temphist[bin],Mag[k],Y[k]);
//printf("bin : %d , Mag[k]: %f, W[k]: %f ,temphist[bin] %f \n",bin,Mag[k],W[k],temphist[bin]);
//printf("Mag[k] : %f, X[k] : %f , Y[k] : %f \n",Mag[k],X[k],Y[k]);
k++;
}
}
//printf("pixel : %f \n",currptr[0]);
// for(int i = 0;i<len;i++)
// {
// Ori[i] = atan2f(Y[i],X[i]);
// Mag[i] = sqrtf(Y[i]*Y[i]+X[i]*X[i]);
// }
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
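            //The loop above is a circular convolution of the raw histogram with the
            //binomial kernel [1 4 6 4 1]/16 (a small Gaussian approximation):
            //  hist[i] = (temphist[i-2] + 4*temphist[i-1] + 6*temphist[i]
            //             + 4*temphist[i+1] + temphist[i+2]) / 16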
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
// kpt.angle = 360.f - (float)((360.f/n) * bin);
// if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
// kpt.angle = 0.f;
//addpoint;
#if 1
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
// kpt.pt.x = (c + xc) * (1 << octv);
// kpt.pt.y = (r + xr) * (1 << octv);
// kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
// //why '*2'
// kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
// kpt.response = std::abs(contr);
#else
#endif
}
}
delete []buf;
}//orientation
}//extrema
}
__device__ void calcDescriptors(float* currptr,int x,int y,float scl_octv,int pitch,int width,int height,float ori,float* d_decriptor,int index)
{
//description array
//calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
//x,y,360-angle,scl,d,n
//static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
    //x,y are the coordinates in the pyramid image
    //scl_octv is the scale relative to the octave
    //x,y,scl_octv have been calculated above
/******************calcDescriptor*****************/
int d = SIFT_DESCR_WIDTH,n = SIFT_DESCR_HIST_BINS;
ori = 360.f - ori;
if(std::abs(ori - 360.f) < FLT_EPSILON)
ori = 0.f;
float cos_t = cosf(ori*(float)(CV_PI/180));
float sin_t = sinf(ori*(float)(CV_PI/180));
//n=8
float bins_per_rad = n / 360.f;
float exp_scale = -1.f/(d * d * 0.5f);
    //hist_width = 3*scale: the width (in pixels) of one descriptor histogram cell
float hist_width = SIFT_DESCR_SCL_FCTR * scl_octv;
int radius = int(hist_width * 1.4142135623730951f * (d + 1) * 0.5f+0.5);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = min(radius, (int) sqrt(((double) width)*width + ((double) height)*height));
cos_t /= hist_width;
sin_t /= hist_width;
    //len is the number of pixels in the keypoint neighborhood; histlen is the number of histogram
    //bins, i.e. the feature-vector length, nominally d*d*n; each dimension gets 2 extra entries
    //to leave room for the circular wrap-around interpolation.
int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2);
float dst[SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS];
int rows = height, cols = width;
float *buf = new float[len*6 + histlen];
    //Memory arrangement:
// Mag
// X Y Ori W RBin CBin hist
// -_____-_____-_____-_____-_____-_____-__
//
float *X = buf, *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len;
float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len;
    //zero-initialize hist because the code below accumulates with '+='
for( i = 0; i < d+2; i++ )
{
for( j = 0; j < d+2; j++ )
for( k = 0; k < n+2; k++ )
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
}
    //traverse the bounding rectangle and compute two sets of data:
    //1. dx,dy,w,ori,mag in image coordinates
    //2. x,y in histogram-bin coordinates (a rotated, relative frame)
for( i = -radius, k = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
// Calculate sample's histogram array coords rotated relative to ori.
// Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
// r_rot = 1.5) have full weight placed in row 1 after interpolation.
float c_rot = j * cos_t - i * sin_t;
float r_rot = j * sin_t + i * cos_t;
float rbin = r_rot + d/2 - 0.5f;
float cbin = c_rot + d/2 - 0.5f;
int r = y + i, c = x + j;
//d = 4
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
{
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
                //the positive y direction of the gradient is bottom-to-top, opposite to the
                //image row order (top-to-bottom), so dy = row(y-1) - row(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
// float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
// float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
// W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
W[k] = __expf((c_rot * c_rot + r_rot * r_rot)*exp_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
k++;
}
}
k = 0;
for( ; k < len; k++ )
{
float rbin = RBin[k], cbin = CBin[k];
float obin = (Ori[k] - ori)*bins_per_rad;
float mag = Mag[k]*W[k];
        //split into integer bin index (floor, so values in (-1,0) map to bin -1) and fractional part
        int r0 = (int)floorf(rbin);
        int c0 = (int)floorf(cbin);
        int o0 = (int)floorf(obin);
        rbin -= r0;
        cbin -= c0;
        obin -= o0;
if( o0 < 0 )
o0 += n;
if( o0 >= n )
o0 -= n;
// histogram update using tri-linear interpolation
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
hist[idx] += v_rco000;
hist[idx+1] += v_rco001;
hist[idx+(n+2)] += v_rco010;
hist[idx+(n+3)] += v_rco011;
hist[idx+(d+2)*(n+2)] += v_rco100;
hist[idx+(d+2)*(n+2)+1] += v_rco101;
hist[idx+(d+3)*(n+2)] += v_rco110;
hist[idx+(d+3)*(n+2)+1] += v_rco111;
}
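    //The eight updates above distribute mag over the 2x2x2 neighbouring
    //(row, column, orientation) cells with trilinear weights
    //  v_rco_{rco} = mag * w_r * w_c * w_o,
    //where each w is either the fractional part (rbin/cbin/obin) or one minus it.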
// finalize histogram, since the orientation histograms are circular
for( i = 0; i < d; i++ )
for( j = 0; j < d; j++ )
{
int idx = ((i+1)*(d+2) + (j+1))*(n+2);
hist[idx] += hist[idx+n];
hist[idx+1] += hist[idx+n+1];
for( k = 0; k < n; k++ )
dst[(i*d + j)*n + k] = hist[idx+k];
}
// copy histogram to the descriptor,
// apply hysteresis thresholding
// and scale the result, so that it can be easily converted
// to byte array
float nrm2 = 0;
len = d*d*n;
k = 0;
for( ; k < len; k++ )
nrm2 += dst[k]*dst[k];
float thr = sqrtf(nrm2)*SIFT_DESCR_MAG_THR;
i = 0, nrm2 = 0;
for( ; i < len; i++ )
{
float val = min(dst[i], thr);
dst[i] = val;
nrm2 += val*val;
}
nrm2 = SIFT_INT_DESCR_FCTR/max(sqrtf(nrm2), FLT_EPSILON);
    for( k = 0; k < len; k++ )
{
//dst[k] = (uchar)(dst[k]*nrm2);
d_decriptor[index*len + k] = (uchar)(dst[k]*nrm2);
}
delete []buf;
}
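//Note: with the usual SIFT settings d = SIFT_DESCR_WIDTH = 4 and n = SIFT_DESCR_HIST_BINS = 8
//(assumed here, defined elsewhere), the descriptor above has d*d*n = 128 floats per keypoint,
//matching the 128-column descriptor matrix created on the host side.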
__global__ void calcPerOctaveLayers(float *d_point,float* d_decriptor,int s, int width ,int pitch ,int height,const int threshold,const int nOctaveLayers,const int maxNum){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
    //skip unstable keypoints near the image border
if(y >= height - SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER || x<SIFT_IMG_BORDER || y<SIFT_IMG_BORDER)
return;
float *currptr = pd[s] +y*pitch+x;
float *prevptr = pd[s-1]+y*pitch+x;
float *nextptr = pd[s+1]+y*pitch+x;
int o = s/(nOctaveLayers+2);
float val = *currptr;
int step = pitch;
int c = 0;
if( abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
/*adjustLocalExtrema*/
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float Vs=0, Vx=0, Vy=0, contr=0;
float dx,dy,ds,dxx,dyy,dxy;
int j = 0,layer;
//get the x,y,s,Vs,Vx,Vy or return
for( ; j < SIFT_MAX_INTERP_STEPS; j++ )
{
currptr = pd[s] +y*pitch+x;
prevptr = pd[s-1]+y*pitch+x;
nextptr = pd[s+1]+y*pitch+x;
//the first derivative of x,y and scale
dx = (currptr[1] - currptr[-1])*deriv_scale;
dy = (currptr[pitch] - currptr[-pitch])*deriv_scale;;
ds = (nextptr[0] - prevptr[0])*deriv_scale;
float v2 = currptr[0]*2;
//the second derivative of x,y,scale
dxx = (currptr[1] + currptr[-1] - v2)*second_deriv_scale;
dyy = (currptr[pitch] + currptr[-pitch] - v2)*second_deriv_scale;
float dss = (nextptr[0] + prevptr[0] - v2)*second_deriv_scale;
dxy = (currptr[pitch+1] - currptr[1-pitch] -
currptr[-1+pitch] + currptr[-pitch-1])*cross_deriv_scale;
float dxs = (nextptr[1] - nextptr[-1] -
prevptr[1] + prevptr[-1])*cross_deriv_scale;
float dys = (nextptr[pitch] - nextptr[-pitch] -
prevptr[pitch] + prevptr[-pitch])*cross_deriv_scale;
            //algebraic cofactors (entries of the adjugate matrix)
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
            //idet is the reciprocal of the matrix determinant
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
////////////////////////
/// A(dxx, dxy, dxs,
/// dxy, dyy, dys,
/// dxs, dys, dss);
///
/// A*(idxx, idxy, idxs,
/// idxy, idyy, idys,
/// idxs, idys, idss);
///
/// B(dx,dy,dz)
/////////////////////////
//dX = (A^-1)*B
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
Vx = -pdx;
Vy = -pdy;
Vs = -pds;
            //because the convergence test happens before the position update, the
            //iteration finally yields integer x,y,s and offsets Vx,Vy,Vs (each < 0.5).
            //The refined extremum location is x+Vx, y+Vy.
if( abs(Vs) < 0.5f && abs(Vx) < 0.5f && abs(Vy) < 0.5f )
break;
            //round to the nearest integer for the next iteration
x += int(Vx > 0 ? ( Vx + 0.5 ) : (Vx - 0.5));
y += int(Vy > 0 ? ( Vy + 0.5 ) : (Vy - 0.5));
s += int(Vs > 0 ? ( Vs + 0.5 ) : (Vs - 0.5));
layer = s - o*(nOctaveLayers+2);
if( layer < 1 || layer > nOctaveLayers ||
y < SIFT_IMG_BORDER || y >= height - SIFT_IMG_BORDER ||
x < SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER )
return;
}//for
if( j >= SIFT_MAX_INTERP_STEPS )
return;
        //After the iterations, x,y,s and the offsets (Vx,Vy,Vs) (each < 0.5) are available.
{
            //reject low-contrast points, which are easily influenced by image noise
float t = dx*Vx + dy*Vy + ds*Vs;
contr = currptr[0]*img_scale + t * 0.5f;
if( abs( contr ) * nOctaveLayers < 0.04 )
return;
// principal curvatures are computed using the trace and det of Hessian
float tr = dxx + dyy;
float det = dxx*dyy-dxy*dxy;
if( det <= 0 || tr*tr*10 >= (10 + 1)*(10 + 1)*det )
return;
}
layer = s - o*(nOctaveLayers+2);
#if 1
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
#else
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
#endif
float ori = 0.;
float scl_octv = size*0.5f/(1 << o);
unsigned int idx_arr[2];
float ori_arr[2];
int num_idx = 0;
/******************calOrientationHist*****************/
{
            //currptr points into the DoG image that contains the current extremum.
            //x,y,s is the current location in the DoG pyramid.
            //Note: s is the absolute scale index, while 'layer' is the relative index
            //inside the octave, in the range 1~nOctaveLayers.
            //The orientation is computed on the Gaussian pyramid, so currptr is redirected:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
            //sigma*2^(layer/S): the scale relative to the octave
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
//float hist[n];
            //process a square neighborhood of (2*radius+1)^2 pixels around the keypoint
int k, len = (radius*2+1)*(radius*2+1);
            //Gaussian weighting coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = new float[len*4 + n+4 + n];
            //buf is temporary working storage:
            //the first len entries hold X (reused as Mag, the gradient magnitude), the next len hold Y,
            //then len for Ori and len for the Gaussian weights W; the remaining n+4+n entries hold
            //temphist (with a 2-entry border on each side for the circular wrap) followed by hist.
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
            //gradient orientation histogram
float* temphist = W + len + 2,*hist = temphist+n+2;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
                // the '<='/'>=' bounds keep the yi-1 and yi+1 reads below inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
                    //the positive y direction of the gradient is bottom-to-top, opposite to the
                    //image row order (top-to-bottom), so dy = row(y-1) - row(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
X[k] = dx;
Y[k] = dy;
                    //The weight is not normalized by the Gaussian constant because the orientation
                    //computation only needs relative weights.
W[k] = __expf((i*i + j*j)*expf_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
                    //equivalent to cvRound((Ori[k]*180/CV_PI)/(360/n))
float tembin = __fdividef(__fdividef(Ori[k]*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
// if(k == 0)
// printf("temphist[%d]: %f , Mag[k] : %f , Y[k] : %f \n",bin,temphist[bin],Mag[k],Y[k]);
//printf("bin : %d , Mag[k]: %f, W[k]: %f ,temphist[bin] %f \n",bin,Mag[k],W[k],temphist[bin]);
//printf("Mag[k] : %f, X[k] : %f , Y[k] : %f \n",Mag[k],X[k],Y[k]);
k++;
}
}
//printf("pixel : %f \n",currptr[0]);
// for(int i = 0;i<len;i++)
// {
// Ori[i] = atan2f(Y[i],X[i]);
// Mag[i] = sqrtf(Y[i]*Y[i]+X[i]*X[i]);
// }
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
// kpt.angle = 360.f - (float)((360.f/n) * bin);
// if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
// kpt.angle = 0.f;
//addpoint;
#if 1
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
ori = 360.f - (float)((360.f/n) * bin);
if(abs(ori - 360.f) < FLT_EPSILON)
ori = 0.f;
d_point[idx*KEYPOINTS_SIZE+5] = ori;
// kpt.pt.x = (c + xc) * (1 << octv);
// kpt.pt.y = (r + xr) * (1 << octv);
// kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
// //why '*2'
// kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
// kpt.response = std::abs(contr);
                    //guard: ori_arr/idx_arr only hold two entries
                    if(num_idx < 2){
                        ori_arr[num_idx] = ori;
                        idx_arr[num_idx] = idx;
                        num_idx++;
                    }
#else
#endif
}
}
delete []buf;
}//orientation
num_idx = min(num_idx,2);
for(int i = 0;i<num_idx;i++)
        calcDescriptors(currptr,x,y,scl_octv,pitch,width,height,ori_arr[i],d_decriptor,idx_arr[i]);
}//extrema
}
__global__ void findScaleSpaceExtrema_gpu(float *d_point,int s, int width ,int pitch ,int height,const int threshold,const int nOctaveLayers,const int maxNum){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
    //skip unstable keypoints near the image border
if(y >= height - SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER || x<SIFT_IMG_BORDER || y<SIFT_IMG_BORDER)
return;
float *currptr = pd[s] +y*pitch+x;
float *prevptr = pd[s-1]+y*pitch+x;
float *nextptr = pd[s+1]+y*pitch+x;
int o = s/(nOctaveLayers+2);
float val = *currptr;
int step = pitch;
int c = 0;
if( abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
(val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
{
/*adjustLocalExtrema*/
const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
float Vs=0, Vx=0, Vy=0, contr=0;
float dx,dy,ds,dxx,dyy,dxy;
int j = 0,layer;
//get the x,y,s,Vs,Vx,Vy or return
for( ; j < SIFT_MAX_INTERP_STEPS; j++ )
{
currptr = pd[s] +y*pitch+x;
prevptr = pd[s-1]+y*pitch+x;
nextptr = pd[s+1]+y*pitch+x;
//the first derivative of x,y and scale
dx = (currptr[1] - currptr[-1])*deriv_scale;
dy = (currptr[pitch] - currptr[-pitch])*deriv_scale;;
ds = (nextptr[0] - prevptr[0])*deriv_scale;
float v2 = currptr[0]*2;
//the second derivative of x,y,scale
dxx = (currptr[1] + currptr[-1] - v2)*second_deriv_scale;
dyy = (currptr[pitch] + currptr[-pitch] - v2)*second_deriv_scale;
float dss = (nextptr[0] + prevptr[0] - v2)*second_deriv_scale;
dxy = (currptr[pitch+1] - currptr[1-pitch] -
currptr[-1+pitch] + currptr[-pitch-1])*cross_deriv_scale;
float dxs = (nextptr[1] - nextptr[-1] -
prevptr[1] + prevptr[-1])*cross_deriv_scale;
float dys = (nextptr[pitch] - nextptr[-pitch] -
prevptr[pitch] + prevptr[-pitch])*cross_deriv_scale;
            //algebraic cofactors (entries of the adjugate matrix)
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
            //idet is the reciprocal of the matrix determinant
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
////////////////////////
/// A(dxx, dxy, dxs,
/// dxy, dyy, dys,
/// dxs, dys, dss);
///
/// A*(idxx, idxy, idxs,
/// idxy, idyy, idys,
/// idxs, idys, idss);
///
/// B(dx,dy,dz)
/////////////////////////
//dX = (A^-1)*B
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
Vx = -pdx;
Vy = -pdy;
Vs = -pds;
            //because the convergence test happens before the position update, the
            //iteration finally yields integer x,y,s and offsets Vx,Vy,Vs (each < 0.5).
            //The refined extremum location is x+Vx, y+Vy.
if( abs(Vs) < 0.5f && abs(Vx) < 0.5f && abs(Vy) < 0.5f )
break;
            //round to the nearest integer
x += int(Vx > 0 ? ( Vx + 0.5 ) : (Vx - 0.5));
y += int(Vy > 0 ? ( Vy + 0.5 ) : (Vy - 0.5));
s += int(Vs > 0 ? ( Vs + 0.5 ) : (Vs - 0.5));
layer = s - o*(nOctaveLayers+2);
if( layer < 1 || layer > nOctaveLayers ||
y < SIFT_IMG_BORDER || y >= height - SIFT_IMG_BORDER ||
x < SIFT_IMG_BORDER || x >= width - SIFT_IMG_BORDER )
return;
}//for
if( j >= SIFT_MAX_INTERP_STEPS )
return;
        //After the iterations, x,y,s and the offsets (Vx,Vy,Vs) (each < 0.5) are available.
{
            //reject low-contrast points, which are easily influenced by image noise
float t = dx*Vx + dy*Vy + ds*Vs;
contr = currptr[0]*img_scale + t * 0.5f;
if( abs( contr ) * nOctaveLayers < 0.04 )
return;
// principal curvatures are computed using the trace and det of Hessian
float tr = dxx + dyy;
float det = dxx*dyy-dxy*dxy;
if( det <= 0 || tr*tr*10 >= (10 + 1)*(10 + 1)*det )
return;
}
layer = s - o*(nOctaveLayers+2);
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = (x + Vx)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+1] = (y + Vy)*(1 << o);
d_point[idx*KEYPOINTS_SIZE+2] = o + (s<<8) + ((int)(((Vs + 0.5)*255)+0.5) << 16);
float size = 1.6*__powf(2.f, (layer + Vs) / nOctaveLayers)*(1 << o)*2;
d_point[idx*KEYPOINTS_SIZE+3] = size;
d_point[idx*KEYPOINTS_SIZE+4] = abs(contr);
d_point[idx*KEYPOINTS_SIZE+6] = s;
d_point[idx*KEYPOINTS_SIZE+7] = x;
d_point[idx*KEYPOINTS_SIZE+8] = y;
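        //Keypoint record layout (inferred from the packing here and the host-side unpacking):
        //[0]=x, [1]=y, [2]=packed octave/layer/offset, [3]=size, [4]=response, [5]=angle,
        //[6]=absolute DoG index s, [7]=integer x, [8]=integer y, so KEYPOINTS_SIZE must be >= 9.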
//temsize+=size*0.5f/(1 << o)*SIFT_ORI_RADIUS+0.5;
float scl_octv = size*0.5f/(1 << o);
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5;
        //process a square neighborhood of (2*radius+1)^2 pixels around the keypoint
int len = (radius*2+1)*(radius*2+1);
//int temBuffSize = len*4+2*SIFT_ORI_HIST_BINS+2;
atomicMax(&temsize,len);
}
}
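//Note on the two-pass design (as wired up on the host side): the kernel above only records
//candidate keypoints and the largest neighbourhood length via atomicMax(&temsize,len); the
//host then allocates temdata with temsize*4 floats per keypoint and launches
//calcOrientationHist_gpu (below) to assign the dominant orientation(s).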
__global__ void calcOrientationHist_gpu(float *d_point,float* temdata,const int buffSize,const int pointsNum,const int maxNum,const int nOctaveLayers)
{
//int x = blockIdx.x*blockDim.x+threadIdx.x;
int pointIndex = blockIdx.x*blockDim.x+threadIdx.x;
if(pointIndex>=pointsNum)
return;
#define SHAREMEMORY
#ifdef SHAREMEMORY
__shared__ float s_point[BLOCK_SIZE_ONE_DIM*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE] =d_point[pointIndex*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE+1] =d_point[pointIndex*KEYPOINTS_SIZE+1];
s_point[threadIdx.x*KEYPOINTS_SIZE+2] =d_point[pointIndex*KEYPOINTS_SIZE+2];
s_point[threadIdx.x*KEYPOINTS_SIZE+3] =d_point[pointIndex*KEYPOINTS_SIZE+3];
s_point[threadIdx.x*KEYPOINTS_SIZE+4] =d_point[pointIndex*KEYPOINTS_SIZE+4];
s_point[threadIdx.x*KEYPOINTS_SIZE+5] =d_point[pointIndex*KEYPOINTS_SIZE+5];
s_point[threadIdx.x*KEYPOINTS_SIZE+6] =d_point[pointIndex*KEYPOINTS_SIZE+6];
s_point[threadIdx.x*KEYPOINTS_SIZE+7] =d_point[pointIndex*KEYPOINTS_SIZE+7];
s_point[threadIdx.x*KEYPOINTS_SIZE+8] =d_point[pointIndex*KEYPOINTS_SIZE+8];
__syncthreads();
float size =s_point[threadIdx.x*KEYPOINTS_SIZE+3];
int s = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
int x = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
int y = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
#else
float size =d_point[pointIndex*KEYPOINTS_SIZE+3];
int s = d_point[pointIndex*KEYPOINTS_SIZE+6];
int x = d_point[pointIndex*KEYPOINTS_SIZE+7];
int y = d_point[pointIndex*KEYPOINTS_SIZE+8];
#endif
int o = s/(nOctaveLayers+2);
int layer = s - o*(nOctaveLayers+2);
int width = d_oIndex[o*3];
int height = d_oIndex[o*3+1];
int pitch = d_oIndex[o*3+2];
float* currptr;
    //currptr points into the DoG image that contains the current extremum.
    //x,y,s is the current location in the DoG pyramid.
    //Note: s is the absolute scale index, while 'layer' is the relative index
    //inside the octave, in the range 1~nOctaveLayers.
    //The orientation is computed on the Gaussian pyramid, so currptr is redirected:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
    //sigma*2^(layer/S): the scale relative to the octave
float scl_octv = size*0.5f/(1 << o);
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
float* hists = new float[2*n+4];
//the processing range of the point: a square neighborhood
int len = (radius*2+1)*(radius*2+1);
//gaussian smoothing coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = temdata+pointIndex*buffSize;
//float *buf = (float *)malloc((len*4 + n+4 + n)*sizeof(float));
//buf is temporary storage for per-sample data:
//the first len entries hold X (reused as Mag), the next len hold Y,
//the next len hold Ori, and the last len hold the gaussian weights W;
//temphist (in hists) holds the raw orientation histogram bins.
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
//gradient direction histogram
float* temphist = hists + 2;
float* hist = temphist + 2+n;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
// if(radius > 16)
// printf("radius: %d, point index : %d\n",radius,pointIndex);
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
// '<=' / '>=' bounds keep the i-1/i+1 and j-1/j+1 reads below inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the positive y direction points bottom-to-top, opposite to the image rows
//(top-to-bottom), so dy = row(y-1) - row(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
X[k] = dx;
Y[k] = dy;
//The weight omits the Gaussian normalization constant because the orientation
//computation only needs relative weights.
W[k] = __expf((i*i + j*j)*expf_scale);
Ori[k] = atan2f(dy,dx);
Mag[k] = sqrtf(dy*dy+dx*dx);
//equivalent to cvRound(ori_in_degrees / (360/n))
float tembin = __fdividef(__fdividef(Ori[k]*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += W[k]*Mag[k];
k++;
}
}
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
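//temphist is padded with two wrap-around bins on each side so the 5-tap
//[1,4,6,4,1]/16 smoothing below can treat the histogram as circular.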
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
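//quadratic interpolation of the peak position: a parabola is fitted through the
//bin and its two neighbors, shifting the bin by 0.5*(h[l]-h[r])/(h[l]-2*h[j]+h[r]).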
#ifdef SHAREMEMORY
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = s_point[threadIdx.x*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = s_point[threadIdx.x*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = s_point[threadIdx.x*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = s_point[threadIdx.x*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = s_point[threadIdx.x*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
d_point[idx*KEYPOINTS_SIZE+6] = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
d_point[idx*KEYPOINTS_SIZE+7] = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
d_point[idx*KEYPOINTS_SIZE+8] = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
}
#else
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = d_point[pointIndex*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = d_point[pointIndex*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = d_point[pointIndex*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = d_point[pointIndex*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = d_point[pointIndex*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
}
#endif
}
}
delete []hists;
}
__global__ void calcOrientationHist_gpu1(float *d_point,float* temdata,const int buffSize,const int pointsNum,const int maxNum,const int nOctaveLayers)
{
//int x = blockIdx.x*blockDim.x+threadIdx.x;
int pointIndex = blockIdx.x*blockDim.x+threadIdx.x;
if(pointIndex>=pointsNum)
return;
#define SHAREMEMORY
#ifdef SHAREMEMORY
__shared__ float s_point[BLOCK_SIZE_ONE_DIM*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE] =d_point[pointIndex*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE+1] =d_point[pointIndex*KEYPOINTS_SIZE+1];
s_point[threadIdx.x*KEYPOINTS_SIZE+2] =d_point[pointIndex*KEYPOINTS_SIZE+2];
s_point[threadIdx.x*KEYPOINTS_SIZE+3] =d_point[pointIndex*KEYPOINTS_SIZE+3];
s_point[threadIdx.x*KEYPOINTS_SIZE+4] =d_point[pointIndex*KEYPOINTS_SIZE+4];
s_point[threadIdx.x*KEYPOINTS_SIZE+5] =d_point[pointIndex*KEYPOINTS_SIZE+5];
s_point[threadIdx.x*KEYPOINTS_SIZE+6] =d_point[pointIndex*KEYPOINTS_SIZE+6];
s_point[threadIdx.x*KEYPOINTS_SIZE+7] =d_point[pointIndex*KEYPOINTS_SIZE+7];
s_point[threadIdx.x*KEYPOINTS_SIZE+8] =d_point[pointIndex*KEYPOINTS_SIZE+8];
__syncthreads();
float size =s_point[threadIdx.x*KEYPOINTS_SIZE+3];
int s = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
int x = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
int y = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
#else
float size =d_point[pointIndex*KEYPOINTS_SIZE+3];
int s = d_point[pointIndex*KEYPOINTS_SIZE+6];
int x = d_point[pointIndex*KEYPOINTS_SIZE+7];
int y = d_point[pointIndex*KEYPOINTS_SIZE+8];
#endif
int o = s/(nOctaveLayers+2);
int layer = s - o*(nOctaveLayers+2);
int width = d_oIndex[o*3];
int height = d_oIndex[o*3+1];
int pitch = d_oIndex[o*3+2];
float* currptr;
//currptr points into the image that contains the current extremum.
//x,y,s is the current location in the DoG images.
//Note: s is the absolute scale index and 'layer' is the relative
//location inside the octave, ranging from 1 to nOctaveLayers.
//The orientation is computed on the Gaussian pyramid, so currptr is updated:
currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
//sigma*2^s/S, the sigma relative to the octave.
float scl_octv = size*0.5f/(1 << o);
float omax;
float sigma_ori = SIFT_ORI_SIG_FCTR * scl_octv;
//'+0.5' for rounding because scl_octv>0
int radius = SIFT_ORI_RADIUS * scl_octv+0.5,n = SIFT_ORI_HIST_BINS;
float* hists = new float[2*n+4];
//the processing range of the point: a square neighborhood
int len = (radius*2+1)*(radius*2+1);
//gaussian smoothing coefficient
float expf_scale = -1.f/(2.f * sigma_ori * sigma_ori);
//n = 36
float *buf = temdata+pointIndex*buffSize;
//float *buf = (float *)malloc((len*4 + n+4 + n)*sizeof(float));
//buf is scratch space laid out as X (reused as Mag), Y, Ori and the gaussian
//weights W, each len entries long; temphist (in hists) holds the raw histogram bins.
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
//gradient direction histogram
float* temphist = hists + 2;
float* hist = temphist + 2+n;
for( int i = 0; i < n; i++ )
temphist[i] = 0.f;
// if(radius > 16)
// printf("radius: %d, point index : %d\n",radius,pointIndex);
for( int i = -radius, k = 0; i <= radius; i++ )
{
int yi = y + i;
// '<=' / '>=' bounds keep the i-1/i+1 and j-1/j+1 reads below inside the image
if( yi <= 0 || yi >= height - 1 )
continue;
for( int j = -radius; j <= radius; j++ )
{
int xi = x + j;
if( xi <= 0 || xi >= width - 1 )
continue;
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the positive y direction points bottom-to-top, opposite to the image rows
//(top-to-bottom), so dy = row(y-1) - row(y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
//X[k] = dx;
//Y[k] = dy;
//The weight omits the Gaussian normalization constant because the orientation
//computation only needs relative weights.
float wk,ok,mk;
// W[k] = __expf((i*i + j*j)*expf_scale);
// Ori[k] = atan2f(dy,dx);
// Mag[k] = sqrtf(dy*dy+dx*dx);
wk = __expf((i*i + j*j)*expf_scale);
ok = atan2f(dy,dx);
mk = sqrtf(dy*dy+dx*dx);
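//unlike calcOrientationHist_gpu, this variant keeps the per-sample weight,
//orientation and magnitude in registers, so the X/Y/Mag/Ori/W scratch arrays
//are never written.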
//equivalent to cvRound(ori_in_degrees / (360/n))
float tembin = __fdividef(__fdividef(ok*180,CV_PI),360/n);
int bin = tembin > 0 ? tembin + 0.5:tembin - 0.5;
if( bin >= n )
bin -= n;
if( bin < 0 )
bin += n;
temphist[bin] += wk*mk;
k++;
}
}
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
temphist[n] = temphist[0];
temphist[n+1] = temphist[1];
for(int i = 0; i < n; i++ )
{
hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
omax = hist[0];
for( int i = 1; i < n; i++ )
omax = fmaxf(omax, hist[i]);
//printf("omax : %f \n",omax);
float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
int r2 = j < n-1 ? j + 1 : 0;
if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
#ifdef SHAREMEMORY
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = s_point[threadIdx.x*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = s_point[threadIdx.x*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = s_point[threadIdx.x*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = s_point[threadIdx.x*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = s_point[threadIdx.x*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
d_point[idx*KEYPOINTS_SIZE+6] = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
d_point[idx*KEYPOINTS_SIZE+7] = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
d_point[idx*KEYPOINTS_SIZE+8] = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
}
#else
if(hist[j] == omax)
d_point[pointIndex*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
else{
//addpoint;
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>maxNum ? maxNum-1 : idx);
d_point[idx*KEYPOINTS_SIZE] = d_point[pointIndex*KEYPOINTS_SIZE];
d_point[idx*KEYPOINTS_SIZE+1] = d_point[pointIndex*KEYPOINTS_SIZE+1];
d_point[idx*KEYPOINTS_SIZE+2] = d_point[pointIndex*KEYPOINTS_SIZE+2];
d_point[idx*KEYPOINTS_SIZE+3] = d_point[pointIndex*KEYPOINTS_SIZE+3];
d_point[idx*KEYPOINTS_SIZE+4] = d_point[pointIndex*KEYPOINTS_SIZE+4];
d_point[idx*KEYPOINTS_SIZE+5] = 360.f - (float)((360.f/n) * bin);
}
#endif
}
}
delete []hists;
}
__global__ void calcSIFTDescriptor_gpu(float *d_point,float* d_decriptor,int pointsNum,int nOctaveLayers)
{
//float* currptr,int x,int y,float scl_octv,int pitch,int width,int height,float ori,float* d_decriptor,int index
//description array
//calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
//x,y,360-angle,scl,d,n
//static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
//x, y are the coordinates in the pyramid image
//scl_octv is the scale relative to the octave
//x,y,scl_octv has been calculated above
/******************calcDescriptor*****************/
int pointIndex = blockIdx.x*blockDim.x+threadIdx.x;
if(pointIndex>=pointsNum)
return;
#define SHAREMEMORY
#ifdef SHAREMEMORYa
__shared__ float s_point[BLOCK_SIZE_ONE_DIM*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE] =d_point[pointIndex*KEYPOINTS_SIZE];
s_point[threadIdx.x*KEYPOINTS_SIZE+1] =d_point[pointIndex*KEYPOINTS_SIZE+1];
s_point[threadIdx.x*KEYPOINTS_SIZE+2] =d_point[pointIndex*KEYPOINTS_SIZE+2];
s_point[threadIdx.x*KEYPOINTS_SIZE+3] =d_point[pointIndex*KEYPOINTS_SIZE+3];
s_point[threadIdx.x*KEYPOINTS_SIZE+4] =d_point[pointIndex*KEYPOINTS_SIZE+4];
s_point[threadIdx.x*KEYPOINTS_SIZE+5] =d_point[pointIndex*KEYPOINTS_SIZE+5];
s_point[threadIdx.x*KEYPOINTS_SIZE+6] =d_point[pointIndex*KEYPOINTS_SIZE+6];
s_point[threadIdx.x*KEYPOINTS_SIZE+7] =d_point[pointIndex*KEYPOINTS_SIZE+7];
s_point[threadIdx.x*KEYPOINTS_SIZE+8] =d_point[pointIndex*KEYPOINTS_SIZE+8];
__syncthreads();
float size = s_point[threadIdx.x*KEYPOINTS_SIZE+3];
float ori = s_point[threadIdx.x*KEYPOINTS_SIZE+5];
int s = s_point[threadIdx.x*KEYPOINTS_SIZE+6];
int x = s_point[threadIdx.x*KEYPOINTS_SIZE+7];
int y = s_point[threadIdx.x*KEYPOINTS_SIZE+8];
#else
float size =d_point[pointIndex*KEYPOINTS_SIZE+3];
float ori = d_point[pointIndex*KEYPOINTS_SIZE+5];
int s = d_point[pointIndex*KEYPOINTS_SIZE+6];
int x = d_point[pointIndex*KEYPOINTS_SIZE+7];
int y = d_point[pointIndex*KEYPOINTS_SIZE+8];
#endif
int o = s/(nOctaveLayers+2);
int layer = s - o*(nOctaveLayers+2);
float scl_octv = size/((1 << o)*2);
int width = d_oIndex[o*3];
int height = d_oIndex[o*3+1];
int pitch = d_oIndex[o*3+2];
float *currptr = pgpyr[o*(nOctaveLayers+3) + layer]+y*pitch+x;
int d = SIFT_DESCR_WIDTH,n = SIFT_DESCR_HIST_BINS;
ori = 360.f - ori;
if(std::abs(ori - 360.f) < FLT_EPSILON)
ori = 0.f;
//printf(" %d,%d,%f,%f,%f ",x,y,*currptr,ori,scl_octv);
//printf(" %d,%d,%f ",x,y,*(pgpyr[o*(nOctaveLayers+3) + layer]+1));
float cos_t = cosf(ori*(float)(CV_PI/180));
float sin_t = sinf(ori*(float)(CV_PI/180));
//n=8
float bins_per_rad = n / 360.f;
float exp_scale = -1.f/(d * d * 0.5f);
//hist_width = SIFT_DESCR_SCL_FCTR * scale: the width of one descriptor bin in pixels
float hist_width = SIFT_DESCR_SCL_FCTR * scl_octv;
int radius = int(hist_width * 1.4142135623730951f * (d + 1) * 0.5f+0.5);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = min(radius, (int) sqrt(((double) width)*width + ((double) height)*height));
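//the sampling radius covers the rotated d x d grid of bins of width hist_width,
//with half a bin of margin on each side: radius = hist_width*sqrt(2)*(d+1)/2.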
cos_t /= hist_width;
sin_t /= hist_width;
//len is the number of pixels in the keypoint neighborhood and histlen is the length
//of the histogram, i.e. of the feature vector, nominally d*d*n; each dimension is
//padded by 2 to leave room for the circular wrap-around handling below.
int i, j, k, len = (radius*2+1)*(radius*2+1);
__shared__ float dst1[SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS*BLOCK_SIZE_ONE_DIM];
float* dst = dst1+threadIdx.x*d*d*n;
//float dst[SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS];
int rows = height, cols = width;
//float *buf = new float[len*6 + histlen];
const int histlen = (SIFT_DESCR_WIDTH+2)*(SIFT_DESCR_WIDTH+2)*(SIFT_DESCR_HIST_BINS+2);
float hist[histlen];
//__shared__ float hist[histlen*BLOCK_SIZE_ONE_DIM];
//initialize hist to 0 because the code below accumulates with '+='
for( i = 0; i < d+2; i++ )
{
for( j = 0; j < d+2; j++ )
for( k = 0; k < n+2; k++ )
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
}
//traverse the bounding rectangle and compute two groups of values:
//1. dx, dy, w, ori, mag in image coordinates
//2. the sample position in the rotated bin coordinate system
for( i = -radius, k = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
// Calculate sample's histogram array coords rotated relative to ori.
// Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
// r_rot = 1.5) have full weight placed in row 1 after interpolation.
float c_rot = j * cos_t - i * sin_t;
float r_rot = j * sin_t + i * cos_t;
float rbin = r_rot + d/2 - 0.5f;
float cbin = c_rot + d/2 - 0.5f;
int r = y + i, c = x + j;
//d = 4
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
{
float dx = (float)(currptr[i*pitch+j+1] - currptr[i*pitch+j-1]);
//the positive direction is from bottom to top contrary to the image /
//from top to bottom.So dy = y-1 - (y+1).
float dy = (float)(currptr[(i-1)*pitch+j] - currptr[(i+1)*pitch+j]);
// float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
// float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
//X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
// W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
float wk,ok,mk;
wk = __expf((c_rot * c_rot + r_rot * r_rot)*exp_scale);
ok = atan2f(dy,dx);
ok = (ok*180/CV_PI);
ok = ok<0? ok+360:ok;
mk = sqrtf(dy*dy+dx*dx);
//float rbin = RBin[k], cbin = CBin[k];
float obin = (ok - ori)*bins_per_rad;
float mag = mk*wk;
int r0 = floor(rbin);
int c0 = floor(cbin);
int o0 = floor(obin);
rbin -= r0;
cbin -= c0;
obin -= o0;
if( o0 < 0 )
o0 += n;
if( o0 >= n )
o0 -= n;
// if(x == 1936 && y ==744 ){
// printf("k: %d,rbin: %f cbin: %f obin: %f mag: %f ok: %f\n",k,rbin,cbin,obin,mag,ok);
// }
// histogram update using tri-linear interpolation
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
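//the weighted magnitude is distributed over the 8 surrounding (row, col, orientation)
//bins; after the subtractions above, rbin, cbin and obin are the fractional offsets
//from the lower bin in each dimension, so each of the 8 products is one trilinear weight.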
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
hist[idx] += v_rco000;
hist[idx+1] += v_rco001;
hist[idx+(n+2)] += v_rco010;
hist[idx+(n+3)] += v_rco011;
hist[idx+(d+2)*(n+2)] += v_rco100;
hist[idx+(d+2)*(n+2)+1] += v_rco101;
hist[idx+(d+3)*(n+2)] += v_rco110;
hist[idx+(d+3)*(n+2)+1] += v_rco111;
k++;
}
}
// if(x == 1936 && y ==744 ){
// for(int i =0;i<360;i++)
// printf(" %f ",hist[i]);
// printf("k: %d",k);
// }
// finalize histogram, since the orientation histograms are circular
for( i = 0; i < d; i++ )
for( j = 0; j < d; j++ )
{
int idx = ((i+1)*(d+2) + (j+1))*(n+2);
hist[idx] += hist[idx+n];
hist[idx+1] += hist[idx+n+1];
for( k = 0; k < n; k++ )
dst[(i*d + j)*n + k] = hist[idx+k];
}
// copy histogram to the descriptor,
// apply hysteresis thresholding
// and scale the result, so that it can be easily converted
// to byte array
float nrm2 = 0;
len = d*d*n;
k = 0;
for( ; k < len; k++ )
nrm2 += dst[k]*dst[k];
float thr = sqrtf(nrm2)*SIFT_DESCR_MAG_THR;
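//normalize-clip-renormalize: each element is clipped at SIFT_DESCR_MAG_THR times the
//L2 norm to limit the influence of large gradients, then the vector is renormalized
//and scaled by SIFT_INT_DESCR_FCTR so it fits a byte range.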
i = 0, nrm2 = 0;
for( ; i < len; i++ )
{
float val = min(dst[i], thr);
dst[i] = val;
nrm2 += val*val;
}
__syncthreads();
nrm2 = SIFT_INT_DESCR_FCTR/max(sqrtf(nrm2), FLT_EPSILON);
k = 0;
for( ; k < len; k++ )
{
//dst[k] = (uchar)(dst[k]*nrm2);
d_decriptor[pointIndex*len + k] = (uchar)(dst[k]*nrm2);
// if(x == 21 && y ==257 ){
// printf("k: %d,%f \n",k,d_decriptor[pointIndex*len + k]);
// }
}
}
// Scale down thread block width
#define SCALEDOWN_W 160
// Scale down thread block height
#define SCALEDOWN_H 16
__constant__ float d_Kernel1[5];
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
const float *k = d_Kernel1;
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 1;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
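// 2x downsampling with a separable 5-tap low-pass filter: each input row is filtered
// horizontally into one of the five rolling brow buffers, and every second output row
// combines five brow rows vertically with the same kernel.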
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx0] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k[2]*brow[tx2] + k[0]*(brow[tx0]+brow[tx4]) + k[1]*(brow[tx1]+brow[tx3]);
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx1] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && !(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
}
__global__ void test()
{
// unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
// printf("cnt : %d \n",d_PointCounter[0]);
}
void testDiffimage(float *d_Octave0,float *d_Octave1,float *d_diffOctave,int pitch,int height){
dim3 Block(32,8);
dim3 Grid(iDivUp(pitch,Block.x),iDivUp(height,Block.y));
differenceImg<<<Grid,Block>>>(d_Octave0,d_Octave1,d_diffOctave,pitch,height);
safeCall(cudaDeviceSynchronize());
}
//input CudaImage; on return its d_data has been Gaussian-smoothed in place
void cuGaussianBlur(CudaImage &cuImg,float sigma)
{
assert(sigma>0);
int kernelSize = 0;
//sigma = sqrtf(sigma * sigma - 0.5 * 0.5 * 4);
//OpenCV computes the kernel size as:
//ksize.width = cvRound(sigma*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
kernelSize = cvRound(sigma*4*2 + 1)|1;
assert( kernelSize < 32*2+1 );
Mat kx;
kx = getGaussianKernel(kernelSize,sigma,CV_32F);
CHECK(cudaMemcpyToSymbol(coeffGaussKernel,(float*)kx.data,sizeof(float)*kernelSize));
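//the blur is separable: GaussianBlurKernelRow filters along rows into tmp_data,
//then GaussianBlurKernelCol filters tmp_data along columns into tmp_data1.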
dim3 BlockRow(ROW_BLOCK_DIM_X,ROW_BLOCK_DIM_Y);
dim3 GridRow(iDivUp(cuImg.pitch,BlockRow.x*ROW_UNROLL_STEPS),iDivUp(cuImg.height,BlockRow.y));
float *tmp_data,*tmp_data1;
size_t pitch;
safeCall(cudaMallocPitch((void**)&tmp_data, (size_t*) &pitch, (size_t) cuImg.width*sizeof(float), (size_t) cuImg.height));
GaussianBlurKernelRow<<<GridRow,BlockRow>>>(cuImg.d_data,tmp_data,cuImg.width,cuImg.height,kernelSize,cuImg.pitch);
safeCall(cudaDeviceSynchronize());
safeCall(cudaMallocPitch((void**)&tmp_data1, (size_t*) &pitch, (size_t) cuImg.width*sizeof(float), (size_t) cuImg.height));
dim3 BlockCol(COLUMNS_BLOCKDIM_X,COLUMNS_BLOCKDIM_Y);
dim3 GridCol(iDivUp(cuImg.pitch,BlockCol.x),iDivUp(cuImg.height,BlockCol.y*COLUMNS_RESULT_STEPS));
GaussianBlurKernelCol<<<GridCol,BlockCol>>>(tmp_data,tmp_data1,cuImg.width,cuImg.height,kernelSize,cuImg.pitch);
safeCall(cudaDeviceSynchronize());
/*device data has not been copied back to the host yet*/
safeCall(cudaMemcpy2D(cuImg.d_data,cuImg.pitch*sizeof(float),tmp_data1,cuImg.pitch*sizeof(float),cuImg.width*sizeof(float),(size_t) cuImg.height,cudaMemcpyDeviceToDevice));
#if 0
Mat dis(cuImg.height,cuImg.width,CV_32F);
safeCall(cudaMemcpy2D(dis.data,cuImg.width*sizeof(float),tmp_data1,cuImg.pitch*sizeof(float),cuImg.width*sizeof(float),(size_t) cuImg.height,cudaMemcpyDeviceToHost));
Mat gray;
dis.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey();
#endif
cudaFree(tmp_data);
cudaFree(tmp_data1);
#if MESSAGE == 0
std::cout<<kernelSize<<std::endl;
for(int i= 0 ;i<kx.rows;i++)
for(int j = 0;j<kx.cols;j++){
std::cout<<kx.at<float>(i,j)<<std::endl;
}
#endif
}
void createInitialImage(const Mat &src, CudaImage &base, float sigma,bool doubleImageSize)
{
int width = src.cols;
int height = src.rows;
if(!src.data){
printf("input none data !");
return;
}
Mat gray, gray_fpt;
if( src.channels() == 3 || src.channels() == 4 )
{
cvtColor(src, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt, DataType<float>::type, 1, 0);
}
else
src.convertTo(gray_fpt, DataType<float>::type, 1, 0);
//sigma difference, i.e. sqrt(1.6*1.6 - 0.5*0.5*4)
float sig_diff;
if( doubleImageSize )
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
resize(gray_fpt, gray_fpt, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
width = gray_fpt.cols;
height = gray_fpt.rows;
base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
base.Download();
cuGaussianBlur(base,sig_diff);
}
else
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
base.Allocate(width,height,iAlignUp(width, 128),false,NULL,(float*)gray_fpt.data);
base.Download();
cuGaussianBlur(base,sig_diff);
//GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
}
}
double ScaleDown(CudaImage &res, CudaImage &src, float variance)
{
if (res.d_data==NULL || src.d_data==NULL) {
printf("ScaleDown: missing data\n");
return 0.0;
}
// double a = 0.6;
// float h_Kernel[5] = {1.0/4 - a/2.0, 1.0/4, a, 1.0/4, 1.0/4 - a/2.0};
float h_Kernel[5];
float kernelSum = 0.0f;
for (int j=0;j<5;j++) {
h_Kernel[j] = (float)expf(-(double)(j-2)*(j-2)/2.0/variance);
kernelSum += h_Kernel[j];
}
for (int j=0;j<5;j++)
h_Kernel[j] /= kernelSum;
safeCall(cudaMemcpyToSymbol(d_Kernel1, h_Kernel, 5*sizeof(float)));
dim3 blocks(iDivUp(src.width, SCALEDOWN_W), iDivUp(src.height, SCALEDOWN_H));
dim3 threads(SCALEDOWN_W + 4);
ScaleDown<<<blocks, threads>>>(res.d_data, src.d_data, src.width, src.pitch, src.height, res.pitch);
checkMsg("ScaleDown() execution failed\n");
return 0.0;
}
void buildGaussianPyramid(CudaImage& base, std::vector<CudaImage>& pyr, int nOctaves){
//the vector of per-layer sigmas within an octave
std::vector<double> sig(nOctaveLayers + 3);
//init the size of the pyramid images which is nOctave*nLayer
pyr.resize(nOctaves*(nOctaveLayers + 3));
#define USE_SEPARATION_MEMORY
#ifdef USE_SEPARATION_MEMORY
//allocate separate memory for each pyramid level
int w = base.width;
int h = base.height;
for( int o = 0; o < nOctaves; o++ )
{
if(o != 0){
w /= 2;
h /= 2;
}
for( int i = 0; i < nOctaveLayers + 3; i++ ){
pyr[o*(nOctaveLayers + 3) + i].Allocate(w,h,iAlignUp(w, 128),false);
}
}
#else
//optimization: allocate one big memory block for the whole pyramid
int w = base.width;
int h = base.height;
int pyrDataSize = 0;
for( int o = 0; o < nOctaves; o++ )
{
if(o != 0){
w /= 2;
h /= 2;
}
int p = iAlignUp(w,128);
pyrDataSize += (nOctaveLayers+3)*p*h;
}
float* d_pyrData = NULL;
cudaMalloc(&d_pyrData,pyrDataSize*sizeof(float));
//size_t pitch;
//safeCall(cudaMallocPitch((void **)&d_pyrData, &pitch, (size_t)4096, (pyrDataSize+4095)/4096));
//safeCall(cudaMallocPitch((void **)&d_pyrData, &pitch, (size_t)4096, (pyrDataSize+4095)/4096*sizeof(float)));
int memLocation = 0;
w = base.width;
h = base.height;
for( int o = 0; o < nOctaves; o++ )
{
if(o != 0){
w /= 2;
h /= 2;
}
for( int i = 0; i < nOctaveLayers + 3; i++ ){
int p = iAlignUp(w,128);
pyr[o*(nOctaveLayers + 3) + i].Allocate(w,h,p,false,d_pyrData+memLocation);
//because d_pyrData is a float pointer, the offset is p*h elements
//rather than p*h*sizeof(float) bytes
memLocation += p*h;
}
}
// CudaImage& src = pyr[0*(nOctaveLayers + 3)];
// CudaImage& dst = pyr[0*(nOctaveLayers + 3)+1];
// dst.copyDevice(src,1);
#endif
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double k = std::pow( 2., 1. / nOctaveLayers );
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
double sig_prev = std::pow(k, (double)(i-1))*sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
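//sig[i] is the incremental blur needed to go from layer i-1 to layer i, since each
//layer below is produced by blurring the previous layer rather than the base image.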
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
CudaImage& dst = pyr[o*(nOctaveLayers + 3) + i];
if( o == 0 && i == 0 ){
dst.copyDevice(base,1);
#ifdef SHOW_GAUSSIANPYRAMID
CudaImage &src = dst;
Mat gray,show;
show.create(src.height,src.width,CV_32F);
safeCall(cudaMemcpy2D(show.data,src.width*sizeof(float),src.d_data,src.pitch*sizeof(float),src.width*sizeof(float),(size_t) src.height,cudaMemcpyDeviceToHost));
show.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey(0);
#endif
}
// base of new octave is halved image from end of previous octave
else if( i == 0 )
{
CudaImage& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
ScaleDown(dst,src,0.5);
}
else
{
CudaImage& src = pyr[o*(nOctaveLayers + 3) + i-1];
dst.copyDevice(src,1);
cuGaussianBlur(dst,sig[i]);
#ifdef SHOW_GAUSSIANPYRAMID
Mat gray,show;
show.create(dst.height,dst.width,CV_32F);
safeCall(cudaMemcpy2D(show.data,src.width*sizeof(float),dst.d_data,src.pitch*sizeof(float),src.width*sizeof(float),(size_t) src.height,cudaMemcpyDeviceToHost));
show.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey(0);
#endif
}
}
}
}
//could use cuda stream
void buildDoGPyramid( std::vector<CudaImage>& gpyr, std::vector<CudaImage>& dogpyr )
{
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
//could use cuda stream
for(int o = 0;o<nOctaves;o++){
for(int i = 0;i<nOctaveLayers + 2;i++){
CudaImage& prev = gpyr[o*(nOctaveLayers + 3)+i];
CudaImage& next = gpyr[o*(nOctaveLayers + 3)+i+1];
CudaImage& diff = dogpyr[o*(nOctaveLayers + 2)+i];
diff.Allocate(prev.width,prev.height,prev.pitch,false);
dim3 Block(32,8);
dim3 Grid(iDivUp(diff.pitch,Block.x),iDivUp(diff.height,Block.y));
differenceImg<<<Grid,Block>>>(prev.d_data,next.d_data,diff.d_data,diff.pitch,diff.height);
safeCall(cudaDeviceSynchronize());
#ifdef SHOW_DOGPYRAMID
Mat gray,show;
show.create(diff.height,diff.width,CV_32F);
safeCall(cudaMemcpy2D(show.data,diff.width*sizeof(float),diff.d_data,diff.pitch*sizeof(float),diff.width*sizeof(float),(size_t) diff.height,cudaMemcpyDeviceToHost));
show.convertTo(gray,DataType<uchar>::type, 30, 200);
cvNamedWindow("ss",CV_WINDOW_NORMAL);
imshow("ss",gray);
waitKey(0);
#endif
}
}
}
int getMaxDescriptorBufSize(int len){
//get the max scl_oct
int radius_ori = (sqrt(len)-1)/2;
//int radius = SIFT_ORI_RADIUS * scl_octv+0.5;
float maxScl_oct = (radius_ori + 1)/SIFT_ORI_RADIUS;
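//invert radius = SIFT_ORI_RADIUS*scl_octv (rounded) to get an upper bound on the
//largest scale, then apply the same radius formula as calcSIFTDescriptor_gpu below.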
int radius_des = int((SIFT_DESCR_SCL_FCTR * maxScl_oct * 1.4142135623730951f * (SIFT_DESCR_WIDTH + 1) * 0.5f)+0.5);
return int((radius_des*2+1)*(radius_des*2+1));
}
void findScaleSpaceExtrema(std::vector<CudaImage>& gpyr, std::vector<CudaImage>& dogpyr, std::vector<KeyPoint>& keypointss, Mat &descriptors){
float* d_keypoints;
float* h_keypoints;
int totPts = 0;
safeCall(cudaMemcpyToSymbol(d_PointCounter, &totPts, sizeof(int)));
cudaMalloc(&d_keypoints,sizeof(float)*maxPoints*KEYPOINTS_SIZE);
const int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
//std::cout<<"my threshold = "<<threshold<<std::endl;
#ifdef FIND_DOGERRORTEST
#else
float **h_pd = new float*[dogpyr.size()];
#endif
for(int i = 0;i<dogpyr.size();i++)
h_pd[i] = dogpyr[i].d_data;
safeCall(cudaMemcpyToSymbol(pd, h_pd, sizeof(float *)*dogpyr.size()));
float **h_gpyr = new float*[gpyr.size()];
for(int i = 0;i<gpyr.size();i++)
h_gpyr[i] = gpyr[i].d_data;
safeCall(cudaMemcpyToSymbol(pgpyr, h_gpyr, sizeof(float *)*gpyr.size()));
//for every octave layer (nOctaves*nOctaveLayers in total)
#if 0
//combine find-extrema and orientation computation
dim3 Block(32,8);
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
for(int o = 0;o<nOctaves;o++){
for(int i = 0;i<nOctaveLayers;i++){
int index = o*(nOctaveLayers+2)+i+1;
dim3 Grid(iDivUp(dogpyr[index].pitch,Block.x),iDivUp(dogpyr[index].height,Block.y));
findScaleSpaceExtrema<<<Grid,Block>>>(d_keypoints,index,dogpyr[index].width,dogpyr[index].pitch,dogpyr[index].height,threshold,nOctaveLayers,maxPoints);
//calcPerOctaveLayers<<<Grid,Block>>>(d_keypoints,d_decriptor,index,dogpyr[index].width,dogpyr[index].pitch,dogpyr[index].height,threshold,nOctaveLayers,maxPoints);
safeCall(cudaDeviceSynchronize());
}
}
#else
int temDataSize = 0;
safeCall(cudaMemcpyToSymbol(temsize, &temDataSize, sizeof(int)));
dim3 Block(32,8);
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
for(int o = 0;o<nOctaves;o++){
for(int i = 0;i<nOctaveLayers;i++){
int index = o*(nOctaveLayers+2)+i+1;
dim3 Grid(iDivUp(dogpyr[index].pitch,Block.x),iDivUp(dogpyr[index].height,Block.y));
findScaleSpaceExtrema_gpu<<<Grid,Block>>>(d_keypoints,index,dogpyr[index].width,dogpyr[index].pitch,dogpyr[index].height,threshold,nOctaveLayers,maxPoints);
safeCall(cudaDeviceSynchronize());
}
}
int num0 = 0;
safeCall(cudaMemcpyFromSymbol(&num0, d_PointCounter, sizeof(int)));
num0 = (num0>maxPoints)? maxPoints:num0;
printf("cuda sift kepoints num : %d \n",num0);
int* oIndex = new int[33];
for(int i =0;i<nOctaves;i++){
int index = i*(nOctaveLayers+2);
oIndex[i*3] = dogpyr[index].width;
oIndex[i*3+1] = dogpyr[index].height;
oIndex[i*3+2] = dogpyr[index].pitch;
}
safeCall(cudaMemcpyToSymbol(d_oIndex, oIndex, sizeof(int)*33));
// int* d_oIndex;
// cudaMalloc(&d_oIndex,sizeof(int)*nOctaves*3);
// cudaMemcpy(d_oIndex,oIndex,sizeof(int)*nOctaves*3,cudaMemcpyHostToDevice);
float* temData;
safeCall(cudaMemcpyFromSymbol(&temDataSize, temsize, sizeof(int)));
//4 because the scratch buffer holds 4 arrays of length len (X/Mag, Y, Ori, W)
int buffSize = temDataSize*4;
safeCall(cudaMalloc(&temData,sizeof(float)*num0*buffSize));
//std::cout<<"buffSize:"<<buffSize<<std::endl;
int grid =iDivUp(num0,BLOCK_SIZE_ONE_DIM);
//use the global memory
//calcOrientationHist_gpu<<<grid,BLOCK_SIZE_ONE_DIM>>>(d_keypoints,temData,buffSize,num0,maxPoints,nOctaveLayers);
calcOrientationHist_gpu1<<<grid,BLOCK_SIZE_ONE_DIM>>>(d_keypoints,temData,buffSize,num0,maxPoints,nOctaveLayers);
safeCall( cudaGetLastError() );
safeCall(cudaDeviceSynchronize());
cudaFree(temData);
int num1 = 0;
safeCall(cudaMemcpyFromSymbol(&num1, d_PointCounter, sizeof(int)));
num1 = (num1>maxPoints)? maxPoints:num1;
printf("cuda sift kepoints num : %d \n",num1);
//allocate the descriptor buffer d_descriptor
float* d_descriptor;
int despriptorSize = SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
cudaMalloc(&d_descriptor,sizeof(float)*num1*despriptorSize);
grid =iDivUp(num1,BLOCK_SIZE_ONE_DIM);
calcSIFTDescriptor_gpu<<<grid,BLOCK_SIZE_ONE_DIM>>>(d_keypoints,d_descriptor,num1,nOctaveLayers);
safeCall( cudaGetLastError() );
safeCall(cudaDeviceSynchronize());
float *h_descriptor;
h_descriptor = (float *)malloc(num1*despriptorSize*sizeof(float));
safeCall(cudaMemcpy(h_descriptor,d_descriptor,num1*despriptorSize*sizeof(float),cudaMemcpyDeviceToHost));
descriptors.create(num1,128,CV_32FC1);
safeCall(cudaMemcpy((float*)descriptors.data,d_descriptor,num1*128*sizeof(float),cudaMemcpyDeviceToHost));
#endif
int num = 0;
safeCall(cudaMemcpyFromSymbol(&num, d_PointCounter, sizeof(int)));
num = (num>maxPoints)? maxPoints:num;
printf("cuda sift kepoints num : %d \n",num);
h_keypoints = (float *)malloc(num*KEYPOINTS_SIZE*sizeof(float));
safeCall(cudaMemcpy(h_keypoints,d_keypoints,num*KEYPOINTS_SIZE*sizeof(float),cudaMemcpyDeviceToHost));
cudaFree(d_keypoints);
cudaFree(d_descriptor);
#ifdef SHOW_KEYPOINT
//std::vector<KeyPoint> keypointss;
keypointss.resize(num);
for(int i = 0;i<keypointss.size();++i)
{
keypointss[i].pt.x = h_keypoints[i*KEYPOINTS_SIZE];
keypointss[i].pt.y = h_keypoints[i*KEYPOINTS_SIZE+1];
keypointss[i].octave = h_keypoints[i*KEYPOINTS_SIZE+2];
keypointss[i].size = h_keypoints[i*KEYPOINTS_SIZE+3];
keypointss[i].response = h_keypoints[i*KEYPOINTS_SIZE+4];
keypointss[i].angle = h_keypoints[i*KEYPOINTS_SIZE+5];
}
// KeyPointsFilter::removeDuplicatedSorted( keypointss );
// printf("my sift kepoints num after clear : %d \n",keypointss.size());
#ifdef NODOUBLEIMAGE
#else
int firstOctave = -1;
if( firstOctave < 0 )
for( size_t i = 0; i < keypointss.size(); i++ )
{
KeyPoint& kpt = keypointss[i];
float scale = 1.f/(float)(1 << -firstOctave);
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
#endif
// Mat kepoint;
//// CudaImage &img = gpyr[0];
//// Mat img_1(img.height,img.width,CV_32F);
//// safeCall(cudaMemcpy2D(img_1.data,img.width*sizeof(float),gpyr[0].d_data,gpyr[0].pitch*sizeof(float),gpyr[0].width*sizeof(float),(size_t) gpyr[0].height,cudaMemcpyDeviceToHost));
// //char *a ="../data/100_7101.JPG";
// //char *a ="../data/img2.ppm";
// //char *a ="../data/100_7101.JPG";
// char *a ="../data/road.png";
// Mat img_1 = imread(a);
// Mat gray;
// img_1.convertTo(gray,DataType<uchar>::type, 1, 0);
// drawKeypoints(gray,keypointss,kepoint,cv::Scalar::all(-1),4);
// cvNamedWindow("extract_my",CV_WINDOW_NORMAL);
// imshow("extract_my", kepoint);
// waitKey(0);
// for(int i = 0;i < keypointss.size();i++)
// std::cout<<keypointss[i].pt.x<<" ";
// std::cout<<std::endl;
#ifdef COMPARE_VALUE
sort(keypointss.begin(),keypointss.end(),sortx);
int unique_nums;
unique_nums = std::unique(keypointss.begin(),keypointss.end(),uniquex) - keypointss.begin();
for(int i = 0;i < unique_nums;i++)
std::cout<<keypointss[i].response<<" ";
std::cout<<unique_nums<<std::endl;
#endif
#endif
free(h_keypoints);
free(h_descriptor);
}
void calcDescriptors(std::vector<CudaImage>& gpyr,float* d_keypoints){
}
void displayOctave(std::vector<CudaImage> &Octave)
{
Mat display;
int width = Octave[0].width;
int height = Octave[0].height*Octave.size();
display.create(height,width,CV_32F);
// for(int i = 0 ; i<Octave.size(); i++){
// safeCall(cudaMemcpy2D(display.data+width*Octave[0].height*sizeof(float)*i,Octave[0].width*sizeof(float),Octave[0].d_data,Octave[0].pitch*sizeof(float),Octave[0].width*sizeof(float),(size_t) Octave[0].height,cudaMemcpyDeviceToHost));
// }
for(int i = 0 ; i<Octave.size(); i++){
safeCall(cudaMemcpy2D(display.data+Octave[i].width*Octave[i].height*i*sizeof(float),Octave[i].width*sizeof(float),Octave[i].d_data,Octave[i].pitch*sizeof(float),Octave[i].width*sizeof(float),(size_t) Octave[i].height,cudaMemcpyDeviceToHost));
}
Mat gray;
display.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("a",CV_WINDOW_NORMAL);
imshow("a",gray);
waitKey(0);
}
void disMatf(char* name,CudaImage &img){
Mat dis(img.height,img.width,CV_32F);
safeCall(cudaMemcpy2D(dis.data,img.width*sizeof(float),img.d_data,img.pitch*sizeof(float),img.width*sizeof(float),(size_t) img.height,cudaMemcpyDeviceToHost));
Mat gray;
dis.convertTo(gray,DataType<uchar>::type, 1, 200);
cvNamedWindow(name,CV_WINDOW_NORMAL);
imshow(name,gray);
}
void computePerOctave(CudaImage& base, std::vector<double> &sig, int nOctaveLayers){
std::vector<CudaImage> Octave;
Octave.resize(nOctaveLayers + 3);
Octave[0].copyDevice(base);
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
Octave[i].copyDevice(Octave[i-1]);
cuGaussianBlur(Octave[i],sig[i]);
}
//displayOctave(Octave);
std::vector<CudaImage> diffOctave;
diffOctave.resize(nOctaveLayers+2);
for(int i = 0;i<diffOctave.size();++i)
diffOctave[i].Allocate(Octave[0].width,Octave[0].height,Octave[0].pitch,NULL,NULL);
// float *d_data,pitch;
// safeCall(cudaMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*Octave[0].width*5), (size_t)Octave[0].height));
dim3 Block(32,8);
dim3 Grid(iDivUp(Octave[0].pitch,Block.x),iDivUp(Octave[0].height,Block.y));
for(int i = 0;i<diffOctave.size();i++){
differenceImg<<<Grid,Block>>>(Octave[i].d_data,Octave[i+1].d_data,diffOctave[i].d_data,Octave[0].pitch,Octave[0].height);
safeCall(cudaDeviceSynchronize());
}
#ifdef SHOW
//displayOctave(diffOctave);
#endif
////////////////////
/// findScaleSpaceExtrema
////////////////////
int totPts = 0;
safeCall(cudaMemcpyToSymbol(d_PointCounter, &totPts, sizeof(int)));
float *d_point;
cudaMalloc(&d_point,sizeof(float)*maxPoints*2);
//for(int i = 0 ; i < diffOctave - 1;i++)
int i = 2;
//findScaleSpaceExtrema<<<Grid,Block>>>(diffOctave[i].d_data,diffOctave[i+1].d_data,diffOctave[i+2].d_data,d_point,Octave[0].width,Octave[0].pitch,Octave[0].height);
//safeCall(cudaDeviceSynchronize());
// float *p[3+2];
// float d = 2;
// float *s = &d;
// p[0] = s;
// std::cout<<*(p[0])<<" "<< sizeof(float*) <<std::endl;
// test<<<1,1>>>(p);
float *h_pd[3+2];
for(int i = 0;i<5;i++)
h_pd[i] = diffOctave[i].d_data;
safeCall(cudaMemcpyToSymbol(pd, h_pd, sizeof(float *)*5));
int width = Octave[0].width;
int pitch = Octave[0].pitch;
int heigh = Octave[0].height;
//findScaleSpaceExtrema<<<Grid,Block>>>(d_point,3,Octave[0].width,Octave[0].pitch,Octave[0].height);
safeCall(cudaDeviceSynchronize());
#ifdef SHOW
disMatf("prve",diffOctave[i]);
disMatf("current",diffOctave[i+1]);
disMatf("next",diffOctave[i+2]);
waitKey(0);
#endif
// test<<<2,23>>>();
int num = 0;
safeCall(cudaMemcpyFromSymbol(&num, d_PointCounter, sizeof(int)));
num = (num>2000)? 2000:num;
printf("width : %d , height : %d , num : %d \n",Octave[0].width,Octave[0].height,num);
float *h_points;
h_points = (float *)malloc(num*2*sizeof(float));
//h_points = new float[num*2];
safeCall(cudaMemcpy(h_points,d_point,num*2*sizeof(float),cudaMemcpyDeviceToHost));
std::vector<KeyPoint> keypoints;
keypoints.resize(num);
for(int i = 0;i<keypoints.size();++i)
{
keypoints[i].pt.x = h_points[i*2];
keypoints[i].pt.y = h_points[i*2+1];
}
#ifdef SHOW
Mat kepoint;
CudaImage &img = diffOctave[i+1];
Mat img_1(img.height,img.width,CV_32F);
safeCall(cudaMemcpy2D(img_1.data,img.width*sizeof(float),diffOctave[i+1].d_data,img.pitch*sizeof(float),img.width*sizeof(float),(size_t) img.height,cudaMemcpyDeviceToHost));
Mat gray;
img_1.convertTo(gray,DataType<uchar>::type, 1, 200);
drawKeypoints(gray,keypoints,kepoint);
//char *a ="../data/road.png";
//Mat img_1 = imread(a);
//drawKeypoints(img_1,keypoints,kepoint);
cvNamedWindow("extract_my",CV_WINDOW_NORMAL);
imshow("extract_my", kepoint);
waitKey(0);
#endif
}
/*disable*/
void disMatf(CudaImage &cuImg){
Mat dis(cuImg.height,cuImg.width,CV_32F);
for(int i = 0;i<dis.rows;i++)
{
float *p = dis.ptr<float>(i);
for(int j = 0;j<dis.cols;j++){
p[j] = cuImg.h_data[i*dis.cols+j];
//std::cout<<p[j]<<" ";
}
//std::cout<<std::endl;
}
//memcpy(dis.data,cuImg.h_data,cuImg.width*cuImg.height*sizeof(float));
Mat gray;
dis.convertTo(gray,DataType<uchar>::type, 1, 0);
cvNamedWindow("ff",CV_WINDOW_NORMAL);
imshow("ff",gray);
waitKey();
}
|
1ba940b02eca24d967b30d15000279c7bbb06cd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_func.h"
/* gpuMatMultWithTextureKernel: matrix multiplication on the GPU using texture memory,
 * storing each element at its transposed position
 * result: result matrix, laid out as result[S][M]
 * m: number of rows of matrix A and of the result matrix
 * n: number of columns of A, number of rows of B
 * s: number of columns of B and of the result matrix
 */
__global__ void gpuMatMultAndTransWithTextureKernel(hipfftComplex * result, const int m, const int n, const int s) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
if (offset < m * s)
{
hipfftComplex a,b;
hipfftComplex temp_result;
temp_result.x = 0;
temp_result.y = 0;
for (int i = 0; i < n; i++)
{
a.x = tex1Dfetch(texA, y * n + i).x;
a.y = tex1Dfetch(texA, y * n + i).y;
b.x = tex1Dfetch(texB, i * s + x).x;
b.y = tex1Dfetch(texB, i * s + x).y;
hipfftComplex temp;
temp.x = a.x * b.x - a.y * b.y;
temp.y = a.x * b.y + a.y * b.x;
temp_result.x += temp.x;
temp_result.y += temp.y;
}
//result[offset] = temp_result;
result[x * m + y] = temp_result;
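//store at the transposed position: the product A*B has m rows and s columns, but
//result is laid out with s rows of m columns, so element (y, x) is written to
//result[x*m + y].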
}
}
/* gpuDotMulWithTextureKernel: element-wise matrix multiplication on the GPU using
 * texture memory, storing the result at the transposed position
 * result: result matrix, laid out as result[S][M]
 * m: number of columns of matrix A and of the result matrix
 * s: number of rows of matrix B and of the result matrix
 */
__global__ void gpuDotMulWithTextureKernel(hipfftComplex * result, const int m, const int s) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int bi_index = offset % m;
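//offset % m is the column index inside the s-by-m matrix bound to texA, so every row
//of the transformed product is multiplied element-wise by the m-length vector bound
//to texB (row bi of the product).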
hipfftComplex a,b;
double real,imag;
if (offset < m * s) {
a.x = tex1Dfetch(texA, offset).x;
a.y = tex1Dfetch(texA, offset).y;
b.x = tex1Dfetch(texB, bi_index).x;
b.y = tex1Dfetch(texB, bi_index).y;
real = (double)a.x * (double)b.x - (double)a.y * (double)b.y;
imag = (double)a.x * (double)b.y + (double)a.y * (double)b.x;
result[offset].x = real;
result[offset].y = imag;
}
}
/* doAll: on the GPU, call gpuMatMultAndTransWithTextureKernel and
 * gpuDotMulWithTextureKernel in turn to perform matrix multiply, transpose,
 * FFT and element-wise multiply
 * a: m*n matrix
 * b: n*s matrix
 * result: s*m matrix
 * bi: element-wise multiply with row bi
 */
hipError_t doAll(const hipfftComplex *a, const hipfftComplex *b, hipfftComplex *result, const int m, const int n, const int s, const int bi,hipfftHandle &plan_NX_Many) {
hipfftComplex * dev_a;
hipfftComplex * dev_b;
hipfftComplex * dev_result;
hipfftComplex * dev_bi_data;
hipError_t cudaStatus;
hipChannelFormatDesc desc = hipCreateChannelDesc<hipfftComplex>();
//hipEvent_t gpuStart, gpuFinish;
//float elapsedTime;
//hipEventCreate(&gpuStart);
//hipEventCreate(&gpuFinish);
//hipEventRecord(gpuStart, 0);
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA_capable GPU installed?\n");
goto Error;
}
cudaStatus = hipMalloc((void **)&dev_a, m * n * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc dev_a failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void **)&dev_b, n * s * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc dev_b failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void **)&dev_result, m * s * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc dev_result failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void **)&dev_bi_data, m * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc dev_bi_data failed!\n");
goto Error;
}
cudaStatus = hipBindTexture(NULL, texA, dev_a, desc, m * n * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipBindTexture texA failed!\n");
goto Error;
}
cudaStatus = hipBindTexture(NULL, texB, dev_b, desc, n * s * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipBindTexture texB failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(dev_a, a, m * n * sizeof(hipfftComplex), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "cudamemcpy dev_a failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, n * s * sizeof(hipfftComplex), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy dev_b failed!\n");
goto Error;
}
if ((m % BLOCK_SIZE != 0) && (s % BLOCK_SIZE != 0))
{
fprintf(stderr, "M or S can't be dividen by 16!\n");
goto Error;
}
//Mul%Trans
gpuMatMultAndTransWithTextureKernel << <grid, block >> >(dev_result, m, n, s);
//FFT
checkCudaErrors(hipfftExecC2C(plan_NX_Many, dev_result, dev_result, HIPFFT_FORWARD));
//DouMul
hipUnbindTexture(texA);
hipUnbindTexture(texB);
cudaStatus = hipBindTexture(NULL, texA, dev_result, desc, m * s * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipBindTexture texA failed!\n");
goto Error;
}
cudaStatus = hipBindTexture(NULL, texB, dev_bi_data, desc, m * sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipBindTexture texB failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(dev_bi_data, dev_result + bi * m, m * sizeof(hipfftComplex), hipMemcpyDeviceToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy dev_b failed!\n");
goto Error;
}
gpuDotMulWithTextureKernel << <grid, block >> >(dev_result, m, s);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "MulKernel launch failed: %s!\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceSynchronize return Error code %d after Kernel launched!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(result, dev_result, m * s * sizeof(hipfftComplex), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy result failed!\n");
goto Error;
}
//hipEventRecord(gpuFinish, 0);
//hipEventSynchronize(gpuFinish);
//hipEventElapsedTime(&elapsedTime, gpuStart, gpuFinish);
//printf("\nThe time of GPU do all is %f seconds.\n", elapsedTime / 1000.0);
Error:
hipUnbindTexture(texA);
hipUnbindTexture(texB);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_result);
hipFree(dev_bi_data);
return cudaStatus;
}
void test_FFT(hipfftComplex *data) {
int i;
for(i = 0;i<24;i++){
hipfftComplex *d_fftData;
hipMalloc((void**)&d_fftData,M*sizeof(hipfftComplex));
hipMemcpy(d_fftData,data+i*M,M*sizeof(hipfftComplex),hipMemcpyHostToDevice);
hipfftHandle plan;
hipfftPlan1d(&plan,M,HIPFFT_C2C,1);
hipfftExecC2C(plan,(hipfftComplex*)d_fftData,(hipfftComplex*)d_fftData,HIPFFT_FORWARD);
hipDeviceSynchronize();
hipMemcpy(data+i*M,d_fftData,M*sizeof(hipfftComplex),hipMemcpyDeviceToHost);
//destroy the plan and free the device buffer created in this iteration to avoid leaks
hipfftDestroy(plan);
hipFree(d_fftData);
}
} | 1ba940b02eca24d967b30d15000279c7bbb06cd2.cu | #include "gpu_func.h"
/* gpuMatMultWithTextureKernel: matrix multiplication on the GPU using texture memory,
 * storing each element at its transposed position
 * result: result matrix, laid out as result[S][M]
 * m: number of rows of matrix A and of the result matrix
 * n: number of columns of A, number of rows of B
 * s: number of columns of B and of the result matrix
 */
__global__ void gpuMatMultAndTransWithTextureKernel(cufftComplex * result, const int m, const int n, const int s) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
if (offset < m * s)
{
cufftComplex a,b;
cufftComplex temp_result;
temp_result.x = 0;
temp_result.y = 0;
for (int i = 0; i < n; i++)
{
a.x = tex1Dfetch(texA, y * n + i).x;
a.y = tex1Dfetch(texA, y * n + i).y;
b.x = tex1Dfetch(texB, i * s + x).x;
b.y = tex1Dfetch(texB, i * s + x).y;
cufftComplex temp;
temp.x = a.x * b.x - a.y * b.y;
temp.y = a.x * b.y + a.y * b.x;
temp_result.x += temp.x;
temp_result.y += temp.y;
}
//result[offset] = temp_result;
result[x * m + y] = temp_result;
}
}
/* gpuDotMulWithTextureKernel: element-wise matrix multiplication on the GPU using
 * texture memory, storing the result at the transposed position
 * result: result matrix, laid out as result[S][M]
 * m: number of columns of matrix A and of the result matrix
 * s: number of rows of matrix B and of the result matrix
 */
__global__ void gpuDotMulWithTextureKernel(cufftComplex * result, const int m, const int s) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int bi_index = offset % m;
cufftComplex a,b;
double real,imag;
if (offset < m * s) {
a.x = tex1Dfetch(texA, offset).x;
a.y = tex1Dfetch(texA, offset).y;
b.x = tex1Dfetch(texB, bi_index).x;
b.y = tex1Dfetch(texB, bi_index).y;
real = (double)a.x * (double)b.x - (double)a.y * (double)b.y;
imag = (double)a.x * (double)b.y + (double)a.y * (double)b.x;
result[offset].x = real;
result[offset].y = imag;
}
}
/* doAll: on the GPU, call gpuMatMultAndTransWithTextureKernel and
 * gpuDotMulWithTextureKernel in turn to perform matrix multiply, transpose,
 * FFT and element-wise multiply
 * a: m*n matrix
 * b: n*s matrix
 * result: s*m matrix
 * bi: element-wise multiply with row bi
 */
cudaError_t doAll(const cufftComplex *a, const cufftComplex *b, cufftComplex *result, const int m, const int n, const int s, const int bi,cufftHandle &plan_NX_Many) {
cufftComplex * dev_a;
cufftComplex * dev_b;
cufftComplex * dev_result;
cufftComplex * dev_bi_data;
cudaError_t cudaStatus;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<cufftComplex>();
//cudaEvent_t gpuStart, gpuFinish;
//float elapsedTime;
//cudaEventCreate(&gpuStart);
//cudaEventCreate(&gpuFinish);
//cudaEventRecord(gpuStart, 0);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA_capable GPU installed?\n");
goto Error;
}
cudaStatus = cudaMalloc((void **)&dev_a, m * n * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc dev_a failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void **)&dev_b, n * s * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc dev_b failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void **)&dev_result, m * s * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc dev_result failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void **)&dev_bi_data, m * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc dev_bi_data failed!\n");
goto Error;
}
cudaStatus = cudaBindTexture(NULL, texA, dev_a, desc, m * n * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaBindTexture texA failed!\n");
goto Error;
}
cudaStatus = cudaBindTexture(NULL, texB, dev_b, desc, n * s * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaBindTexture texB failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a, m * n * sizeof(cufftComplex), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudamemcpy dev_a failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, n * s * sizeof(cufftComplex), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy dev_b failed!\n");
goto Error;
}
if ((m % BLOCK_SIZE != 0) && (s % BLOCK_SIZE != 0))
{
fprintf(stderr, "M or S can't be dividen by 16!\n");
goto Error;
}
//Mul%Trans
gpuMatMultAndTransWithTextureKernel << <grid, block >> >(dev_result, m, n, s);
//FFT
checkCudaErrors(cufftExecC2C(plan_NX_Many, dev_result, dev_result, CUFFT_FORWARD));
//DouMul
cudaUnbindTexture(texA);
cudaUnbindTexture(texB);
cudaStatus = cudaBindTexture(NULL, texA, dev_result, desc, m * s * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaBindTexture texA failed!\n");
goto Error;
}
cudaStatus = cudaBindTexture(NULL, texB, dev_bi_data, desc, m * sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaBindTexture texB failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(dev_bi_data, dev_result + bi * m, m * sizeof(cufftComplex), cudaMemcpyDeviceToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy dev_b failed!\n");
goto Error;
}
gpuDotMulWithTextureKernel << <grid, block >> >(dev_result, m, s);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "MulKernel launch failed: %s!\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceSynchronize return Error code %d after Kernel launched!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(result, dev_result, m * s * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy result failed!\n");
goto Error;
}
//cudaEventRecord(gpuFinish, 0);
//cudaEventSynchronize(gpuFinish);
//cudaEventElapsedTime(&elapsedTime, gpuStart, gpuFinish);
//printf("\nThe time of GPU do all is %f seconds.\n", elapsedTime / 1000.0);
Error:
cudaUnbindTexture(texA);
cudaUnbindTexture(texB);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_result);
cudaFree(dev_bi_data);
return cudaStatus;
}
void test_FFT(cufftComplex *data) {
int i;
for(i = 0;i<24;i++){
cufftComplex *d_fftData;
cudaMalloc((void**)&d_fftData,M*sizeof(cufftComplex));
cudaMemcpy(d_fftData,data+i*M,M*sizeof(cufftComplex),cudaMemcpyHostToDevice);
cufftHandle plan;
cufftPlan1d(&plan,M,CUFFT_C2C,1);
cufftExecC2C(plan,(cufftComplex*)d_fftData,(cufftComplex*)d_fftData,CUFFT_FORWARD);
cudaDeviceSynchronize();
cudaMemcpy(data+i*M,d_fftData,M*sizeof(cufftComplex),cudaMemcpyDeviceToHost);
//destroy the plan and free the device buffer created in this iteration to avoid leaks
cufftDestroy(plan);
cudaFree(d_fftData);
}
} |
334a3ceaf9d553cb246622edbe683ee330ab3f52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include "BDLSTM.cuh"
double[] SigmoidA(double[] x)
{
double[] result = new double[x.GetLength(0)];
for (int i=0; i < x.GetLength(0); i++)
{
result[i] = 1.0/(1.0 + Math.Exp(-x[i]));
}
return result;
}
double[] matadd1D(double[] m1, double[] m2)
{
double[] result = new double[m1.GetLength(0)];
for (int i = 0; i < m1.GetLength(0); i++)
{
result[i] = m1[i] + m2[i];
}
return result;
}
double[] matmult2(double[]arr1,double[,]arr2)
{
double temp = 0;
double[] result = new double[arr2.GetLength(1)];
for (int i = 0; i < arr2.GetLength(1); i++)
{
for (int j = 0; j < arr1.GetLength(0); j++)
{
temp += arr1[j] * arr2[j, i];
}
result[i] = temp;
temp = 0;
}
return result;
}
double[] mult(double[]arr1,double[]arr2)
{
double[] result = new double[arr1.GetLength(0)];
if(arr1.GetLength(0) == arr2.GetLength(0))
{
for(int i=0; i < arr1.GetLength(0); i++)
{
result[i] = arr1[i] * arr2[i];
}
}
else
{
throw new Exception("columns not equal");
}
return result;
}
double[] Tanh(double[] arr)
{
double[] result = new double[arr.GetLength(0)];
for(int i=0; i<arr.Length; i++)
{
result[i] = Math.Tanh(arr[i]);
}
return result;
}
__host__ int BDLSTM_cuda(
double const **const input
, double const* nnFlat
, double const* nnLong
, double const* nnShort
){
int rc = 0;
double[] lstm_output = new double[256];
double[] f_lstm_output = new double[256];
double[] b_lstm_output = new double[256];
double[] f_lstm_state = new double[256];
double[] b_lstm_state = new double[256];
for (int i = 0; i < lstm_output.Length; i++)
{
lstm_output[i] = 0.001;
f_lstm_output[i] = 0.001;
b_lstm_output[i] = 0.001;
f_lstm_state[i] = 0.001;
b_lstm_state[i] = 0.001;
}
nnFlat = nnLong = nnShort = -0.01;
for (int i = 0; i < _input.Length; i++)
{
double[] f_iiput = _input[i];
double[] b_iiput = _input[_input.Length - i - 1];
double[] f_input_gate = SigmoidA(matadd1D(matadd1D(matmult2(f_iiput,f_ig_2D),(matmult2(lstm_output,f_ih_2D))),f_bi));
double[] b_input_gate = SigmoidA(matadd1D(matadd1D(matmult2(b_iiput,b_ig_2D),(matmult2(lstm_output,b_ih_2D))),b_bi));
double[] f_forget_gate = SigmoidA(matadd1D(matadd1D(matmult2(f_iiput,f_fg_2D),(matmult2(lstm_output,f_fh_2D))),f_bf));
double[] b_forget_gate = SigmoidA(matadd1D(matadd1D(matmult2(b_iiput,b_fg_2D),(matmult2(lstm_output,b_fh_2D))),b_bf));
double[] f_output_gate = SigmoidA(matadd1D(matadd1D(matmult2(f_iiput,f_og_2D),(matmult2(lstm_output,f_oh_2D))),f_bo));
double[] b_output_gate = SigmoidA(matadd1D(matadd1D(matmult2(b_iiput,b_og_2D),(matmult2(lstm_output,b_oh_2D))),f_bo));
double[] f_memory_cell = Tanh(matadd1D(matadd1D(matmult2(f_iiput,f_mc_2D),(matmult2(lstm_output,f_mh_2D))),f_bm));
double[] b_memory_cell = Tanh(matadd1D(matadd1D(matmult2(b_iiput,b_mc_2D),(matmult2(lstm_output,b_mh_2D))),b_bm));
f_lstm_state = matadd1D((mult(f_lstm_state,f_input_gate)),(mult(f_forget_gate, f_memory_cell)));
b_lstm_state = matadd1D((mult(b_lstm_state,b_input_gate)),(mult(b_forget_gate, b_memory_cell)));
f_lstm_output = mult(f_output_gate,(Tanh(f_lstm_state)));
b_lstm_output = mult(b_output_gate,(Tanh(b_lstm_state)));
lstm_output = Tanh(matadd1D(f_lstm_output,b_lstm_output));
}
double[] model = matadd1D(matmult2(lstm_output, wo_2D), bol);
double[] nnModel = Softmax(model);
*nnFlat = nnModel[0];
*nnLong = nnModel[1];
*nnShort = nnModel[2];
return rc;
}
__global__ void GPU_LSTM()
{
}
| 334a3ceaf9d553cb246622edbe683ee330ab3f52.cu | #include <stdlib.h>
#include <string.h>
#include "BDLSTM.cuh"
double[] SigmoidA(double[] x)
{
double[] result = new double[x.GetLength(0)];
for (int i=0; i < x.GetLength(0); i++)
{
result[i] = 1.0/(1.0 + Math.Exp(-x[i]));
}
return result;
}
double[] matadd1D(double[] m1, double[] m2)
{
double[] result = new double[m1.GetLength(0)];
for (int i = 0; i < m1.GetLength(0); i++)
{
result[i] = m1[i] + m2[i];
}
return result;
}
double[] matmult2(double[]arr1,double[,]arr2)
{
double temp = 0;
double[] result = new double[arr2.GetLength(1)];
for (int i = 0; i < arr2.GetLength(1); i++)
{
for (int j = 0; j < arr1.GetLength(0); j++)
{
temp += arr1[j] * arr2[j, i];
}
result[i] = temp;
temp = 0;
}
return result;
}
double[] mult(double[]arr1,double[]arr2)
{
double[] result = new double[arr1.GetLength(0)];
if(arr1.GetLength(0) == arr2.GetLength(0))
{
for(int i=0; i < arr1.GetLength(0); i++)
{
result[i] = arr1[i] * arr2[i];
}
}
else
{
throw new Exception("columns not equal");
}
return result;
}
double[] Tanh(double[] arr)
{
double[] result = new double[arr.GetLength(0)];
for(int i=0; i<arr.Length; i++)
{
result[i] = Math.Tanh(arr[i]);
}
return result;
}
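// Bidirectional LSTM forward pass: the input sequence is run through a forward and a
// backward LSTM cell (input/forget/output gates plus a candidate memory cell), the two
// hidden states are merged with tanh, and a dense layer followed by softmax produces
// the flat/long/short scores.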
__host__ int BDLSTM_cuda(
double const **const input
, double *nnFlat
, double *nnLong
, double *nnShort
){
int rc = 0;
double[] lstm_output = new double[256];
double[] f_lstm_output = new double[256];
double[] b_lstm_output = new double[256];
double[] f_lstm_state = new double[256];
double[] b_lstm_state = new double[256];
for (int i = 0; i < lstm_output.Length; i++)
{
lstm_output[i] = 0.001;
f_lstm_output[i] = 0.001;
b_lstm_output[i] = 0.001;
f_lstm_state[i] = 0.001;
b_lstm_state[i] = 0.001;
}
*nnFlat = *nnLong = *nnShort = -0.01;
for (int i = 0; i < input.Length; i++)
{
double[] f_iiput = input[i];
double[] b_iiput = input[input.Length - i - 1];
double[] f_input_gate = SigmoidA(matadd1D(matadd1D(matmult2(f_iiput,f_ig_2D),(matmult2(lstm_output,f_ih_2D))),f_bi));
double[] b_input_gate = SigmoidA(matadd1D(matadd1D(matmult2(b_iiput,b_ig_2D),(matmult2(lstm_output,b_ih_2D))),b_bi));
double[] f_forget_gate = SigmoidA(matadd1D(matadd1D(matmult2(f_iiput,f_fg_2D),(matmult2(lstm_output,f_fh_2D))),f_bf));
double[] b_forget_gate = SigmoidA(matadd1D(matadd1D(matmult2(b_iiput,b_fg_2D),(matmult2(lstm_output,b_fh_2D))),b_bf));
double[] f_output_gate = SigmoidA(matadd1D(matadd1D(matmult2(f_iiput,f_og_2D),(matmult2(lstm_output,f_oh_2D))),f_bo));
double[] b_output_gate = SigmoidA(matadd1D(matadd1D(matmult2(b_iiput,b_og_2D),(matmult2(lstm_output,b_oh_2D))),b_bo));
double[] f_memory_cell = Tanh(matadd1D(matadd1D(matmult2(f_iiput,f_mc_2D),(matmult2(lstm_output,f_mh_2D))),f_bm));
double[] b_memory_cell = Tanh(matadd1D(matadd1D(matmult2(b_iiput,b_mc_2D),(matmult2(lstm_output,b_mh_2D))),b_bm));
f_lstm_state = matadd1D((mult(f_lstm_state,f_input_gate)),(mult(f_forget_gate, f_memory_cell)));
b_lstm_state = matadd1D((mult(b_lstm_state,b_input_gate)),(mult(b_forget_gate, b_memory_cell)));
f_lstm_output = mult(f_output_gate,(Tanh(f_lstm_state)));
b_lstm_output = mult(b_output_gate,(Tanh(b_lstm_state)));
lstm_output = Tanh(matadd1D(f_lstm_output,b_lstm_output));
}
double[] model = matadd1D(matmult2(lstm_output, wo_2D), bol);
double[] nnModel = Softmax(model);
*nnFlat = nnModel[0];
*nnLong = nnModel[1];
*nnShort = nnModel[2];
return rc;
}
__global__ void GPU_LSTM()
{
}
|
f5ecb05033e9001f6170316a4a11f5f2ed9a5e30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <thrust/sort.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
namespace cv { namespace gpu { namespace device
{
namespace hough
{
__device__ int g_counter;
////////////////////////////////////////////////////////////////////////
// buildPointList
const int PIXELS_PER_THREAD = 16;
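// Compacts the coordinates of every non-zero pixel of src into a flat list, packing (y, x)
// into a single 32-bit word; per-row shared-memory queues are merged into the global list
// with one atomicAdd on g_counter per block.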
__global__ void buildPointList(const DevMem2Db src, unsigned int* list)
{
__shared__ int s_queues[4][32 * PIXELS_PER_THREAD];
__shared__ int s_qsize[4];
__shared__ int s_start[4];
const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= src.rows)
return;
if (threadIdx.x == 0)
s_qsize[threadIdx.y] = 0;
__syncthreads();
// fill the queue
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x)
{
if (src(y, xx))
{
const unsigned int val = (y << 16) | xx;
const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1);
s_queues[threadIdx.y][qidx] = val;
}
}
__syncthreads();
// let one thread reserve the space required in the global list
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// find how many items are stored in each list
int total_size = 0;
for (int i = 0; i < blockDim.y; ++i)
{
s_start[i] = total_size;
total_size += s_qsize[i];
}
// calculate the offset in the global list
const int global_offset = atomicAdd(&g_counter, total_size);
for (int i = 0; i < blockDim.y; ++i)
s_start[i] += global_offset;
}
__syncthreads();
// copy local queues to global queue
const int qsize = s_qsize[threadIdx.y];
for(int i = threadIdx.x; i < qsize; i += blockDim.x)
{
const unsigned int val = s_queues[threadIdx.y][i];
list[s_start[threadIdx.y] + i] = val;
}
}
int buildPointList_gpu(DevMem2Db src, unsigned int* list)
{
void* counter_ptr;
cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) );
cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(int)) );
const dim3 block(32, 4);
const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(buildPointList, hipFuncCachePreferShared) );
hipLaunchKernelGGL(( buildPointList), dim3(grid), dim3(block), 0, 0, src, list);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int total_count;
cudaSafeCall( hipMemcpy(&total_count, counter_ptr, sizeof(int), hipMemcpyDeviceToHost) );
return total_count;
}
////////////////////////////////////////////////////////////////////////
// linesAccum
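// Hough vote accumulation: each block handles one theta, its threads walk the point list,
// compute rho for every point and atomically increment the corresponding accumulator cell
// (in shared memory when it fits, otherwise directly in global memory).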
__global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
const int n = blockIdx.x;
const float ang = n * theta;
float sin_ang;
float cos_ang;
sincosf(ang, &sin_ang, &cos_ang);
const float tabSin = sin_ang * irho;
const float tabCos = cos_ang * irho;
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int qvalue = list[i];
const int x = (qvalue & 0x0000FFFF);
const int y = (qvalue >> 16) & 0x0000FFFF;
int r = __float2int_rn(x * tabCos + y * tabSin);
r += (numrho - 1) / 2;
::atomicAdd(accum.ptr(n + 1) + r + 1, 1);
}
}
__global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
extern __shared__ int smem[];
for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
smem[i] = 0;
__syncthreads();
const int n = blockIdx.x;
const float ang = n * theta;
float sin_ang;
float cos_ang;
sincosf(ang, &sin_ang, &cos_ang);
const float tabSin = sin_ang * irho;
const float tabCos = cos_ang * irho;
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int qvalue = list[i];
const int x = (qvalue & 0x0000FFFF);
const int y = (qvalue >> 16) & 0x0000FFFF;
int r = __float2int_rn(x * tabCos + y * tabSin);
r += (numrho - 1) / 2;
Emulation::smem::atomicAdd(&smem[r + 1], 1);
}
__syncthreads();
for (int i = threadIdx.x; i < numrho; i += blockDim.x)
accum(n + 1, i) = smem[i];
}
void linesAccum_gpu(const unsigned int* list, int count, DevMem2Di accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
{
const dim3 block(has20 ? 1024 : 512);
const dim3 grid(accum.rows - 2);
cudaSafeCall( hipFuncSetCacheConfig(linesAccumShared, hipFuncCachePreferShared) );
size_t smemSize = (accum.cols - 1) * sizeof(int);
if (smemSize < sharedMemPerBlock - 1000)
hipLaunchKernelGGL(( linesAccumShared), dim3(grid), dim3(block), smemSize, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2);
else
hipLaunchKernelGGL(( linesAccumGlobal), dim3(grid), dim3(block), 0, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// linesGetResult
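// Extracts lines from the accumulator: cells that exceed the threshold and are local maxima
// of their 4-neighbourhood are converted to (rho, theta) pairs and appended to the output
// together with their vote counts.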
__global__ void linesGetResult(const DevMem2Di accum, float2* out, int* votes, const int maxSize, const float threshold, const float theta, const float rho, const int numrho)
{
__shared__ int smem[8][32];
int r = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
int n = blockIdx.y * (blockDim.y - 2) + threadIdx.y;
if (r >= accum.cols || n >= accum.rows)
return;
smem[threadIdx.y][threadIdx.x] = accum(n, r);
__syncthreads();
r -= 1;
n -= 1;
if (threadIdx.x == 0 || threadIdx.x == blockDim.x - 1 || threadIdx.y == 0 || threadIdx.y == blockDim.y - 1 || r >= accum.cols - 2 || n >= accum.rows - 2)
return;
if (smem[threadIdx.y][threadIdx.x] > threshold &&
smem[threadIdx.y][threadIdx.x] > smem[threadIdx.y - 1][threadIdx.x] &&
smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y + 1][threadIdx.x] &&
smem[threadIdx.y][threadIdx.x] > smem[threadIdx.y][threadIdx.x - 1] &&
smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y][threadIdx.x + 1])
{
const float radius = (r - (numrho - 1) * 0.5f) * rho;
const float angle = n * theta;
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float2(radius, angle);
votes[ind] = smem[threadIdx.y][threadIdx.x];
}
}
}
int linesGetResult_gpu(DevMem2Di accum, float2* out, int* votes, int maxSize, float rho, float theta, float threshold, bool doSort)
{
void* counter_ptr;
cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) );
cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(accum.cols, block.x - 2), divUp(accum.rows, block.y - 2));
hipLaunchKernelGGL(( linesGetResult), dim3(grid), dim3(block), 0, 0, accum, out, votes, maxSize, threshold, theta, rho, accum.cols - 2);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int total_count;
cudaSafeCall( hipMemcpy(&total_count, counter_ptr, sizeof(int), hipMemcpyDeviceToHost) );
total_count = ::min(total_count, maxSize);
if (doSort && total_count > 0)
{
thrust::device_ptr<float2> out_ptr(out);
thrust::device_ptr<int> votes_ptr(votes);
thrust::sort_by_key(votes_ptr, votes_ptr + total_count, out_ptr, thrust::greater<int>());
}
return total_count;
}
}
}}}
| f5ecb05033e9001f6170316a4a11f5f2ed9a5e30.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <thrust/sort.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
namespace cv { namespace gpu { namespace device
{
namespace hough
{
__device__ int g_counter;
////////////////////////////////////////////////////////////////////////
// buildPointList
const int PIXELS_PER_THREAD = 16;
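// Compacts the coordinates of every non-zero pixel of src into a flat list, packing (y, x)
// into a single 32-bit word; per-row shared-memory queues are merged into the global list
// with one atomicAdd on g_counter per block.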
__global__ void buildPointList(const DevMem2Db src, unsigned int* list)
{
__shared__ int s_queues[4][32 * PIXELS_PER_THREAD];
__shared__ int s_qsize[4];
__shared__ int s_start[4];
const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= src.rows)
return;
if (threadIdx.x == 0)
s_qsize[threadIdx.y] = 0;
__syncthreads();
// fill the queue
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x)
{
if (src(y, xx))
{
const unsigned int val = (y << 16) | xx;
const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1);
s_queues[threadIdx.y][qidx] = val;
}
}
__syncthreads();
// let one thread reserve the space required in the global list
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// find how many items are stored in each list
int total_size = 0;
for (int i = 0; i < blockDim.y; ++i)
{
s_start[i] = total_size;
total_size += s_qsize[i];
}
// calculate the offset in the global list
const int global_offset = atomicAdd(&g_counter, total_size);
for (int i = 0; i < blockDim.y; ++i)
s_start[i] += global_offset;
}
__syncthreads();
// copy local queues to global queue
const int qsize = s_qsize[threadIdx.y];
for(int i = threadIdx.x; i < qsize; i += blockDim.x)
{
const unsigned int val = s_queues[threadIdx.y][i];
list[s_start[threadIdx.y] + i] = val;
}
}
int buildPointList_gpu(DevMem2Db src, unsigned int* list)
{
void* counter_ptr;
cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) );
const dim3 block(32, 4);
const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(buildPointList, cudaFuncCachePreferShared) );
buildPointList<<<grid, block>>>(src, list);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int total_count;
cudaSafeCall( cudaMemcpy(&total_count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) );
return total_count;
}
////////////////////////////////////////////////////////////////////////
// linesAccum
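// Hough vote accumulation: each block handles one theta, its threads walk the point list,
// compute rho for every point and atomically increment the corresponding accumulator cell
// (in shared memory when it fits, otherwise directly in global memory).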
__global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
const int n = blockIdx.x;
const float ang = n * theta;
float sin_ang;
float cos_ang;
sincosf(ang, &sin_ang, &cos_ang);
const float tabSin = sin_ang * irho;
const float tabCos = cos_ang * irho;
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int qvalue = list[i];
const int x = (qvalue & 0x0000FFFF);
const int y = (qvalue >> 16) & 0x0000FFFF;
int r = __float2int_rn(x * tabCos + y * tabSin);
r += (numrho - 1) / 2;
::atomicAdd(accum.ptr(n + 1) + r + 1, 1);
}
}
__global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
extern __shared__ int smem[];
for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
smem[i] = 0;
__syncthreads();
const int n = blockIdx.x;
const float ang = n * theta;
float sin_ang;
float cos_ang;
sincosf(ang, &sin_ang, &cos_ang);
const float tabSin = sin_ang * irho;
const float tabCos = cos_ang * irho;
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int qvalue = list[i];
const int x = (qvalue & 0x0000FFFF);
const int y = (qvalue >> 16) & 0x0000FFFF;
int r = __float2int_rn(x * tabCos + y * tabSin);
r += (numrho - 1) / 2;
Emulation::smem::atomicAdd(&smem[r + 1], 1);
}
__syncthreads();
for (int i = threadIdx.x; i < numrho; i += blockDim.x)
accum(n + 1, i) = smem[i];
}
void linesAccum_gpu(const unsigned int* list, int count, DevMem2Di accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
{
const dim3 block(has20 ? 1024 : 512);
const dim3 grid(accum.rows - 2);
cudaSafeCall( cudaFuncSetCacheConfig(linesAccumShared, cudaFuncCachePreferShared) );
size_t smemSize = (accum.cols - 1) * sizeof(int);
if (smemSize < sharedMemPerBlock - 1000)
linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
else
linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// linesGetResult
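// Extracts lines from the accumulator: cells that exceed the threshold and are local maxima
// of their 4-neighbourhood are converted to (rho, theta) pairs and appended to the output
// together with their vote counts.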
__global__ void linesGetResult(const DevMem2Di accum, float2* out, int* votes, const int maxSize, const float threshold, const float theta, const float rho, const int numrho)
{
__shared__ int smem[8][32];
int r = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
int n = blockIdx.y * (blockDim.y - 2) + threadIdx.y;
if (r >= accum.cols || n >= accum.rows)
return;
smem[threadIdx.y][threadIdx.x] = accum(n, r);
__syncthreads();
r -= 1;
n -= 1;
if (threadIdx.x == 0 || threadIdx.x == blockDim.x - 1 || threadIdx.y == 0 || threadIdx.y == blockDim.y - 1 || r >= accum.cols - 2 || n >= accum.rows - 2)
return;
if (smem[threadIdx.y][threadIdx.x] > threshold &&
smem[threadIdx.y][threadIdx.x] > smem[threadIdx.y - 1][threadIdx.x] &&
smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y + 1][threadIdx.x] &&
smem[threadIdx.y][threadIdx.x] > smem[threadIdx.y][threadIdx.x - 1] &&
smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y][threadIdx.x + 1])
{
const float radius = (r - (numrho - 1) * 0.5f) * rho;
const float angle = n * theta;
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float2(radius, angle);
votes[ind] = smem[threadIdx.y][threadIdx.x];
}
}
}
int linesGetResult_gpu(DevMem2Di accum, float2* out, int* votes, int maxSize, float rho, float theta, float threshold, bool doSort)
{
void* counter_ptr;
cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(accum.cols, block.x - 2), divUp(accum.rows, block.y - 2));
linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, threshold, theta, rho, accum.cols - 2);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int total_count;
cudaSafeCall( cudaMemcpy(&total_count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) );
total_count = ::min(total_count, maxSize);
if (doSort && total_count > 0)
{
thrust::device_ptr<float2> out_ptr(out);
thrust::device_ptr<int> votes_ptr(votes);
thrust::sort_by_key(votes_ptr, votes_ptr + total_count, out_ptr, thrust::greater<int>());
}
return total_count;
}
}
}}}
|
b9835f88d230888f4c9499adfe83def5fdb30fbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include "gputimer.h"
#define ARRAY_ROWS 16000
#define ARRAY_COLS 16000
#define TILE_WIDTH 16
#define cudaCheckError() { \
hipError_t e = hipGetLastError(); \
if (e != hipSuccess) { \
printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
hipGetErrorString(e)); \
exit(1); \
} \
}
// Function prototypes
void print_matrix( const double * h_A );
void init_matrix( double * h_A );
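// Each thread computes one element of C as the dot product of two rows of A
// (A is filled column-major by init_matrix), i.e. the kernel forms the product A * A^T.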
__global__ void matrix_mul( double *d_A, double *d_C, int width )
{
// Each thread stores its result in this variable
double tmp = 0.0;
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Out of bounds threads should do nothing
if ( ( row < width ) && ( col < width ) )
{
for ( int k = 0; k < width; k++ )
{
tmp += d_A[ k * width + row ] * d_A[ k * width + col ];
}
d_C[ row * width + col ] = tmp;
}
}
int main( void )
{
size_t ARRAY_BYTES = ARRAY_ROWS * ARRAY_COLS * sizeof( double );
size_t C_BYTES = ARRAY_COLS * ARRAY_COLS * sizeof( double );
GpuTimer timer;
// Seed the random number generator
srand( time( NULL ) );
// Allocate CPU memory
double * h_A = ( double * ) malloc( ARRAY_BYTES );
double * h_C = ( double * ) malloc( C_BYTES );
// Allocate GPU memory
double * d_A;
double * d_C;
hipMalloc( ( void ** ) &d_A, ARRAY_BYTES );
cudaCheckError();
hipMalloc( ( void ** ) &d_C, C_BYTES );
cudaCheckError();
// initialize matrix h_A
init_matrix( h_A );
// Copy matrix A to GPU
hipMemcpy( d_A, h_A, ARRAY_BYTES, hipMemcpyHostToDevice );
cudaCheckError();
// Calculate how many blocks need to be created
dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 );
int numOfBlocks = ( int ) ceil( ( double ) ARRAY_COLS / ( double ) TILE_WIDTH );
dim3 dimGrid( numOfBlocks, numOfBlocks );
// Print info about grid/block size
printf( "Array size : ( %d X %d )\n", ARRAY_ROWS, ARRAY_COLS );
printf( "GPU will create : %d x %d blocks\n", numOfBlocks, numOfBlocks );
printf( "GPU will create : %d threads per block\n", TILE_WIDTH * TILE_WIDTH );
// Launch Timer & Kernel
timer.Start();
hipLaunchKernelGGL(( matrix_mul), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_A, d_C, ARRAY_COLS );
timer.Stop();
cudaCheckError();
// Time taken for the calculation
printf( "Time elapsed = %g ms\n", timer.Elapsed() );
// Copy result from GPU
hipMemcpy( h_C, d_C, C_BYTES, hipMemcpyDeviceToHost );
cudaCheckError();
// Optional: Only suitable for small arrays
// print_matrix( h_C );
// Free CPU memory
free( h_A );
free( h_C );
// Free GPU memory
hipFree( d_A );
hipFree( d_C );
return 0;
}
void init_matrix( double * h_A )
{
// Initialize array A with random double numbers
for ( int i = 0; i < ARRAY_ROWS; i++ )
for ( int j = 0; j < ARRAY_COLS; j++ )
h_A[j * ARRAY_ROWS + i ] = ( double ) rand() / ( double ) ( RAND_MAX );
}
void print_matrix( const double * h_A )
{
for ( int i = 0; i < ARRAY_COLS; i++ )
{
for ( int j = 0; j < ARRAY_COLS; j++ )
{
printf( "%lf\t", h_A[ j * ARRAY_COLS + i ] );
}
printf( "\n" );
}
} | b9835f88d230888f4c9499adfe83def5fdb30fbd.cu | #include <stdio.h>
#include <time.h>
#include "gputimer.h"
#define ARRAY_ROWS 16000
#define ARRAY_COLS 16000
#define TILE_WIDTH 16
#define cudaCheckError() { \
cudaError_t e = cudaGetLastError(); \
if (e != cudaSuccess) { \
printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
cudaGetErrorString(e)); \
exit(1); \
} \
}
// Function prototypes
void print_matrix( const double * h_A );
void init_matrix( double * h_A );
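// Each thread computes one element of C as the dot product of two rows of A
// (A is filled column-major by init_matrix), i.e. the kernel forms the product A * A^T.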
__global__ void matrix_mul( double *d_A, double *d_C, int width )
{
// Each thread stores its result in this variable
double tmp = 0.0;
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Out of bounds threads should do nothing
if ( ( row < width ) && ( col < width ) )
{
for ( int k = 0; k < width; k++ )
{
tmp += d_A[ k * width + row ] * d_A[ k * width + col ];
}
d_C[ row * width + col ] = tmp;
}
}
int main( void )
{
size_t ARRAY_BYTES = ARRAY_ROWS * ARRAY_COLS * sizeof( double );
size_t C_BYTES = ARRAY_COLS * ARRAY_COLS * sizeof( double );
GpuTimer timer;
// Seed the random number generator
srand( time( NULL ) );
// Allocate CPU memory
double * h_A = ( double * ) malloc( ARRAY_BYTES );
double * h_C = ( double * ) malloc( C_BYTES );
// Allocate GPU memory
double * d_A;
double * d_C;
cudaMalloc( ( void ** ) &d_A, ARRAY_BYTES );
cudaCheckError();
cudaMalloc( ( void ** ) &d_C, C_BYTES );
cudaCheckError();
// initialize matrix h_A
init_matrix( h_A );
// Copy matrix A to GPU
cudaMemcpy( d_A, h_A, ARRAY_BYTES, cudaMemcpyHostToDevice );
cudaCheckError();
// Calculate how many blocks need to be created
dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 );
int numOfBlocks = ( int ) ceil( ( double ) ARRAY_COLS / ( double ) TILE_WIDTH );
dim3 dimGrid( numOfBlocks, numOfBlocks );
// Print info about grid/block size
printf( "Array size : ( %d X %d )\n", ARRAY_ROWS, ARRAY_COLS );
printf( "GPU will create : %d x %d blocks\n", numOfBlocks, numOfBlocks );
printf( "GPU will create : %d threads per block\n", TILE_WIDTH * TILE_WIDTH );
// Launch Timer & Kernel
timer.Start();
matrix_mul<<< dimGrid, dimBlock >>>( d_A, d_C, ARRAY_COLS );
timer.Stop();
cudaCheckError();
// Time taken for the calculation
printf( "Time elapsed = %g ms\n", timer.Elapsed() );
// Copy result from GPU
cudaMemcpy( h_C, d_C, C_BYTES, cudaMemcpyDeviceToHost );
cudaCheckError();
// Optional: Only suitable for small arrays
// print_matrix( h_C );
// Free CPU memory
free( h_A );
free( h_C );
// Free GPU memory
cudaFree( d_A );
cudaFree( d_C );
return 0;
}
void init_matrix( double * h_A )
{
// Initialize array A with random double numbers
for ( int i = 0; i < ARRAY_ROWS; i++ )
for ( int j = 0; j < ARRAY_COLS; j++ )
h_A[j * ARRAY_ROWS + i ] = ( double ) rand() / ( double ) ( RAND_MAX );
}
void print_matrix( const double * h_A )
{
for ( int i = 0; i < ARRAY_COLS; i++ )
{
for ( int j = 0; j < ARRAY_COLS; j++ )
{
printf( "%lf\t", h_A[ j * ARRAY_COLS + i ] );
}
printf( "\n" );
}
} |
9a25ba8257044a501691f062d9ce80269bbc10c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_l1;
int xdim0_update_halo_kernel1_l1_h = -1;
__constant__ int ydim0_update_halo_kernel1_l1;
int ydim0_update_halo_kernel1_l1_h = -1;
__constant__ int xdim1_update_halo_kernel1_l1;
int xdim1_update_halo_kernel1_l1_h = -1;
__constant__ int ydim1_update_halo_kernel1_l1;
int ydim1_update_halo_kernel1_l1_h = -1;
__constant__ int xdim2_update_halo_kernel1_l1;
int xdim2_update_halo_kernel1_l1_h = -1;
__constant__ int ydim2_update_halo_kernel1_l1;
int ydim2_update_halo_kernel1_l1_h = -1;
__constant__ int xdim3_update_halo_kernel1_l1;
int xdim3_update_halo_kernel1_l1_h = -1;
__constant__ int ydim3_update_halo_kernel1_l1;
int ydim3_update_halo_kernel1_l1_h = -1;
__constant__ int xdim4_update_halo_kernel1_l1;
int xdim4_update_halo_kernel1_l1_h = -1;
__constant__ int ydim4_update_halo_kernel1_l1;
int ydim4_update_halo_kernel1_l1_h = -1;
__constant__ int xdim5_update_halo_kernel1_l1;
int xdim5_update_halo_kernel1_l1_h = -1;
__constant__ int ydim5_update_halo_kernel1_l1;
int ydim5_update_halo_kernel1_l1_h = -1;
__constant__ int xdim6_update_halo_kernel1_l1;
int xdim6_update_halo_kernel1_l1_h = -1;
__constant__ int ydim6_update_halo_kernel1_l1;
int ydim6_update_halo_kernel1_l1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_l1 * (y) + \
xdim0_update_halo_kernel1_l1 * ydim0_update_halo_kernel1_l1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_l1 * (y) + \
xdim1_update_halo_kernel1_l1 * ydim1_update_halo_kernel1_l1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_l1 * (y) + \
xdim2_update_halo_kernel1_l1 * ydim2_update_halo_kernel1_l1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_l1 * (y) + \
xdim3_update_halo_kernel1_l1 * ydim3_update_halo_kernel1_l1 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_l1 * (y) + \
xdim4_update_halo_kernel1_l1 * ydim4_update_halo_kernel1_l1 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_l1 * (y) + \
xdim5_update_halo_kernel1_l1 * ydim5_update_halo_kernel1_l1 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_l1 * (y) + \
xdim6_update_halo_kernel1_l1 * ydim6_update_halo_kernel1_l1 * (z))
// user function
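// Copies the first interior plane (x = 1) into the left halo plane (x = 0)
// for every field that is enabled in the fields mask.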
__device__
inline void
update_halo_kernel1_l1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed, const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(1, 0, 0)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(1, 0, 0)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(1, 0, 0)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(1, 0, 0)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(1, 0, 0)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(1, 0, 0)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(1, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_l1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_l1 *
ydim0_update_halo_kernel1_l1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_l1 *
ydim1_update_halo_kernel1_l1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_l1 *
ydim2_update_halo_kernel1_l1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_l1 *
ydim3_update_halo_kernel1_l1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_l1 *
ydim4_update_halo_kernel1_l1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_l1 *
ydim5_update_halo_kernel1_l1;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_l1 *
ydim6_update_halo_kernel1_l1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_l1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 62))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(62, "update_halo_kernel1_l1");
OPS_kernels[62].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_l1_h ||
ydim0 != ydim0_update_halo_kernel1_l1_h ||
xdim1 != xdim1_update_halo_kernel1_l1_h ||
ydim1 != ydim1_update_halo_kernel1_l1_h ||
xdim2 != xdim2_update_halo_kernel1_l1_h ||
ydim2 != ydim2_update_halo_kernel1_l1_h ||
xdim3 != xdim3_update_halo_kernel1_l1_h ||
ydim3 != ydim3_update_halo_kernel1_l1_h ||
xdim4 != xdim4_update_halo_kernel1_l1_h ||
ydim4 != ydim4_update_halo_kernel1_l1_h ||
xdim5 != xdim5_update_halo_kernel1_l1_h ||
ydim5 != ydim5_update_halo_kernel1_l1_h ||
xdim6 != xdim6_update_halo_kernel1_l1_h ||
ydim6 != ydim6_update_halo_kernel1_l1_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel1_l1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_l1_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel1_l1, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_l1_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel1_l1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_l1_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel1_l1, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_l1_h = ydim1;
hipMemcpyToSymbol(xdim2_update_halo_kernel1_l1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_l1_h = xdim2;
hipMemcpyToSymbol(ydim2_update_halo_kernel1_l1, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_l1_h = ydim2;
hipMemcpyToSymbol(xdim3_update_halo_kernel1_l1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_l1_h = xdim3;
hipMemcpyToSymbol(ydim3_update_halo_kernel1_l1, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_l1_h = ydim3;
hipMemcpyToSymbol(xdim4_update_halo_kernel1_l1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_l1_h = xdim4;
hipMemcpyToSymbol(ydim4_update_halo_kernel1_l1, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_l1_h = ydim4;
hipMemcpyToSymbol(xdim5_update_halo_kernel1_l1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_l1_h = xdim5;
hipMemcpyToSymbol(ydim5_update_halo_kernel1_l1, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_l1_h = ydim5;
hipMemcpyToSymbol(xdim6_update_halo_kernel1_l1, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_l1_h = xdim6;
hipMemcpyToSymbol(ydim6_update_halo_kernel1_l1, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_l1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[62].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_l1), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[62].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[62].mpi_time += t2 - t1;
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
| 9a25ba8257044a501691f062d9ce80269bbc10c4.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_l1;
int xdim0_update_halo_kernel1_l1_h = -1;
__constant__ int ydim0_update_halo_kernel1_l1;
int ydim0_update_halo_kernel1_l1_h = -1;
__constant__ int xdim1_update_halo_kernel1_l1;
int xdim1_update_halo_kernel1_l1_h = -1;
__constant__ int ydim1_update_halo_kernel1_l1;
int ydim1_update_halo_kernel1_l1_h = -1;
__constant__ int xdim2_update_halo_kernel1_l1;
int xdim2_update_halo_kernel1_l1_h = -1;
__constant__ int ydim2_update_halo_kernel1_l1;
int ydim2_update_halo_kernel1_l1_h = -1;
__constant__ int xdim3_update_halo_kernel1_l1;
int xdim3_update_halo_kernel1_l1_h = -1;
__constant__ int ydim3_update_halo_kernel1_l1;
int ydim3_update_halo_kernel1_l1_h = -1;
__constant__ int xdim4_update_halo_kernel1_l1;
int xdim4_update_halo_kernel1_l1_h = -1;
__constant__ int ydim4_update_halo_kernel1_l1;
int ydim4_update_halo_kernel1_l1_h = -1;
__constant__ int xdim5_update_halo_kernel1_l1;
int xdim5_update_halo_kernel1_l1_h = -1;
__constant__ int ydim5_update_halo_kernel1_l1;
int ydim5_update_halo_kernel1_l1_h = -1;
__constant__ int xdim6_update_halo_kernel1_l1;
int xdim6_update_halo_kernel1_l1_h = -1;
__constant__ int ydim6_update_halo_kernel1_l1;
int ydim6_update_halo_kernel1_l1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_l1 * (y) + \
xdim0_update_halo_kernel1_l1 * ydim0_update_halo_kernel1_l1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_l1 * (y) + \
xdim1_update_halo_kernel1_l1 * ydim1_update_halo_kernel1_l1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_l1 * (y) + \
xdim2_update_halo_kernel1_l1 * ydim2_update_halo_kernel1_l1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_l1 * (y) + \
xdim3_update_halo_kernel1_l1 * ydim3_update_halo_kernel1_l1 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_l1 * (y) + \
xdim4_update_halo_kernel1_l1 * ydim4_update_halo_kernel1_l1 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_l1 * (y) + \
xdim5_update_halo_kernel1_l1 * ydim5_update_halo_kernel1_l1 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_l1 * (y) + \
xdim6_update_halo_kernel1_l1 * ydim6_update_halo_kernel1_l1 * (z))
// user function
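// Copies the first interior plane (x = 1) into the left halo plane (x = 0)
// for every field that is enabled in the fields mask.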
__device__
inline void
update_halo_kernel1_l1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed, const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(1, 0, 0)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(1, 0, 0)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(1, 0, 0)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(1, 0, 0)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(1, 0, 0)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(1, 0, 0)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(1, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_l1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_l1 *
ydim0_update_halo_kernel1_l1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_l1 *
ydim1_update_halo_kernel1_l1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_l1 *
ydim2_update_halo_kernel1_l1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_l1 *
ydim3_update_halo_kernel1_l1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_l1 *
ydim4_update_halo_kernel1_l1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_l1 *
ydim5_update_halo_kernel1_l1;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_l1 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_l1 *
ydim6_update_halo_kernel1_l1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_l1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 62))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(62, "update_halo_kernel1_l1");
OPS_kernels[62].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_l1_h ||
ydim0 != ydim0_update_halo_kernel1_l1_h ||
xdim1 != xdim1_update_halo_kernel1_l1_h ||
ydim1 != ydim1_update_halo_kernel1_l1_h ||
xdim2 != xdim2_update_halo_kernel1_l1_h ||
ydim2 != ydim2_update_halo_kernel1_l1_h ||
xdim3 != xdim3_update_halo_kernel1_l1_h ||
ydim3 != ydim3_update_halo_kernel1_l1_h ||
xdim4 != xdim4_update_halo_kernel1_l1_h ||
ydim4 != ydim4_update_halo_kernel1_l1_h ||
xdim5 != xdim5_update_halo_kernel1_l1_h ||
ydim5 != ydim5_update_halo_kernel1_l1_h ||
xdim6 != xdim6_update_halo_kernel1_l1_h ||
ydim6 != ydim6_update_halo_kernel1_l1_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel1_l1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_l1_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel1_l1, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_l1_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel1_l1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_l1_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel1_l1, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_l1_h = ydim1;
cudaMemcpyToSymbol(xdim2_update_halo_kernel1_l1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_l1_h = xdim2;
cudaMemcpyToSymbol(ydim2_update_halo_kernel1_l1, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_l1_h = ydim2;
cudaMemcpyToSymbol(xdim3_update_halo_kernel1_l1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_l1_h = xdim3;
cudaMemcpyToSymbol(ydim3_update_halo_kernel1_l1, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_l1_h = ydim3;
cudaMemcpyToSymbol(xdim4_update_halo_kernel1_l1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_l1_h = xdim4;
cudaMemcpyToSymbol(ydim4_update_halo_kernel1_l1, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_l1_h = ydim4;
cudaMemcpyToSymbol(xdim5_update_halo_kernel1_l1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_l1_h = xdim5;
cudaMemcpyToSymbol(ydim5_update_halo_kernel1_l1, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_l1_h = ydim5;
cudaMemcpyToSymbol(xdim6_update_halo_kernel1_l1, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_l1_h = xdim6;
cudaMemcpyToSymbol(ydim6_update_halo_kernel1_l1, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_l1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[62].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel1_l1<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[62].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[62].mpi_time += t2 - t1;
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[62].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
|
b0711f846a6aa3323c9e5204b38e082e240c8e4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "flowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void FlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
    //only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1);
}
}
return ;
}
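// FlowProjectionAveraging: second pass of the projection. The forward kernel
// above splats each flow vector (negated) onto the four pixels around its target
// location and counts the hits; this kernel divides the accumulated flow by that
// per-pixel count, turning the sums into averages.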
template <typename scalar_t>
__global__ void FlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
    //only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
    //only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions,0/90/180/270, until finding at least one
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, find no neighbor vectors availabel\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
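// Backward pass: each thread re-derives the four target pixels its flow vector
// was splatted onto in the forward pass and accumulates -gradoutput/count from
// them into the gradient of the input flow (u and v channels separately).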
template <typename scalar_t>
__global__ void FlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
const scalar_t* __restrict__ count,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
}
}
return ;
}
int FlowProjection_gpu_forward_kernel(
hipStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
    //the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjection_gpu_forward_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowProjection_gpu_forward_kernelfunc), dim3(grid),dim3(block),0, stream ,
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjectionAveraging_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowProjectionAveraging_kernelfunc), dim3(grid),dim3(block),0,stream,
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(hipGetLastError());
err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowFillhole_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowFillhole_kernelfunc), dim3(grid),dim3(block),0,stream,
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
int FlowProjection_gpu_backward_kernel(
hipStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& gradoutput,
at::Tensor& gradinput1
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
    //the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjection_gpu_backward_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowProjection_gpu_backward_kernelfunc) , dim3(grid),dim3(block),0, stream,
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),
count.data<scalar_t>(),
gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpu error in BilinearSampler.updateGradInput %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
}
| b0711f846a6aa3323c9e5204b38e082e240c8e4b.cu | #include <stdio.h>
#include "flowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void FlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
    //only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1);
}
}
return ;
}
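// FlowProjectionAveraging: second pass of the projection. The forward kernel
// above splats each flow vector (negated) onto the four pixels around its target
// location and counts the hits; this kernel divides the accumulated flow by that
// per-pixel count, turning the sums into averages.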
template <typename scalar_t>
__global__ void FlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
    //only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
    //only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions,0/90/180/270, until finding at least one
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, find no neighbor vectors availabel\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
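// Backward pass: each thread re-derives the four target pixels its flow vector
// was splatted onto in the forward pass and accumulates -gradoutput/count from
// them into the gradient of the input flow (u and v channels separately).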
template <typename scalar_t>
__global__ void FlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
const scalar_t* __restrict__ count,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
}
}
return ;
}
int FlowProjection_gpu_forward_kernel(
cudaStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
    //the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjection_gpu_forward_kernelfunc", ([&] {
FlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>(
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjectionAveraging_kernelfunc", ([&] {
FlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>(
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(cudaGetLastError());
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowFillhole_kernelfunc", ([&] {
FlowFillhole_kernelfunc<<<grid,block,0,stream>>>(
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
int FlowProjection_gpu_backward_kernel(
cudaStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& gradoutput,
at::Tensor& gradinput1
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
    //the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjection_gpu_backward_kernelfunc", ([&] {
FlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>(
            nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),
count.data<scalar_t>(),
gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
}
|
525b6b336b98c170e14b3ccc849426980c97d504.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA BFS kernel
// Data-Driven: base data-driven algorithm, global worklist in memory
// one vertex per thread (thread-centric), local thread aggregate
// tasks first before pushing to global worklist
// perform vertex operations only on the worklist
// need atomicAdd for maintaining the shared worklist
// Reference:
// Rupesh Nasre, et al. Data-driven versus Topology-driven
// Irregular Computations on GPUs
//=================================================================//
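// Note (added for clarity): the host loop below keeps two worklists and swaps
// them each BFS level -- the kernel reads frontier vertices from the "in" list
// and pushes newly discovered vertices to the "out" list, so one kernel launch
// processes exactly one level of the traversal.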
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define WORKLIST_SIZE 16777216
#define LOCAL_SIZE 128
// a dummy worklist that you can only push or clear
typedef struct my_worklist
{
void init(void)
{
cudaErrCheck( hipMalloc((void**)&item_array, WORKLIST_SIZE*sizeof(uint64_t)) );
cudaErrCheck( hipMalloc((void**)&end, sizeof(uint32_t)) );
clear();
}
void clear(void)
{
uint32_t zeronum=0;
cudaErrCheck( hipMemcpy(end, &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
}
void free(void)
{
cudaErrCheck( hipFree(item_array) );
cudaErrCheck( hipFree(end) );
}
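    // pushRange reserves a contiguous block of slots with a single atomicAdd,
    // so a thread pays one atomic operation per batch of up to LOCAL_SIZE items
    // rather than one atomic per item.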
__device__ void pushRange(uint64_t * from_array, uint32_t num)
{
uint32_t old_end = atomicAdd(end, num);
for (uint32_t i=0;i<num;i++)
{
item_array[i+old_end] = from_array[i];
}
}
__device__ inline uint64_t get_item(unsigned index)
{
return item_array[index];
}
__device__ inline uint32_t get_item_num(void)
{
return (*end);
}
void host_initPush(uint64_t * from_array, uint32_t num)
{
cudaErrCheck( hipMemcpy(end, &num, sizeof(uint32_t),
hipMemcpyHostToDevice) );
cudaErrCheck( hipMemcpy(item_array, from_array, num*sizeof(uint64_t),
hipMemcpyHostToDevice) );
}
uint64_t *item_array;
uint32_t *end;
}my_worklist;
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist, cudaGraph graph,
my_worklist inworklist, my_worklist outworklist)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= inworklist.get_item_num()) return;
uint64_t v = inworklist.get_item(tid);
uint64_t edge_begin = graph.get_firstedge_index(v);
uint64_t edge_end = graph.get_edge_index_end(v);
uint32_t curr = vplist[v];
uint64_t local_worklist[LOCAL_SIZE];
uint32_t work_size=0;
for (uint64_t i=edge_begin;i<edge_end;i++)
{
uint64_t vid = graph.get_edge_dest(i);
if (vplist[vid]==MY_INFINITY)
{
vplist[vid] = curr + 1;
// push to local worklist
local_worklist[work_size] = vid;
work_size++;
if (work_size==LOCAL_SIZE)
{
outworklist.pushRange(local_worklist, work_size);
work_size = 0;
}
}
}
// push local worklist to shared worklist
outworklist.pushRange(local_worklist, work_size);
}
void cuda_BFS(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
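    // Illustrative sizing (hypothetical numbers): with vertex_cnt = 100000 and
    // maxThreadsPerBlock = 1024, this gives 1024 threads per block and
    // ceil(100000 / 1024) = 98 blocks.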
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// initialize the worklists for in & out
my_worklist worklist1, worklist2;
worklist1.init();
worklist2.init();
my_worklist * in_worklist = &worklist1;
my_worklist * out_worklist = &worklist2;
in_worklist->host_initPush(&root, 1);
uint32_t zeronum=0;
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
hipEventRecord(start_event, 0);
int curr=0;
unsigned wl_size=1;
while(wl_size!=0)
{
// Each iteration processes
// one level of BFS traversal
num_thread_per_block = (unsigned int) wl_size;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
num_block = (unsigned int)ceil( wl_size/(double)num_thread_per_block );
hipLaunchKernelGGL(( kernel), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph, *in_worklist, *out_worklist);
my_worklist * temp=in_worklist;
in_worklist = out_worklist;
out_worklist = temp;
cudaErrCheck( hipMemcpy(&wl_size, in_worklist->end, sizeof(uint32_t), hipMemcpyDeviceToHost) );
out_worklist->clear();
curr++;
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
in_worklist->free();
out_worklist->free();
cudaErrCheck( hipFree(device_vpl) );
}
| 525b6b336b98c170e14b3ccc849426980c97d504.cu | //=================================================================//
// CUDA BFS kernel
// Data-Driven: base data-driven algorithm, global worklist in memory
// one vertex per thread (thread-centric), local thread aggregate
// tasks first before pushing to global worklist
// perform vertex operations only on the worklist
// need atomicAdd for maintaining the shared worklist
// Reference:
// Rupesh Nasre, et al. Data-driven versus Topology-driven
// Irregular Computations on GPUs
//=================================================================//
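// Note (added for clarity): the host loop below keeps two worklists and swaps
// them each BFS level -- the kernel reads frontier vertices from the "in" list
// and pushes newly discovered vertices to the "out" list, so one kernel launch
// processes exactly one level of the traversal.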
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define WORKLIST_SIZE 16777216
#define LOCAL_SIZE 128
// a dummy worklist that you can only push or clear
typedef struct my_worklist
{
void init(void)
{
cudaErrCheck( cudaMalloc((void**)&item_array, WORKLIST_SIZE*sizeof(uint64_t)) );
cudaErrCheck( cudaMalloc((void**)&end, sizeof(uint32_t)) );
clear();
}
void clear(void)
{
uint32_t zeronum=0;
cudaErrCheck( cudaMemcpy(end, &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
}
void free(void)
{
cudaErrCheck( cudaFree(item_array) );
cudaErrCheck( cudaFree(end) );
}
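    // pushRange reserves a contiguous block of slots with a single atomicAdd,
    // so a thread pays one atomic operation per batch of up to LOCAL_SIZE items
    // rather than one atomic per item.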
__device__ void pushRange(uint64_t * from_array, uint32_t num)
{
uint32_t old_end = atomicAdd(end, num);
for (uint32_t i=0;i<num;i++)
{
item_array[i+old_end] = from_array[i];
}
}
__device__ inline uint64_t get_item(unsigned index)
{
return item_array[index];
}
__device__ inline uint32_t get_item_num(void)
{
return (*end);
}
void host_initPush(uint64_t * from_array, uint32_t num)
{
cudaErrCheck( cudaMemcpy(end, &num, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaErrCheck( cudaMemcpy(item_array, from_array, num*sizeof(uint64_t),
cudaMemcpyHostToDevice) );
}
uint64_t *item_array;
uint32_t *end;
}my_worklist;
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist, cudaGraph graph,
my_worklist inworklist, my_worklist outworklist)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= inworklist.get_item_num()) return;
uint64_t v = inworklist.get_item(tid);
uint64_t edge_begin = graph.get_firstedge_index(v);
uint64_t edge_end = graph.get_edge_index_end(v);
uint32_t curr = vplist[v];
uint64_t local_worklist[LOCAL_SIZE];
uint32_t work_size=0;
for (uint64_t i=edge_begin;i<edge_end;i++)
{
uint64_t vid = graph.get_edge_dest(i);
if (vplist[vid]==MY_INFINITY)
{
vplist[vid] = curr + 1;
// push to local worklist
local_worklist[work_size] = vid;
work_size++;
if (work_size==LOCAL_SIZE)
{
outworklist.pushRange(local_worklist, work_size);
work_size = 0;
}
}
}
// push local worklist to shared worklist
outworklist.pushRange(local_worklist, work_size);
}
void cuda_BFS(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
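    // Illustrative sizing (hypothetical numbers): with vertex_cnt = 100000 and
    // maxThreadsPerBlock = 1024, this gives 1024 threads per block and
    // ceil(100000 / 1024) = 98 blocks.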
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// initialization
initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// initialize the worklists for in & out
my_worklist worklist1, worklist2;
worklist1.init();
worklist2.init();
my_worklist * in_worklist = &worklist1;
my_worklist * out_worklist = &worklist2;
in_worklist->host_initPush(&root, 1);
uint32_t zeronum=0;
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
cudaEventRecord(start_event, 0);
int curr=0;
unsigned wl_size=1;
while(wl_size!=0)
{
// Each iteration processes
// one level of BFS traversal
num_thread_per_block = (unsigned int) wl_size;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
num_block = (unsigned int)ceil( wl_size/(double)num_thread_per_block );
kernel<<<num_block, num_thread_per_block>>>(device_vpl, d_graph, *in_worklist, *out_worklist);
my_worklist * temp=in_worklist;
in_worklist = out_worklist;
out_worklist = temp;
cudaErrCheck( cudaMemcpy(&wl_size, in_worklist->end, sizeof(uint32_t), cudaMemcpyDeviceToHost) );
out_worklist->clear();
curr++;
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
in_worklist->free();
out_worklist->free();
cudaErrCheck( cudaFree(device_vpl) );
}
|
73eaf3727fbb3d7fb6e3c6c1179991f35fc6a6a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* MIT License
Copyright (c) 2018 Biro Eniko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "util/common.h"
#include "util/globals.h"
#include "util/renderer.h"
#include "hitables/sphere.h"
#include "hitables/hitablelist.h"
#include "util/camera.h"
#include "materials/material.h"
#include "util/scene.cuh"
#include "util/window.h"
CUDA_DEV int numHitables = 0;
#ifdef CUDA_ENABLED
void initializeWorldCuda(bool showWindow, bool writeImagePPM, bool writeImagePNG, hitable*** list, hitable** world, Window** w, Image** image, Camera** cam, Renderer** renderer)
{
int choice = 6;
switch(choice)
{
case 0:
numHitables = 4;
break;
case 1:
numHitables = 58;
break;
case 2:
numHitables = 901;
break;
case 3:
numHitables = 102;
break;
case 4:
numHitables = 68;
break;
case 5:
numHitables = 197;
break;
case 6:
numHitables = 197;
break;
}
// World
checkCudaErrors(hipMallocManaged(list, numHitables*sizeof(hitable*)));
hitable** worldPtr;
checkCudaErrors(hipMallocManaged(&worldPtr, sizeof(hitable*)));
switch(choice)
{
case 0:
hipLaunchKernelGGL(( simpleScene), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 1:
hipLaunchKernelGGL(( simpleScene2), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 2:
hipLaunchKernelGGL(( randomScene), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 3:
hipLaunchKernelGGL(( randomScene2), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 4:
hipLaunchKernelGGL(( randomScene3), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 5:
hipLaunchKernelGGL(( randomScene4), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 6:
hipLaunchKernelGGL(( randomSceneWithMovingSpheres), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
*world = *worldPtr;
checkCudaErrors(hipFree(worldPtr));
// Camera
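    // The pattern below allocates managed (unified) memory and then constructs
    // the object in place with placement new, so the same pointer can be used
    // from both host and device code.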
checkCudaErrors(hipMallocManaged(cam, sizeof(Camera)));
new (*cam) Camera(lookFrom, lookAt, vup, 20.0f,
float(nx)/float(ny), distToFocus, aperture);
// Renderer
checkCudaErrors(hipMallocManaged(renderer, sizeof(Renderer)));
new (*renderer) Renderer(showWindow, writeImagePPM, writeImagePNG);
// Image
checkCudaErrors(hipMallocManaged(image, sizeof(Image)));
new (*image) Image(showWindow, writeImagePPM || writeImagePNG, nx, ny, tx, ty);
// Window
if (showWindow)
*w = new Window(*cam, *renderer, nx, ny, thetaInit, phiInit, zoomScale, stepScale);
}
CUDA_GLOBAL void freeWorldCuda(hitable** list, hitable** world)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
{
for (int i = 0; i < numHitables; i++)
{
delete ((sphere *)list[i])->matPtr;
delete list[i];
}
//delete *world;
}
}
void destroyWorldCuda(bool showWindow, hitable** list, hitable* world, Window* w, Image* image, Camera* cam, Renderer* render)
{
hipLaunchKernelGGL(( freeWorldCuda), dim3(1),dim3(1), 0, 0, list, &world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(cam));
checkCudaErrors(hipFree(render));
checkCudaErrors(hipFree(image));
}
CUDA_GLOBAL void render(Camera* cam, Image* image, hitable* world, Renderer* render, int sampleCount)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= image->nx) || (j >= image->ny))
return;
int pixelIndex = j*image->nx + i;
// Render the samples in batches
for (int s = 0; s < nsBatch; s++)
{
RandomGenerator rng(sampleCount * nsBatch + s, pixelIndex);
float u = float(i + rng.get1f()) / float(image->nx); // left to right
float v = float(j + rng.get1f()) / float(image->ny); // bottom to top
ray r = cam->getRay(rng, u, v);
image->pixels[pixelIndex] += render->color(rng, r, world, 0);
}
vec3 col = image->pixels[pixelIndex] / (sampleCount * nsBatch);
image->pixels2[pixelIndex] = col;
}
CUDA_GLOBAL void display(Image* image)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int pixelIndex = j*image->nx + i;
vec3 col = image->pixels2[pixelIndex];
// Gamma encoding of images is used to optimize the usage of bits
// when encoding an image, or bandwidth used to transport an image,
// by taking advantage of the non-linear manner in which humans perceive
// light and color. (wikipedia)
// we use gamma 2: raising the color to the power 1/gamma (1/2)
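    // e.g. a linear value of 0.25 becomes sqrt(0.25) = 0.5, which maps to int(255 * 0.5) = 127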
col = vec3(sqrt(col[0]), sqrt(col[1]), sqrt(col[2]));
int ir = clamp(int(255.f*col[0]), 0, 255);
int ig = clamp(int(255.f*col[1]), 0, 255);
int ib = clamp(int(255.f*col[2]), 0, 255);
if (image->writeImage)
{
// PNG
int index = (image->ny - 1 - j) * image->nx + i;
int index3 = 3 * index;
image->fileOutputImage[index3 + 0] = ir;
image->fileOutputImage[index3 + 1] = ig;
image->fileOutputImage[index3 + 2] = ib;
}
if (image->showWindow)
image->windowPixels[(image->ny-j-1)*image->nx + i] = (ir << 16) | (ig << 8) | (ib);
}
#endif // CUDA_ENABLED
#ifdef CUDA_ENABLED
void Renderer::cudaRender(Camera* cam, hitable* world, Image* image, int sampleCount)
{
dim3 blocks( (image->nx + image->tx - 1)/image->tx, (image->ny + image->ty - 1)/image->ty);
dim3 threads(image->tx, image->ty);
// Kernel call for the computation of pixel colors.
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, cam, image, world, this, sampleCount);
// Denoise here.
#ifdef OIDN_ENABLED
checkCudaErrors(hipDeviceSynchronize());
image->denoise();
checkCudaErrors(hipDeviceSynchronize());
#endif // OIDN_ENABLED
// Kernel call to fill the output buffers.
hipLaunchKernelGGL(( display), dim3(blocks), dim3(threads), 0, 0, image);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
}
#endif // CUDA_ENABLED
| 73eaf3727fbb3d7fb6e3c6c1179991f35fc6a6a5.cu | /* MIT License
Copyright (c) 2018 Biro Eniko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "util/common.h"
#include "util/globals.h"
#include "util/renderer.h"
#include "hitables/sphere.h"
#include "hitables/hitablelist.h"
#include "util/camera.h"
#include "materials/material.h"
#include "util/scene.cuh"
#include "util/window.h"
CUDA_DEV int numHitables = 0;
#ifdef CUDA_ENABLED
void initializeWorldCuda(bool showWindow, bool writeImagePPM, bool writeImagePNG, hitable*** list, hitable** world, Window** w, Image** image, Camera** cam, Renderer** renderer)
{
int choice = 6;
switch(choice)
{
case 0:
numHitables = 4;
break;
case 1:
numHitables = 58;
break;
case 2:
numHitables = 901;
break;
case 3:
numHitables = 102;
break;
case 4:
numHitables = 68;
break;
case 5:
numHitables = 197;
break;
case 6:
numHitables = 197;
break;
}
// World
checkCudaErrors(cudaMallocManaged(list, numHitables*sizeof(hitable*)));
hitable** worldPtr;
checkCudaErrors(cudaMallocManaged(&worldPtr, sizeof(hitable*)));
switch(choice)
{
case 0:
simpleScene<<<1,1>>>(*list, worldPtr);
break;
case 1:
simpleScene2<<<1,1>>>(*list, worldPtr);
break;
case 2:
randomScene<<<1,1>>>(*list, worldPtr);
break;
case 3:
randomScene2<<<1,1>>>(*list, worldPtr);
break;
case 4:
randomScene3<<<1,1>>>(*list, worldPtr);
break;
case 5:
randomScene4<<<1,1>>>(*list, worldPtr);
break;
case 6:
randomSceneWithMovingSpheres<<<1,1>>>(*list, worldPtr);
break;
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
*world = *worldPtr;
checkCudaErrors(cudaFree(worldPtr));
// Camera
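    // The pattern below allocates managed (unified) memory and then constructs
    // the object in place with placement new, so the same pointer can be used
    // from both host and device code.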
checkCudaErrors(cudaMallocManaged(cam, sizeof(Camera)));
new (*cam) Camera(lookFrom, lookAt, vup, 20.0f,
float(nx)/float(ny), distToFocus, aperture);
// Renderer
checkCudaErrors(cudaMallocManaged(renderer, sizeof(Renderer)));
new (*renderer) Renderer(showWindow, writeImagePPM, writeImagePNG);
// Image
checkCudaErrors(cudaMallocManaged(image, sizeof(Image)));
new (*image) Image(showWindow, writeImagePPM || writeImagePNG, nx, ny, tx, ty);
// Window
if (showWindow)
*w = new Window(*cam, *renderer, nx, ny, thetaInit, phiInit, zoomScale, stepScale);
}
CUDA_GLOBAL void freeWorldCuda(hitable** list, hitable** world)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
{
for (int i = 0; i < numHitables; i++)
{
delete ((sphere *)list[i])->matPtr;
delete list[i];
}
//delete *world;
}
}
void destroyWorldCuda(bool showWindow, hitable** list, hitable* world, Window* w, Image* image, Camera* cam, Renderer* render)
{
freeWorldCuda<<<1,1>>>(list, &world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(cam));
checkCudaErrors(cudaFree(render));
checkCudaErrors(cudaFree(image));
}
CUDA_GLOBAL void render(Camera* cam, Image* image, hitable* world, Renderer* render, int sampleCount)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= image->nx) || (j >= image->ny))
return;
int pixelIndex = j*image->nx + i;
// Render the samples in batches
for (int s = 0; s < nsBatch; s++)
{
RandomGenerator rng(sampleCount * nsBatch + s, pixelIndex);
float u = float(i + rng.get1f()) / float(image->nx); // left to right
float v = float(j + rng.get1f()) / float(image->ny); // bottom to top
ray r = cam->getRay(rng, u, v);
image->pixels[pixelIndex] += render->color(rng, r, world, 0);
}
vec3 col = image->pixels[pixelIndex] / (sampleCount * nsBatch);
image->pixels2[pixelIndex] = col;
}
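// Converts the averaged colour to 8-bit with gamma-2 encoding, then writes it to the
// file output buffer (rows flipped top-to-bottom) and/or the window buffer packed as 0x00RRGGBB.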
CUDA_GLOBAL void display(Image* image)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int pixelIndex = j*image->nx + i;
vec3 col = image->pixels2[pixelIndex];
// Gamma encoding of images is used to optimize the usage of bits
// when encoding an image, or bandwidth used to transport an image,
// by taking advantage of the non-linear manner in which humans perceive
// light and color. (wikipedia)
// we use gamma 2: raising the color to the power 1/gamma (1/2)
col = vec3(sqrt(col[0]), sqrt(col[1]), sqrt(col[2]));
int ir = clamp(int(255.f*col[0]), 0, 255);
int ig = clamp(int(255.f*col[1]), 0, 255);
int ib = clamp(int(255.f*col[2]), 0, 255);
if (image->writeImage)
{
// PNG
int index = (image->ny - 1 - j) * image->nx + i;
int index3 = 3 * index;
image->fileOutputImage[index3 + 0] = ir;
image->fileOutputImage[index3 + 1] = ig;
image->fileOutputImage[index3 + 2] = ib;
}
if (image->showWindow)
image->windowPixels[(image->ny-j-1)*image->nx + i] = (ir << 16) | (ig << 8) | (ib);
}
#endif // CUDA_ENABLED
#ifdef CUDA_ENABLED
void Renderer::cudaRender(Camera* cam, hitable* world, Image* image, int sampleCount)
{
dim3 blocks( (image->nx + image->tx - 1)/image->tx, (image->ny + image->ty - 1)/image->ty);
dim3 threads(image->tx, image->ty);
// Kernel call for the computation of pixel colors.
render<<<blocks, threads>>>(cam, image, world, this, sampleCount);
// Denoise here.
#ifdef OIDN_ENABLED
checkCudaErrors(cudaDeviceSynchronize());
image->denoise();
checkCudaErrors(cudaDeviceSynchronize());
#endif // OIDN_ENABLED
// Kernel call to fill the output buffers.
display<<<blocks, threads>>>(image);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
}
#endif // CUDA_ENABLED
|
c2209a3fe3f8871eddb7fcc345dbaae548147711.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
extern "C"
{
__global__ void testKernel(int* addr, unsigned short param1, char param2)
{
addr[0] = param1 + param2;
}
}
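// Note: the testKernel defined above is never launched by this program; main() instead
// loads a module from the cubin named on the command line and launches the kernel
// requested in argv[2] through the driver-style module API.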
char* muGetErrorString(hipError_t result);
void muEC(int position) //checks and outputs error position and error string
{
hipError_t errcode = hipGetLastError();
if(errcode==hipSuccess)
{
printf("No error at position %i\n", position);
return;
}
printf("Error position: %i\nCode:%s\n", position, hipGetErrorString(errcode));
}
void muRC(int position, hipError_t result)
{
if(result==0)
printf("Success at %i\n", position);
else
printf("Error at %i:%s\n", position, muGetErrorString(result));
}
char* muGetErrorString(hipError_t result)
{
switch(result)
{
case 0: return "Success";
case 1: return "Invalid value";
case 2: return "Out of memory";
case 3: return "Not Initialized";
case 4: return "Deinitialized";
case 100: return "No device";
case 101: return "Invalid device";
case 200: return "Invalid image";
case 201: return "Invalid context";
case 202: return "Context already current";
case 205: return "Map failed";
case 206: return "Unmap failed";
case 207: return "Array is mapped";
case 208: return "Already mapped";
case 209: return "No binary for GPU";
case 210: return "Already acquired";
case 211: return "Not mapped";
case 300: return "Invalid source";
case 301: return "File not found";
case 400: return "Invalid handle";
case 500: return "Not found";
case 600: return "Not ready";
case 700: return "Launch failed";
case 701: return "Launch out of resources";
case 702: return "Launch timeout";
case 703: return "Launch incompatible texturing";
case 999: return "Unknown";
};
return "Unknown";
}
int main( int argc, char** argv)
{
if(argc<3)
{
puts("arguments: cubinname kernelname length tcount interval choice");
puts(" length: number of 4-byte elements to allocate in memory");
puts(" tcount: number of threads");
puts(" interval: number of output items per group");
puts(" choice: 0, all; 1, odd group only; 2, even group only");
return 0;
}
int length = 8;
if(argc>=4)
{
length = atoi(argv[3]);
}
int tcount = 1;
if(argc>=5)
{
tcount = atoi(argv[4]);
}
int size = sizeof(int)*length;
int interval = 1;
if(argc>=6)
{
interval = atoi(argv[5]);
}
bool odd = true;
bool even = true;
if(argc>=7)
{
int choice = atoi(argv[6]);
if(choice==1)
even = false;
else if(choice==2)
odd = false;
}
hipDeviceptr_t gpu_output, gpu_input;
int *cpu_output, *cpu_input, *cpu_compare;
cpu_output = (int*)malloc(size);
cpu_compare = (int*)malloc(size);
cpu_input = (int*)malloc(size);
for(int i=0; i<length; i++)
{
cpu_input[i]=i;
cpu_compare[i]=i*16;
}
hipDevice_t device;
hipCtx_t context;
muRC(100, hipInit(0));
muRC(95, hipDeviceGet(&device, 0));
muRC(92, hipCtxCreate(&context, HIP_CTX_SCHED_SPIN, device));
    muRC(90, hipMalloc((void**)&gpu_output, size));
    muRC(90, hipMalloc((void**)&gpu_input, size));
    muRC(90, hipMemcpyHtoD(gpu_input, cpu_input, size));
hipEvent_t eStart, eStop;
    muRC(89, hipEventCreateWithFlags(&eStart, hipEventDefault));
    muRC(88, hipEventCreateWithFlags(&eStop, hipEventDefault));
hipModule_t module;
hipFunction_t kernel;
hipError_t result = hipModuleLoad(&module, argv[1]);
muRC(0 , result);
result = hipModuleGetFunction(&kernel, module, argv[2]);
muRC(1, result);
int param = 0x1010;
muRC(2, hipParamSetSize(kernel, 20));
muRC(3, hipParamSetv(kernel, 0, &gpu_input, 8));
muRC(3, hipParamSetv(kernel, 8, &gpu_output, 8));
muRC(3, hipParamSetv(kernel, 16, &length, 4));
muRC(4, hipFuncSetBlockShape(kernel, tcount,1,1));
muRC(41, hipEventRecord(eStart,0) );
muRC(5, hipLaunch(kernel));
muRC(51, hipEventRecord(eStop,0) );
    muRC(6, hipMemcpyDtoH(cpu_output, gpu_output, size));
muRC(7, hipCtxSynchronize());
float time;
muRC(75, hipEventElapsedTime(&time, eStart, eStop));
printf("length=%i\n", length);
printf("tcount=%i\n", tcount);
printf("time=%f\n", time);
for(int i=0; i<length; i++)
{
if(cpu_compare[i]!=cpu_output[i])
{
puts("Error exists");
break;
}
}
muRC(8, hipModuleUnload(module));
muRC(9, hipFree(gpu_output));
muRC(9, hipFree(gpu_input));
muRC(10, hipCtxDestroy(context));
    // buffers were malloc'd, so release them with free (the comma expression only freed one)
    free(cpu_output);
    free(cpu_input);
    free(cpu_compare);
return 0;
}
| c2209a3fe3f8871eddb7fcc345dbaae548147711.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
extern "C"
{
__global__ void testKernel(int* addr, unsigned short param1, char param2)
{
addr[0] = param1 + param2;
}
}
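// Note: the testKernel defined above is never launched by this program; main() instead
// loads a module from the cubin named on the command line and launches the kernel
// requested in argv[2] through the CUDA driver API (cuModuleLoad/cuLaunch).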
char* muGetErrorString(CUresult result);
void muEC(int position) //checks and outputs error position and error string
{
cudaError_t errcode = cudaGetLastError();
if(errcode==cudaSuccess)
{
printf("No error at position %i\n", position);
return;
}
printf("Error position: %i\nCode:%s\n", position, cudaGetErrorString(errcode));
}
void muRC(int position, CUresult result)
{
if(result==0)
printf("Success at %i\n", position);
else
printf("Error at %i:%s\n", position, muGetErrorString(result));
}
char* muGetErrorString(CUresult result)
{
switch(result)
{
case 0: return "Success";
case 1: return "Invalid value";
case 2: return "Out of memory";
case 3: return "Not Initialized";
case 4: return "Deinitialized";
case 100: return "No device";
case 101: return "Invalid device";
case 200: return "Invalid image";
case 201: return "Invalid context";
case 202: return "Context already current";
case 205: return "Map failed";
case 206: return "Unmap failed";
case 207: return "Array is mapped";
case 208: return "Already mapped";
case 209: return "No binary for GPU";
case 210: return "Already acquired";
case 211: return "Not mapped";
case 300: return "Invalid source";
case 301: return "File not found";
case 400: return "Invalid handle";
case 500: return "Not found";
case 600: return "Not ready";
case 700: return "Launch failed";
case 701: return "Launch out of resources";
case 702: return "Launch timeout";
case 703: return "Launch incompatible texturing";
case 999: return "Unknown";
};
return "Unknown";
}
int main( int argc, char** argv)
{
if(argc<3)
{
puts("arguments: cubinname kernelname length tcount interval choice");
puts(" length: number of 4-byte elements to allocate in memory");
puts(" tcount: number of threads");
puts(" interval: number of output items per group");
puts(" choice: 0, all; 1, odd group only; 2, even group only");
return 0;
}
int length = 8;
if(argc>=4)
{
length = atoi(argv[3]);
}
int tcount = 1;
if(argc>=5)
{
tcount = atoi(argv[4]);
}
int size = sizeof(int)*length;
int interval = 1;
if(argc>=6)
{
interval = atoi(argv[5]);
}
bool odd = true;
bool even = true;
if(argc>=7)
{
int choice = atoi(argv[6]);
if(choice==1)
even = false;
else if(choice==2)
odd = false;
}
CUdeviceptr gpu_output, gpu_input;
int *cpu_output, *cpu_input, *cpu_compare;
cpu_output = (int*)malloc(size);
cpu_compare = (int*)malloc(size);
cpu_input = (int*)malloc(size);
for(int i=0; i<length; i++)
{
cpu_input[i]=i;
cpu_compare[i]=i*16;
}
CUdevice device;
CUcontext context;
muRC(100, cuInit(0));
muRC(95, cuDeviceGet(&device, 0));
muRC(92, cuCtxCreate(&context, CU_CTX_SCHED_SPIN, device));
muRC(90, cuMemAlloc(&gpu_output, size));
muRC(90, cuMemAlloc(&gpu_input, size));
muRC(90, cuMemcpyHtoD(gpu_input, cpu_input, size));
CUevent eStart, eStop;
muRC(89, cuEventCreate(&eStart, CU_EVENT_DEFAULT));
muRC(88, cuEventCreate(&eStop, CU_EVENT_DEFAULT));
CUmodule module;
CUfunction kernel;
CUresult result = cuModuleLoad(&module, argv[1]);
muRC(0 , result);
result = cuModuleGetFunction(&kernel, module, argv[2]);
muRC(1, result);
int param = 0x1010;
muRC(2, cuParamSetSize(kernel, 20));
muRC(3, cuParamSetv(kernel, 0, &gpu_input, 8));
muRC(3, cuParamSetv(kernel, 8, &gpu_output, 8));
muRC(3, cuParamSetv(kernel, 16, &length, 4));
muRC(4, cuFuncSetBlockShape(kernel, tcount,1,1));
muRC(41, cuEventRecord(eStart,0) );
muRC(5, cuLaunch(kernel));
muRC(51, cuEventRecord(eStop,0) );
muRC(6, cuMemcpyDtoH(cpu_output, gpu_output, size));
muRC(7, cuCtxSynchronize());
float time;
muRC(75, cuEventElapsedTime(&time, eStart, eStop));
printf("length=%i\n", length);
printf("tcount=%i\n", tcount);
printf("time=%f\n", time);
for(int i=0; i<length; i++)
{
if(cpu_compare[i]!=cpu_output[i])
{
puts("Error exists");
break;
}
}
muRC(8, cuModuleUnload(module));
muRC(9, cuMemFree(gpu_output));
muRC(9, cuMemFree(gpu_input));
muRC(10, cuCtxDestroy(context));
    // buffers were malloc'd, so release them with free (the comma expression only freed one)
    free(cpu_output);
    free(cpu_input);
    free(cpu_compare);
return 0;
}
|
36093b1a9a07f0af70bb7ac7618c050a3c2834d8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <gtest/internal/gtest-type-util.h>
#include <rmm/device_uvector.hpp>
#include <thrust/logical.h>
template <typename T>
struct TypedUVectorTest : ::testing::Test {
hipStream_t stream() const noexcept { return hipStream_t{0}; }
};
using TestTypes = ::testing::Types<int8_t, int32_t, uint64_t, float, double>;
TYPED_TEST_CASE(TypedUVectorTest, TestTypes);
TYPED_TEST(TypedUVectorTest, ZeroSizeConstructor)
{
rmm::device_uvector<TypeParam> uv(0, this->stream());
EXPECT_EQ(uv.size(), 0);
EXPECT_EQ(uv.end(), uv.begin());
EXPECT_TRUE(uv.is_empty());
}
TYPED_TEST(TypedUVectorTest, NonZeroSizeConstructor)
{
rmm::device_uvector<TypeParam> uv(12345, this->stream());
EXPECT_EQ(uv.size(), 12345);
EXPECT_NE(uv.data(), nullptr);
EXPECT_EQ(uv.end(), uv.begin() + uv.size());
EXPECT_FALSE(uv.is_empty());
EXPECT_NE(uv.element_ptr(0), nullptr);
}
TYPED_TEST(TypedUVectorTest, CopyConstructor)
{
rmm::device_uvector<TypeParam> uv(12345, this->stream());
rmm::device_uvector<TypeParam> uv_copy(uv, this->stream());
EXPECT_EQ(uv_copy.size(), uv.size());
EXPECT_NE(uv_copy.data(), nullptr);
EXPECT_EQ(uv_copy.end(), uv_copy.begin() + uv_copy.size());
EXPECT_FALSE(uv_copy.is_empty());
EXPECT_NE(uv_copy.element_ptr(0), nullptr);
}
TYPED_TEST(TypedUVectorTest, ResizeSmaller)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
auto original_data = uv.data();
auto original_begin = uv.begin();
auto smaller_size = uv.size() - 1;
uv.resize(smaller_size, this->stream());
EXPECT_EQ(original_data, uv.data());
EXPECT_EQ(original_begin, uv.begin());
EXPECT_EQ(uv.size(), smaller_size);
EXPECT_EQ(uv.capacity(), original_size);
// shrink_to_fit should force a new allocation
uv.shrink_to_fit(this->stream());
EXPECT_EQ(uv.size(), smaller_size);
EXPECT_EQ(uv.capacity(), smaller_size);
}
TYPED_TEST(TypedUVectorTest, ResizeLarger)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
auto original_data = uv.data();
auto original_begin = uv.begin();
auto larger_size = uv.size() + 1;
uv.resize(larger_size, this->stream());
EXPECT_NE(uv.data(), original_data);
EXPECT_NE(uv.begin(), original_begin);
EXPECT_EQ(uv.size(), larger_size);
EXPECT_EQ(uv.capacity(), larger_size);
auto larger_data = uv.data();
auto larger_begin = uv.begin();
// shrink_to_fit shouldn't have any effect
uv.shrink_to_fit(this->stream());
EXPECT_EQ(uv.size(), larger_size);
EXPECT_EQ(uv.capacity(), larger_size);
EXPECT_EQ(uv.data(), larger_data);
EXPECT_EQ(uv.begin(), larger_begin);
}
TYPED_TEST(TypedUVectorTest, ResizeToZero)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
uv.resize(0, this->stream());
EXPECT_EQ(uv.size(), 0);
EXPECT_TRUE(uv.is_empty());
EXPECT_EQ(uv.capacity(), original_size);
uv.shrink_to_fit(this->stream());
EXPECT_EQ(uv.capacity(), 0);
}
TYPED_TEST(TypedUVectorTest, Release)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
auto original_data = uv.data();
rmm::device_buffer storage = uv.release();
EXPECT_EQ(uv.size(), 0);
EXPECT_EQ(uv.capacity(), 0);
EXPECT_TRUE(uv.is_empty());
EXPECT_EQ(storage.data(), original_data);
EXPECT_EQ(storage.size(), original_size * sizeof(TypeParam));
}
TYPED_TEST(TypedUVectorTest, ElementPointer)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
for (std::size_t i = 0; i < uv.size(); ++i) {
EXPECT_NE(uv.element_ptr(i), nullptr);
}
}
TYPED_TEST(TypedUVectorTest, OOBSetElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
EXPECT_THROW(uv.set_element(uv.size() + 1, 42, this->stream()), rmm::out_of_range);
}
TYPED_TEST(TypedUVectorTest, OOBGetElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
EXPECT_THROW(uv.element(uv.size() + 1, this->stream()), rmm::out_of_range);
}
TYPED_TEST(TypedUVectorTest, GetSetElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
for (std::size_t i = 0; i < uv.size(); ++i) {
uv.set_element(i, i, this->stream());
EXPECT_EQ(static_cast<TypeParam>(i), uv.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, GetSetElementAsync)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
for (std::size_t i = 0; i < uv.size(); ++i) {
uv.set_element_async(i, i, this->stream());
EXPECT_EQ(static_cast<TypeParam>(i), uv.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, FrontBackElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
auto first = TypeParam{42};
auto last = TypeParam{13};
uv.set_element(0, first, this->stream());
uv.set_element(uv.size() - 1, last, this->stream());
EXPECT_EQ(first, uv.front_element(this->stream()));
EXPECT_EQ(last, uv.back_element(this->stream()));
}
| 36093b1a9a07f0af70bb7ac7618c050a3c2834d8.cu |
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <gtest/internal/gtest-type-util.h>
#include <rmm/device_uvector.hpp>
#include <thrust/logical.h>
template <typename T>
struct TypedUVectorTest : ::testing::Test {
cudaStream_t stream() const noexcept { return cudaStream_t{0}; }
};
using TestTypes = ::testing::Types<int8_t, int32_t, uint64_t, float, double>;
TYPED_TEST_CASE(TypedUVectorTest, TestTypes);
TYPED_TEST(TypedUVectorTest, ZeroSizeConstructor)
{
rmm::device_uvector<TypeParam> uv(0, this->stream());
EXPECT_EQ(uv.size(), 0);
EXPECT_EQ(uv.end(), uv.begin());
EXPECT_TRUE(uv.is_empty());
}
TYPED_TEST(TypedUVectorTest, NonZeroSizeConstructor)
{
rmm::device_uvector<TypeParam> uv(12345, this->stream());
EXPECT_EQ(uv.size(), 12345);
EXPECT_NE(uv.data(), nullptr);
EXPECT_EQ(uv.end(), uv.begin() + uv.size());
EXPECT_FALSE(uv.is_empty());
EXPECT_NE(uv.element_ptr(0), nullptr);
}
TYPED_TEST(TypedUVectorTest, CopyConstructor)
{
rmm::device_uvector<TypeParam> uv(12345, this->stream());
rmm::device_uvector<TypeParam> uv_copy(uv, this->stream());
EXPECT_EQ(uv_copy.size(), uv.size());
EXPECT_NE(uv_copy.data(), nullptr);
EXPECT_EQ(uv_copy.end(), uv_copy.begin() + uv_copy.size());
EXPECT_FALSE(uv_copy.is_empty());
EXPECT_NE(uv_copy.element_ptr(0), nullptr);
}
TYPED_TEST(TypedUVectorTest, ResizeSmaller)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
auto original_data = uv.data();
auto original_begin = uv.begin();
auto smaller_size = uv.size() - 1;
uv.resize(smaller_size, this->stream());
EXPECT_EQ(original_data, uv.data());
EXPECT_EQ(original_begin, uv.begin());
EXPECT_EQ(uv.size(), smaller_size);
EXPECT_EQ(uv.capacity(), original_size);
// shrink_to_fit should force a new allocation
uv.shrink_to_fit(this->stream());
EXPECT_EQ(uv.size(), smaller_size);
EXPECT_EQ(uv.capacity(), smaller_size);
}
TYPED_TEST(TypedUVectorTest, ResizeLarger)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
auto original_data = uv.data();
auto original_begin = uv.begin();
auto larger_size = uv.size() + 1;
uv.resize(larger_size, this->stream());
EXPECT_NE(uv.data(), original_data);
EXPECT_NE(uv.begin(), original_begin);
EXPECT_EQ(uv.size(), larger_size);
EXPECT_EQ(uv.capacity(), larger_size);
auto larger_data = uv.data();
auto larger_begin = uv.begin();
// shrink_to_fit shouldn't have any effect
uv.shrink_to_fit(this->stream());
EXPECT_EQ(uv.size(), larger_size);
EXPECT_EQ(uv.capacity(), larger_size);
EXPECT_EQ(uv.data(), larger_data);
EXPECT_EQ(uv.begin(), larger_begin);
}
TYPED_TEST(TypedUVectorTest, ResizeToZero)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
uv.resize(0, this->stream());
EXPECT_EQ(uv.size(), 0);
EXPECT_TRUE(uv.is_empty());
EXPECT_EQ(uv.capacity(), original_size);
uv.shrink_to_fit(this->stream());
EXPECT_EQ(uv.capacity(), 0);
}
TYPED_TEST(TypedUVectorTest, Release)
{
auto original_size = 12345;
rmm::device_uvector<TypeParam> uv(original_size, this->stream());
auto original_data = uv.data();
rmm::device_buffer storage = uv.release();
EXPECT_EQ(uv.size(), 0);
EXPECT_EQ(uv.capacity(), 0);
EXPECT_TRUE(uv.is_empty());
EXPECT_EQ(storage.data(), original_data);
EXPECT_EQ(storage.size(), original_size * sizeof(TypeParam));
}
TYPED_TEST(TypedUVectorTest, ElementPointer)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
for (std::size_t i = 0; i < uv.size(); ++i) {
EXPECT_NE(uv.element_ptr(i), nullptr);
}
}
TYPED_TEST(TypedUVectorTest, OOBSetElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
EXPECT_THROW(uv.set_element(uv.size() + 1, 42, this->stream()), rmm::out_of_range);
}
TYPED_TEST(TypedUVectorTest, OOBGetElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
EXPECT_THROW(uv.element(uv.size() + 1, this->stream()), rmm::out_of_range);
}
TYPED_TEST(TypedUVectorTest, GetSetElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
for (std::size_t i = 0; i < uv.size(); ++i) {
uv.set_element(i, i, this->stream());
EXPECT_EQ(static_cast<TypeParam>(i), uv.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, GetSetElementAsync)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
for (std::size_t i = 0; i < uv.size(); ++i) {
uv.set_element_async(i, i, this->stream());
EXPECT_EQ(static_cast<TypeParam>(i), uv.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, FrontBackElement)
{
auto size = 12345;
rmm::device_uvector<TypeParam> uv(size, this->stream());
auto first = TypeParam{42};
auto last = TypeParam{13};
uv.set_element(0, first, this->stream());
uv.set_element(uv.size() - 1, last, this->stream());
EXPECT_EQ(first, uv.front_element(this->stream()));
EXPECT_EQ(last, uv.back_element(this->stream()));
}
|
29f25a5b66c1a45f14a7d14e0cf128807622efa4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string>
#include <iostream>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <hip/hip_runtime.h>
//#include <time.h>
#include <stdlib.h>
#include <fstream>
#include <fstream>
#include <iomanip>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 32
int total_weight=0;
//extern "C"
// a structure to represent a weighted edge in graph
struct Edge
{
int src, dest, weight;
};
// a structure to represent a connected, undirected
// and weighted graph as a collection of edges.
struct Graph
{
// V-> Number of vertices, E-> Number of edges
int V, E;
// graph is represented as an array of edges.
// Since the graph is undirected, the edge
// from src to dest is also edge from dest
// to src. Both are counted as 1 edge here.
Edge* edge;
};
// A structure to represent a subset for union-find
struct subset
{
int parent;
int rank;
};
struct Graph* createGraph(int V, int E)
{
Graph* graph = (Graph*)malloc(sizeof(Graph));
graph->V = V;
graph->E = E;
graph->edge = (Edge *)malloc(E*sizeof(Edge));
return graph;
}
struct Graph* create_graph(char *filename);
__device__ int find(struct subset subsets[], int i);
__device__ void Union(struct subset subsets[], int x, int y);
__device__ int find(struct subset subsets[], int i)
{
// find root and make root as parent of i
// (path compression)
//printf("subsets[i].parent is = %d,i = %d",subsets[i].parent,i);
for (int k=i;k<100;k++)
{
if (subsets[i].parent == i)
return subsets[i].parent;
else
continue;
}
//printf("inside kernel\n");
return subsets[i].parent;
}
// A function that does union of two sets of x and y
// (uses union by rank)
__device__ void Union(struct subset subsets[], int x, int y)
{
int xroot = find(subsets, x);
int yroot = find(subsets, y);
// Attach smaller rank tree under root of high
// rank tree (Union by Rank)
if (subsets[xroot].rank < subsets[yroot].rank)
subsets[xroot].parent = yroot;
else if (subsets[xroot].rank > subsets[yroot].rank)
subsets[yroot].parent = xroot;
// If ranks are same, then make one as root and
// increment its rank by one
else
{
subsets[yroot].parent = xroot;
subsets[xroot].rank++;
}
}
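// Kernel: one thread per edge. Each thread looks up the components of its edge's two
// endpoints and, if they differ, records itself as a candidate cheapest outgoing edge for
// both components (cheapest_in holds the previous choices, cheapest_out the new ones).
// Writes to cheapest_out are not synchronised across threads.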
__global__ void find_subsets(struct subset* subsets, Edge* edge, int* cheapest_in, int* cheapest_out, unsigned long long* runtime)
{
int tid=threadIdx.x;
unsigned long long start_time=clock64();
// Find components (or sets) of two corners
// of current edge
// printf("Kernel Address %u and tid:%d\n",edge,tid);
//printf ("edgesrc %d\n", edge[tid].src);
int set1 = find(subsets, edge[tid].src);
//printf("inside kernel\n");
int set2 = find(subsets, edge[tid].dest);
// If two corners of current edge belong to
// same set, ignore current edge
if (set1 == set2)
return;
// Else check if current edge is closer to previous
// cheapest edges of set1 and set2
if (cheapest_in[set1] == -1 ||
edge[cheapest_in[set1]].weight > edge[tid].weight)
cheapest_out[set1] = tid;
     if (cheapest_in[set2] == -1 ||
        edge[cheapest_in[set2]].weight > edge[tid].weight)
cheapest_out[set2] = tid;
//printf ("thread id %d\n",tid);
unsigned long long stop_time=clock64();
runtime[tid]=(unsigned long long)(stop_time-start_time);//runtime for each thread
}
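// Kernel: one thread per vertex/component. If a cheapest edge was recorded for this slot,
// its weight goes into MSTweight[tid] and the two endpoint components are unioned;
// numTrees[tid] is set to 0 on a merge and 1 otherwise (the host sums these afterwards).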
__global__ void subsets_weight(struct subset* subsets, Edge* edge, int* cheapest_in, int* MSTweight, int* numTrees, unsigned long long* runtime)
{
int tid=threadIdx.x;
unsigned long long start_time=clock64();
// Check if cheapest for current set exists
if (cheapest_in[tid] != -1)
{
int set1 = find(subsets, edge[cheapest_in[tid]].src);
int set2 = find(subsets, edge[cheapest_in[tid]].dest);
// if (set1 != set2)
// continue;
MSTweight[tid] = edge[cheapest_in[tid]].weight;
// printf ("mst in kernel %d\n", MSTweight[tid]);
// printf("Edge %d-%d-%d included in MST\n",
// edge[cheapest[i]].src, edge[cheapest[i]].dest,
// edge[cheapest[i]].weight);
// Do a union of set1 and set2 and decrease number
// of trees
Union(subsets, set1, set2);//--
numTrees[tid]=0;
//printf ("inside ker\n");
}
else
{
MSTweight[tid]=0;
numTrees[tid]=1;
}
unsigned long long stop_time=clock64();
runtime[tid]=(unsigned long long)(stop_time-start_time);//runtime for each thread
}
/*
void boruvkaMST(struct Graph* graph)
{
}
*/
/**
* Host main routine
*/
//#define V 100
//#define E 100
#define EDGE E
#define VERTEX V
int main(int argc, char *argv[])
{
if (argc < 2) {
printf("Error: usage: %s <program_file_1> <program_file_2> ...\n",
argv[0]);
exit(1);
}
printf("boruvka Simulator\n\n");
// struct Graph* graph = create_graph(argv[1]);
char *file = argv[1];
char *line_arr = (char *)malloc(100*sizeof(char));
std::string line;
std::ifstream myfile (file);
int V, E;
getline(myfile, line);
strcpy(line_arr, line.c_str());
// printf("string is %s\n",line_arr);
char *graph_dims = strtok(line_arr, " ");
V = atoi(graph_dims);
//printf("Vertices %d\n",V);
graph_dims = strtok(NULL, " ");
E = atoi(graph_dims);
// printf("Edges %d\n",E);
Edge *edge = (Edge *)malloc(2*E*sizeof(Edge));
struct Graph* g = createGraph(V, 2*E);
// Graph g = instantiate_graph(V, 2 * E);
int offset_count = 0;
int temp;
while(getline (myfile, line))
{
int src=0, dest=0;
strcpy(line_arr, line.c_str());
// printf("string is %s\n",line_arr);
char* s = strtok(line_arr, " ");
src = atoi(s);
s = strtok(NULL, " ");
dest = atoi(s);
//printf("dest is %d\n",dest);
s = strtok(NULL, " ");
int weight = atoi(s);
total_weight+=weight;
//printf("weight is %d\n",weight);
g->edge[offset_count].src = src;
g->edge[offset_count].dest = dest;
g->edge[offset_count].weight = weight;
offset_count++;
// printf ("after ifstream %d\n", offset_count);
}
// printf ("after graph generation\n");
printf ("total weight %d\n", total_weight);
myfile.close();
free(line_arr);
//printf ("eof\n");
hipError_t err = hipSuccess;
// Get data of given graph
// int V = graph->V, E = graph->E;
edge = g->edge;
//printf ("Edge details %d\n", edge[5].weight);
// Allocate memory for creating V subsets.
// struct subset *subsets = new subset[V];
struct subset *subsets = (subset *)malloc(V*sizeof(subset));
// An array to store index of the cheapest edge of
// subset. The stored index for indexing array 'edge[]'
// int *cheapest = new int[V];
int *cheapest = (int *)malloc(V*sizeof(int));
// Create V subsets with single elements
for (int v = 0; v < V; ++v)//--
{
subsets[v].parent = v;
subsets[v].rank = 0;
cheapest[v] = -1;
}
//printf ("inside\n");
// Initially there are V different trees.
// Finally there will be one tree that will be MST
int numTrees = V;
int MSTweight = 0;
unsigned long long net_runtime=0;//stores the total execution time
subset *d_subsets;//=(subset *)malloc(V*sizeof(subset));
//printf ("inside1\n");
err = hipMalloc((void**)&d_subsets, V*sizeof(subset)); // TODO: Allocate context in GPU device memory
//printf ("inside2\n");
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device subset data (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf ("inside1\n");
Edge *d_edge;//=(Edge *)malloc(sizeof(Edge));
//printf ("inside2\n");
err = hipMalloc((void**)&d_edge, 2*E*sizeof(Edge)); // TODO: Allocate context in GPU device memory
//printf ("inside2\n");
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device edge data (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Edge *d_graph;//=(Edge *)malloc(sizeof(Edge));
//printf ("inside2\n");
/*
err = hipMalloc((void**)&d_graph, E*sizeof(Graph)); // TODO: Allocate context in GPU device memory
//printf ("inside2\n");
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device graph data (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
int *d_cheapest;// = (int *)malloc(100*sizeof(int));
err = hipMalloc((void**)&d_cheapest, V*sizeof(int)); // TODO: Allocate context in GPU device memory
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device cheapest data (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf ("inside2\n");
int* d_cheapest_out;// = (int*)malloc(100*sizeof(int));
err = hipMalloc((void**)&d_cheapest_out, V*sizeof(int)); // TODO: Allocate context in GPU device memory
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device cheapest out data (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("Here\n");
/*-------------runtime related memory allocation----------*/
unsigned long long* d_runtime;
int r_size = EDGE*VERTEX*sizeof(unsigned long long);
unsigned long long* runtime = (unsigned long long*)malloc(r_size);
memset(runtime, 0, r_size);
hipMalloc((void**)&d_runtime, r_size);
/*-------------------------xxxxxxxxx-----------------------*/
int* d_MSTweight;
int mst_size = VERTEX*sizeof(int);
int* mst_weight = (int*)malloc(mst_size);
memset(mst_weight, 0, mst_size);
hipMalloc((void**)&d_MSTweight, mst_size);
/*---------------------------------xxxxxxxxxx----------------*/
int* d_numTrees;
int trees_size = VERTEX*sizeof(int);
int* num_of_trees = (int*)malloc(trees_size);
memset(num_of_trees, 0, trees_size);
hipMalloc((void**)&d_numTrees, trees_size);
/*------------------xxxxxxxxxxxxxx--------------------*/
// int EDGE = E;
// int VERTEX = V;
// Keep combining components (or sets) until all
    // components have been combined into a single MST.
// printf("Number of trees %d\n",numTrees);
while (numTrees > 1)//--
{
// Traverse through all edges and update
// cheapest of every component
// printf("First for loop1");
err = hipMemcpy(d_subsets, subsets, V*sizeof(subset), hipMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy subset data from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_edge, edge, 2*E*sizeof(Edge), hipMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy edge data from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_cheapest, cheapest, V*sizeof(int), hipMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy cheapest data from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
err = hipMemcpy(d_numTrees, numTrees, sizeof(int), hipMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy numTrees data from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
dim3 dimGrid1(EDGE,1, 1);
dim3 dimBlock1(1, 1, 1);
//printf("Address %u\n",d_edge);
// Call the kernel function
hipLaunchKernelGGL(( find_subsets), dim3(dimBlock1),dim3(dimGrid1), 0, 0, d_subsets, d_edge, d_cheapest, d_cheapest_out, d_runtime);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Kernel-1 execution failed (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
//printf("Copy between kernel data from the CUDA device to the host memory\n");//copying the updated context from GPU to CPU
err = hipMemcpy(cheapest,d_cheapest_out, V*sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy between kernel data from device to host (error code %s)!\n", hipGetErrorString(err));
}
err = hipMemcpy(d_cheapest, cheapest, V*sizeof(int), hipMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy cheapest data second time from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
unsigned long long elapsed_time_EDGE = 0;
for(int i = 0; i < EDGE; i++)
if(elapsed_time_EDGE < runtime[i])
elapsed_time_EDGE = runtime[i];//highest execution time among all the simultaneously running threads
net_runtime += elapsed_time_EDGE;// calculates the total execution time, each time when the kernel is executed
dim3 dimGrid2(VERTEX,1, 1);
dim3 dimBlock2(1, 1, 1);
// Call the kernel function
hipLaunchKernelGGL(( subsets_weight), dim3(dimBlock2),dim3(dimGrid2), 0, 0, d_subsets, d_edge, d_cheapest, d_MSTweight, d_numTrees, d_runtime);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Kernel-2 execution failed (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
err = hipMemcpy(subsets, d_subsets, V*sizeof(subset), hipMemcpyDeviceToHost);// TODO: Copy the input/updated context to GPU
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy subset data from device to host device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost);//--
hipDeviceSynchronize();
unsigned long long elapsed_time_VER = 0;
for(int j = 0; j < VERTEX; j++)
if(elapsed_time_VER < runtime[j])
elapsed_time_VER = runtime[j];//highest execution time among all the simultaneously running threads
net_runtime += elapsed_time_VER;// calculates the total execution time, each time when the kernel is executed
hipMemcpy(num_of_trees, d_numTrees, trees_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for(int k = 0; k < VERTEX; k++)
numTrees += num_of_trees[k];
numTrees = numTrees/VERTEX;
//printf("num trees %d\n", numTrees);
//numTrees--;
//printf ("num of trees %d\n", numTrees);
// printf ("mst weight %d\n", d_MSTweight);
hipMemcpy(mst_weight, d_MSTweight, mst_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
int temp_weight=0;
for(int n = 0; n < VERTEX; n++)
//if (temp_weight<mst_weight[n])
// temp_weight=mst_weight[n];
MSTweight += mst_weight[n];
}
hipFree(d_subsets);
hipFree(d_edge);
hipFree(d_cheapest);
hipFree(d_cheapest_out);
// printf("Weight of MST is %d\n", MSTweight);
printf("total run time in nanoseconds on GPU %d\n",int(net_runtime*8.17)/64);
    return 0;
}
///--------working function--------///
/*
struct Graph* create_graph(char *filename)
{
char *file = filename;
//(Edge *)malloc(sizeof(Edge))
char *line_arr = (char *)malloc(100*sizeof(char));
std::string line;
std::ifstream myfile (file);
// int V, E;
getline(myfile, line);
strcpy(line_arr, line.c_str());
printf("string is %s\n",line_arr);
char *graph_dims = strtok(line_arr, " ");
//V = atoi(graph_dims);
printf("Vertices %d\n",V);
graph_dims = strtok(NULL, " ");
//E = atoi(graph_dims);
printf("Edges %d\n",E);
struct Graph* g = createGraph(V, 2*E);
// Graph g = instantiate_graph(V, 2 * E);
int offset_count = 0;
int temp;
while(getline (myfile, line))
{
int src=0, dest=0;
strcpy(line_arr, line.c_str());
printf("string is %s\n",line_arr);
char* s = strtok(line_arr, " ");
src = atoi(s);
s = strtok(NULL, " ");
dest = atoi(s);
printf("dest is %d\n",dest);
s = strtok(NULL, " ");
int weight = atoi(s);
total_weight+=weight;
printf("weight is %d\n",weight);
g->edge[offset_count].src = src;
g->edge[offset_count].dest = dest;
g->edge[offset_count].weight = weight;
offset_count++;
printf ("after ifstream %d\n", offset_count);
}
printf ("after graph generation\n");
printf ("total weight %d\n", total_weight);
myfile.close();
free(line_arr);
printf ("eof\n");
return g;
}*/
| 29f25a5b66c1a45f14a7d14e0cf128807622efa4.cu |
#include <stdio.h>
#include <string>
#include <iostream>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <cuda_runtime.h>
//#include <time.h>
#include <stdlib.h>
#include <fstream>
#include <fstream>
#include <iomanip>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 32
int total_weight=0;
//extern "C"
// a structure to represent a weighted edge in graph
struct Edge
{
int src, dest, weight;
};
// a structure to represent a connected, undirected
// and weighted graph as a collection of edges.
struct Graph
{
// V-> Number of vertices, E-> Number of edges
int V, E;
// graph is represented as an array of edges.
// Since the graph is undirected, the edge
// from src to dest is also edge from dest
// to src. Both are counted as 1 edge here.
Edge* edge;
};
// A structure to represent a subset for union-find
struct subset
{
int parent;
int rank;
};
struct Graph* createGraph(int V, int E)
{
Graph* graph = (Graph*)malloc(sizeof(Graph));
graph->V = V;
graph->E = E;
graph->edge = (Edge *)malloc(E*sizeof(Edge));
return graph;
}
struct Graph* create_graph(char *filename);
__device__ int find(struct subset subsets[], int i);
__device__ void Union(struct subset subsets[], int x, int y);
__device__ int find(struct subset subsets[], int i)
{
// find root and make root as parent of i
// (path compression)
//printf("subsets[i].parent is = %d,i = %d",subsets[i].parent,i);
for (int k=i;k<100;k++)
{
if (subsets[i].parent == i)
return subsets[i].parent;
else
continue;
}
//printf("inside kernel\n");
return subsets[i].parent;
}
// A function that does union of two sets of x and y
// (uses union by rank)
__device__ void Union(struct subset subsets[], int x, int y)
{
int xroot = find(subsets, x);
int yroot = find(subsets, y);
// Attach smaller rank tree under root of high
// rank tree (Union by Rank)
if (subsets[xroot].rank < subsets[yroot].rank)
subsets[xroot].parent = yroot;
else if (subsets[xroot].rank > subsets[yroot].rank)
subsets[yroot].parent = xroot;
// If ranks are same, then make one as root and
// increment its rank by one
else
{
subsets[yroot].parent = xroot;
subsets[xroot].rank++;
}
}
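// Kernel: one thread per edge. Each thread looks up the components of its edge's two
// endpoints and, if they differ, records itself as a candidate cheapest outgoing edge for
// both components (cheapest_in holds the previous choices, cheapest_out the new ones).
// Writes to cheapest_out are not synchronised across threads.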
__global__ void find_subsets(struct subset* subsets, Edge* edge, int* cheapest_in, int* cheapest_out, unsigned long long* runtime)
{
int tid=threadIdx.x;
unsigned long long start_time=clock64();
// Find components (or sets) of two corners
// of current edge
// printf("Kernel Address %u and tid:%d\n",edge,tid);
//printf ("edgesrc %d\n", edge[tid].src);
int set1 = find(subsets, edge[tid].src);
//printf("inside kernel\n");
int set2 = find(subsets, edge[tid].dest);
// If two corners of current edge belong to
// same set, ignore current edge
if (set1 == set2)
return;
// Else check if current edge is closer to previous
// cheapest edges of set1 and set2
if (cheapest_in[set1] == -1 ||
edge[cheapest_in[set1]].weight > edge[tid].weight)
cheapest_out[set1] = tid;
     if (cheapest_in[set2] == -1 ||
        edge[cheapest_in[set2]].weight > edge[tid].weight)
cheapest_out[set2] = tid;
//printf ("thread id %d\n",tid);
unsigned long long stop_time=clock64();
runtime[tid]=(unsigned long long)(stop_time-start_time);//runtime for each thread
}
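// Kernel: one thread per vertex/component. If a cheapest edge was recorded for this slot,
// its weight goes into MSTweight[tid] and the two endpoint components are unioned;
// numTrees[tid] is set to 0 on a merge and 1 otherwise (the host sums these afterwards).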
__global__ void subsets_weight(struct subset* subsets, Edge* edge, int* cheapest_in, int* MSTweight, int* numTrees, unsigned long long* runtime)
{
int tid=threadIdx.x;
unsigned long long start_time=clock64();
// Check if cheapest for current set exists
if (cheapest_in[tid] != -1)
{
int set1 = find(subsets, edge[cheapest_in[tid]].src);
int set2 = find(subsets, edge[cheapest_in[tid]].dest);
// if (set1 != set2)
// continue;
MSTweight[tid] = edge[cheapest_in[tid]].weight;
// printf ("mst in kernel %d\n", MSTweight[tid]);
// printf("Edge %d-%d-%d included in MST\n",
// edge[cheapest[i]].src, edge[cheapest[i]].dest,
// edge[cheapest[i]].weight);
// Do a union of set1 and set2 and decrease number
// of trees
Union(subsets, set1, set2);//--
numTrees[tid]=0;
//printf ("inside ker\n");
}
else
{
MSTweight[tid]=0;
numTrees[tid]=1;
}
unsigned long long stop_time=clock64();
runtime[tid]=(unsigned long long)(stop_time-start_time);//runtime for each thread
}
/*
void boruvkaMST(struct Graph* graph)
{
}
*/
/**
* Host main routine
*/
//#define V 100
//#define E 100
#define EDGE E
#define VERTEX V
int main(int argc, char *argv[])
{
if (argc < 2) {
printf("Error: usage: %s <program_file_1> <program_file_2> ...\n",
argv[0]);
exit(1);
}
printf("boruvka Simulator\n\n");
// struct Graph* graph = create_graph(argv[1]);
char *file = argv[1];
char *line_arr = (char *)malloc(100*sizeof(char));
std::string line;
std::ifstream myfile (file);
int V, E;
getline(myfile, line);
strcpy(line_arr, line.c_str());
// printf("string is %s\n",line_arr);
char *graph_dims = strtok(line_arr, " ");
V = atoi(graph_dims);
//printf("Vertices %d\n",V);
graph_dims = strtok(NULL, " ");
E = atoi(graph_dims);
// printf("Edges %d\n",E);
Edge *edge = (Edge *)malloc(2*E*sizeof(Edge));
struct Graph* g = createGraph(V, 2*E);
// Graph g = instantiate_graph(V, 2 * E);
int offset_count = 0;
int temp;
while(getline (myfile, line))
{
int src=0, dest=0;
strcpy(line_arr, line.c_str());
// printf("string is %s\n",line_arr);
char* s = strtok(line_arr, " ");
src = atoi(s);
s = strtok(NULL, " ");
dest = atoi(s);
//printf("dest is %d\n",dest);
s = strtok(NULL, " ");
int weight = atoi(s);
total_weight+=weight;
//printf("weight is %d\n",weight);
g->edge[offset_count].src = src;
g->edge[offset_count].dest = dest;
g->edge[offset_count].weight = weight;
offset_count++;
// printf ("after ifstream %d\n", offset_count);
}
// printf ("after graph generation\n");
printf ("total weight %d\n", total_weight);
myfile.close();
free(line_arr);
//printf ("eof\n");
cudaError_t err = cudaSuccess;
// Get data of given graph
// int V = graph->V, E = graph->E;
edge = g->edge;
//printf ("Edge details %d\n", edge[5].weight);
// Allocate memory for creating V subsets.
// struct subset *subsets = new subset[V];
struct subset *subsets = (subset *)malloc(V*sizeof(subset));
// An array to store index of the cheapest edge of
// subset. The stored index for indexing array 'edge[]'
// int *cheapest = new int[V];
int *cheapest = (int *)malloc(V*sizeof(int));
// Create V subsets with single elements
for (int v = 0; v < V; ++v)//--
{
subsets[v].parent = v;
subsets[v].rank = 0;
cheapest[v] = -1;
}
//printf ("inside\n");
// Initially there are V different trees.
// Finally there will be one tree that will be MST
int numTrees = V;
int MSTweight = 0;
unsigned long long net_runtime=0;//stores the total execution time
subset *d_subsets;//=(subset *)malloc(V*sizeof(subset));
//printf ("inside1\n");
err = cudaMalloc((void**)&d_subsets, V*sizeof(subset)); // TODO: Allocate context in GPU device memory
//printf ("inside2\n");
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device subset data (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf ("inside1\n");
Edge *d_edge;//=(Edge *)malloc(sizeof(Edge));
//printf ("inside2\n");
err = cudaMalloc((void**)&d_edge, 2*E*sizeof(Edge)); // TODO: Allocate context in GPU device memory
//printf ("inside2\n");
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device edge data (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
Edge *d_graph;//=(Edge *)malloc(sizeof(Edge));
//printf ("inside2\n");
/*
err = cudaMalloc((void**)&d_graph, E*sizeof(Graph)); // TODO: Allocate context in GPU device memory
//printf ("inside2\n");
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device graph data (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
int *d_cheapest;// = (int *)malloc(100*sizeof(int));
err = cudaMalloc((void**)&d_cheapest, V*sizeof(int)); // TODO: Allocate context in GPU device memory
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device cheapest data (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf ("inside2\n");
int* d_cheapest_out;// = (int*)malloc(100*sizeof(int));
err = cudaMalloc((void**)&d_cheapest_out, V*sizeof(int)); // TODO: Allocate context in GPU device memory
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device cheapest out data (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("Here\n");
/*-------------runtime related memory allocation----------*/
unsigned long long* d_runtime;
int r_size = EDGE*VERTEX*sizeof(unsigned long long);
unsigned long long* runtime = (unsigned long long*)malloc(r_size);
memset(runtime, 0, r_size);
cudaMalloc((void**)&d_runtime, r_size);
/*-------------------------xxxxxxxxx-----------------------*/
int* d_MSTweight;
int mst_size = VERTEX*sizeof(int);
int* mst_weight = (int*)malloc(mst_size);
memset(mst_weight, 0, mst_size);
cudaMalloc((void**)&d_MSTweight, mst_size);
/*---------------------------------xxxxxxxxxx----------------*/
int* d_numTrees;
int trees_size = VERTEX*sizeof(int);
int* num_of_trees = (int*)malloc(trees_size);
memset(num_of_trees, 0, trees_size);
cudaMalloc((void**)&d_numTrees, trees_size);
/*------------------xxxxxxxxxxxxxx--------------------*/
// int EDGE = E;
// int VERTEX = V;
// Keep combining components (or sets) until all
    // components have been combined into a single MST.
// printf("Number of trees %d\n",numTrees);
while (numTrees > 1)//--
{
// Traverse through all edges and update
// cheapest of every component
// printf("First for loop1");
err = cudaMemcpy(d_subsets, subsets, V*sizeof(subset), cudaMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy subset data from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_edge, edge, 2*E*sizeof(Edge), cudaMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy edge data from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_cheapest, cheapest, V*sizeof(int), cudaMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy cheapest data from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
err = cudaMemcpy(d_numTrees, numTrees, sizeof(int), cudaMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy numTrees data from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
dim3 dimGrid1(EDGE,1, 1);
dim3 dimBlock1(1, 1, 1);
//printf("Address %u\n",d_edge);
// Call the kernel function
find_subsets<<<dimBlock1,dimGrid1>>>(d_subsets, d_edge, d_cheapest, d_cheapest_out, d_runtime);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Kernel-1 execution failed (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
//printf("Copy between kernel data from the CUDA device to the host memory\n");//copying the updated context from GPU to CPU
err = cudaMemcpy(cheapest,d_cheapest_out, V*sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy between kernel data from device to host (error code %s)!\n", cudaGetErrorString(err));
}
err = cudaMemcpy(d_cheapest, cheapest, V*sizeof(int), cudaMemcpyHostToDevice);// TODO: Copy the input/updated context to GPU
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy cheapest data second time from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
unsigned long long elapsed_time_EDGE = 0;
for(int i = 0; i < EDGE; i++)
if(elapsed_time_EDGE < runtime[i])
elapsed_time_EDGE = runtime[i];//highest execution time among all the simultaneously running threads
net_runtime += elapsed_time_EDGE;// calculates the total execution time, each time when the kernel is executed
dim3 dimGrid2(VERTEX,1, 1);
dim3 dimBlock2(1, 1, 1);
// Call the kernel function
subsets_weight<<<dimBlock2,dimGrid2>>>(d_subsets, d_edge, d_cheapest, d_MSTweight, d_numTrees, d_runtime);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Kernel-2 execution failed (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
err = cudaMemcpy(subsets, d_subsets, V*sizeof(subset), cudaMemcpyDeviceToHost);// TODO: Copy the input/updated context to GPU
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy subset data from device to host device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost);//--
cudaThreadSynchronize();
unsigned long long elapsed_time_VER = 0;
for(int j = 0; j < VERTEX; j++)
if(elapsed_time_VER < runtime[j])
elapsed_time_VER = runtime[j];//highest execution time among all the simultaneously running threads
net_runtime += elapsed_time_VER;// calculates the total execution time, each time when the kernel is executed
cudaMemcpy(num_of_trees, d_numTrees, trees_size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
for(int k = 0; k < VERTEX; k++)
numTrees += num_of_trees[k];
numTrees = numTrees/VERTEX;
//printf("num trees %d\n", numTrees);
//numTrees--;
//printf ("num of trees %d\n", numTrees);
// printf ("mst weight %d\n", d_MSTweight);
cudaMemcpy(mst_weight, d_MSTweight, mst_size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
int temp_weight=0;
for(int n = 0; n < VERTEX; n++)
//if (temp_weight<mst_weight[n])
// temp_weight=mst_weight[n];
MSTweight += mst_weight[n];
}
cudaFree(d_subsets);
cudaFree(d_edge);
cudaFree(d_cheapest);
cudaFree(d_cheapest_out);
// printf("Weight of MST is %d\n", MSTweight);
printf("total run time in nanoseconds on GPU %d\n",int(net_runtime*8.17)/64);
    return 0;
}
///--------working function--------///
/*
struct Graph* create_graph(char *filename)
{
char *file = filename;
//(Edge *)malloc(sizeof(Edge))
char *line_arr = (char *)malloc(100*sizeof(char));
std::string line;
std::ifstream myfile (file);
// int V, E;
getline(myfile, line);
strcpy(line_arr, line.c_str());
printf("string is %s\n",line_arr);
char *graph_dims = strtok(line_arr, " ");
//V = atoi(graph_dims);
printf("Vertices %d\n",V);
graph_dims = strtok(NULL, " ");
//E = atoi(graph_dims);
printf("Edges %d\n",E);
struct Graph* g = createGraph(V, 2*E);
// Graph g = instantiate_graph(V, 2 * E);
int offset_count = 0;
int temp;
while(getline (myfile, line))
{
int src=0, dest=0;
strcpy(line_arr, line.c_str());
printf("string is %s\n",line_arr);
char* s = strtok(line_arr, " ");
src = atoi(s);
s = strtok(NULL, " ");
dest = atoi(s);
printf("dest is %d\n",dest);
s = strtok(NULL, " ");
int weight = atoi(s);
total_weight+=weight;
printf("weight is %d\n",weight);
g->edge[offset_count].src = src;
g->edge[offset_count].dest = dest;
g->edge[offset_count].weight = weight;
offset_count++;
printf ("after ifstream %d\n", offset_count);
}
printf ("after graph generation\n");
printf ("total weight %d\n", total_weight);
myfile.close();
free(line_arr);
printf ("eof\n");
return g;
}*/
|
599249faddff66b9dffa18423291ba56b7be7090.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BlockDim = 16 x 16
// GridDim = w / 16 * h /16
#include "..\..\LibCore\Glob.hpp"
#include "..\..\LibCore\GpuMat.hpp"
using namespace cg;
using namespace cg::core;
#define USE_MSDN
extern "C" __global__ void InterleaveUV(unsigned char * yuv_cb, unsigned char * yuv_cr, unsigned char * nv12_chroma, int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch){
int x = 0, y = 0;
unsigned char * pCb;
unsigned char * pCr;
unsigned char * pDst;
x = blockIdx.x * blockDim.x + threadIdx.x;
y = blockIdx.y * blockDim.y + threadIdx.y;
if((x < chroma_width) && (y < chroma_height)){
pCb = yuv_cb + (y * cb_pitch);
pCr = yuv_cr + (y * cr_pitch);
pDst = nv12_chroma + y * nv12_pitch;
pDst[x << 1] = pCb[x];
pDst[(x << 1) + 1] = pCr[x];
}
}
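// The rgb_to_y / rgb_to_yuv helpers below use integer approximations of the BT.601
// limited-range conversion when USE_MSDN is defined (e.g. Y = ((66R + 129G + 25B + 128) >> 8) + 16,
// as in Microsoft's "Converting between YUV and RGB" note); the other branch is a coarser
// percentage-based approximation. The colour-space intent is an assumption read from the coefficients.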
__device__ __forceinline__ void rgb_to_y(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y){
#ifndef USE_MSDN
y = static_cast<unsigned char>(((int)(30 * r) + (int)(59 * g)+(int)(11 *b))/100);
#else
y = static_cast<unsigned char>((((int)(66 * r) + (int)(129 * g) + (int)( 25 * b) + 128) >> 8) + 16);
#endif
}
__device__ __forceinline__ void rgb_to_yuv(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y, unsigned char & u, unsigned char & v){
rgb_to_y(b, g, r, y);
#ifndef USE_MSDN
u = static_cast<unsigned char>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<unsigned char>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
#else
u = static_cast<unsigned char>((((int)(-38 * r) - (int)(74 * g) + (int)(112 * b) + 128)>>8)+128);
v = static_cast<unsigned char>((((int)(112 * r) - (int)(94 * g) - (int)(19 * b) + 128)>>8)+ 128);
#endif
}
#if 0
template <typename T>
__global__ void _BGRAMatToNV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if( x + 1 >= src.cols || y + 1 >= src.rows)
return;
const size_t planeSize = src.rows * dst.step;
unsigned char * y_plane = dst.data;
unsigned char * u_plane = y_plane + planeSize;
int uvOff = 0;
T pix;
unsigned char y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
y_plane[y * dst.step + x] = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
y_plane[y * dst.step + x + 1] = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
y_plane[(y + 1) * dst.step + x] = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.x, pix.y, pix.z, y_val, u_val, v_val);
y_plane[(y + 1) * dst.step + x + 1] = y_val;
uvOff = y / 2 * dst.step + x / 2 * 2;
u_plane[uvOff] = u_val;
u_plane[uvOff + 1] = v_val;
}
extern "C" __global__ void RGBAToNV12(const GpuMat & src, GpuMat & dst){
switch(src.channels()){
case 3:
_BGRAMatToNV12(globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
_BGRAMatToNV12(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
}
extern "C" __global__ void RGBToNV12(const GpuMat & src, GpuMat & dst){
switch(src.channels()){
case 3:
_BGRAMatToNV12(globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
_BGRAMatToNV12(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
default:
break;
}
}
#endif
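// Each thread converts a 2x2 block of 4-byte source pixels: four luma values go into the
// Y plane and a single interleaved Cb/Cr pair (computed from the bottom-right pixel of the
// block) goes into the NV12 chroma plane that starts planeSize bytes after the Y plane.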
extern "C" __global__ void RGBAToNV12_2(unsigned char * pARGB, unsigned char * pNV, int srcPitch, int dstPitch, int width, int height){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
int rgbaSize = 4;
if (x + 1 >= width || y + 1 >= height)
return;
const int planeSize = height * dstPitch;
unsigned char * y_plane = pNV;
unsigned char * u_plane = y_plane + planeSize;
unsigned char y_val, u_val, v_val;
unsigned char r, g, b;
int uv_off = 0;
#if 0
// the (x, y)
r = pARGB[ y * srcPitch + x * rgbaSize + 0];
g = pARGB[ y * srcPitch + x * rgbaSize + 1];
b = pARGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
r = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
b = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
r = pARGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + x * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
r = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
#else
// the (x, y)
b = pARGB[ y * srcPitch + x * rgbaSize + 0];
g = pARGB[ y * srcPitch + x * rgbaSize + 1];
r = pARGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
    // the (x + 1, y)
b = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
r = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
b = pARGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + x * rgbaSize + 1];
r = pARGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
b = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
r = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
#endif
rgb_to_yuv(b, g, r, y_val, u_val, v_val);
y_plane[ (y + 1) * dstPitch + x + 1] = y_val;
uv_off = (y / 2) * dstPitch + x / 2 * 2;
u_plane[ uv_off ] = u_val;
u_plane[ uv_off + 1] = v_val;
}
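/*
 * Host-side launch sketch (illustrative only, not part of the original file; the function
 * and variable names are made up). Each thread covers a 2x2 pixel tile, so the grid spans
 * width/2 x height/2 threads:
 *
 * static void launchRGBAToNV12(unsigned char* d_rgba, unsigned char* d_nv12,
 *                              int srcPitch, int dstPitch, int width, int height,
 *                              hipStream_t stream)
 * {
 *     dim3 block(16, 16);
 *     dim3 grid((width / 2 + block.x - 1) / block.x,
 *               (height / 2 + block.y - 1) / block.y);
 *     hipLaunchKernelGGL(RGBAToNV12_2, grid, block, 0, stream,
 *                        d_rgba, d_nv12, srcPitch, dstPitch, width, height);
 * }
 */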
extern "C" __global__ void RGBToNV12_2(unsigned char * pRGB, unsigned char * pNV, int srcPitch, int dstPitch, int width, int height){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
int rgbaSize = 3;
if (x + 1 >= width || y + 1 >= height)
return;
const int planeSize = height * dstPitch;
unsigned char * y_plane = pNV;
unsigned char * u_plane = y_plane + planeSize;
unsigned char y_val, u_val, v_val;
unsigned char r, g, b;
int uv_off = 0;
// the (x, y)
r = pRGB[ y * srcPitch + x * rgbaSize + 0];
g = pRGB[ y * srcPitch + x * rgbaSize + 1];
b = pRGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
r = pRGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pRGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
b = pRGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
r = pRGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pRGB[ (y+1) * srcPitch + x * rgbaSize + 1];
b = pRGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
r = pRGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pRGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
b = pRGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
rgb_to_yuv(b, g, r, y_val, u_val, v_val);
y_plane[ (y + 1) * dstPitch + x + 1] = y_val;
uv_off = (y / 2) * dstPitch + x / 2 * 2;
u_plane[ uv_off ] = u_val;
u_plane[ uv_off + 1] = v_val;
}
| 599249faddff66b9dffa18423291ba56b7be7090.cu | // BlockDim = 16 x 16
// GridDim = w / 16 * h /16
#include "..\..\LibCore\Glob.hpp"
#include "..\..\LibCore\GpuMat.hpp"
using namespace cg;
using namespace cg::core;
#define USE_MSDN
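// InterleaveUV packs two planar chroma planes (Cb and Cr) into the single interleaved
// U,V plane of an NV12 surface, one thread per chroma sample.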
extern "C" __global__ void InterleaveUV(unsigned char * yuv_cb, unsigned char * yuv_cr, unsigned char * nv12_chroma, int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch){
int x = 0, y = 0;
unsigned char * pCb;
unsigned char * pCr;
unsigned char * pDst;
x = blockIdx.x * blockDim.x + threadIdx.x;
y = blockIdx.y * blockDim.y + threadIdx.y;
if((x < chroma_width) && (y < chroma_height)){
pCb = yuv_cb + (y * cb_pitch);
pCr = yuv_cr + (y * cr_pitch);
pDst = nv12_chroma + y * nv12_pitch;
pDst[x << 1] = pCb[x];
pDst[(x << 1) + 1] = pCr[x];
}
}
__device__ __forceinline__ void rgb_to_y(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y){
#ifndef USE_MSDN
y = static_cast<unsigned char>(((int)(30 * r) + (int)(59 * g)+(int)(11 *b))/100);
#else
y = static_cast<unsigned char>((((int)(66 * r) + (int)(129 * g) + (int)( 25 * b) + 128) >> 8) + 16);
#endif
}
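// Both branches are integer approximations of BT.601 RGB->YUV: the USE_MSDN path uses the
// common fixed-point form with studio-swing offsets (weighted sum >> 8, Y biased by +16,
// U/V biased by +128), while the non-MSDN path is a /100 full-range variant of the same
// coefficients.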
__device__ __forceinline__ void rgb_to_yuv(const unsigned char b, const unsigned char g, const unsigned char r, unsigned char & y, unsigned char & u, unsigned char & v){
rgb_to_y(b, g, r, y);
#ifndef USE_MSDN
u = static_cast<unsigned char>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<unsigned char>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
#else
u = static_cast<unsigned char>((((int)(-38 * r) - (int)(74 * g) + (int)(112 * b) + 128)>>8)+128);
v = static_cast<unsigned char>((((int)(112 * r) - (int)(94 * g) - (int)(19 * b) + 128)>>8)+ 128);
#endif
}
#if 0
template <typename T>
__global__ void _BGRAMatToNV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if( x + 1 >= src.cols || y + 1 >= src.rows)
return;
const size_t planeSize = src.rows * dst.step;
unsigned char * y_plane = dst.data;
unsigned char * u_plane = y_plane + planeSize;
int uvOff = 0;
T pix;
unsigned char y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
y_plane[y * dst.step + x] = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
y_plane[y * dst.step + x + 1] = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.x, pix.y, pix.z, y_val);
y_plane[(y + 1) * dst.step + x] = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.x, pix.y, pix.z, y_val, u_val, v_val);
y_plane[(y + 1) * dst.step + x + 1] = y_val;
uvOff = y / 2 * dst.step + x / 2 * 2;
u_plane[uvOff] = u_val;
u_plane[uvOff + 1] = v_val;
}
extern "C" __global__ void RGBAToNV12(const GpuMat & src, GpuMat & dst){
switch(src.channels()){
case 3:
_BGRAMatToNV12(globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
_BGRAMatToNV12(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
}
extern "C" __global__ void RGBToNV12(const GpuMat & src, GpuMat & dst){
switch(src.channels()){
case 3:
_BGRAMatToNV12(globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
_BGRAMatToNV12(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
default:
break;
}
}
#endif
extern "C" __global__ void RGBAToNV12_2(unsigned char * pARGB, unsigned char * pNV, int srcPitch, int dstPitch, int width, int height){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
int rgbaSize = 4;
if (x + 1 >= width || y + 1 >= height)
return;
const int planeSize = height * dstPitch;
unsigned char * y_plane = pNV;
unsigned char * u_plane = y_plane + planeSize;
unsigned char y_val, u_val, v_val;
unsigned char r, g, b;
int uv_off = 0;
#if 0
// the (x, y)
r = pARGB[ y * srcPitch + x * rgbaSize + 0];
g = pARGB[ y * srcPitch + x * rgbaSize + 1];
b = pARGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
r = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
b = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
r = pARGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + x * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
r = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
b = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
#else
// the (x, y)
b = pARGB[ y * srcPitch + x * rgbaSize + 0];
g = pARGB[ y * srcPitch + x * rgbaSize + 1];
r = pARGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
b = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
r = pARGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
b = pARGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + x * rgbaSize + 1];
r = pARGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
b = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
r = pARGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
#endif
rgb_to_yuv(b, g, r, y_val, u_val, v_val);
y_plane[ (y + 1) * dstPitch + x + 1] = y_val;
uv_off = (y / 2) * dstPitch + x / 2 * 2;
u_plane[ uv_off ] = u_val;
u_plane[ uv_off + 1] = v_val;
}
extern "C" __global__ void RGBToNV12_2(unsigned char * pRGB, unsigned char * pNV, int srcPitch, int dstPitch, int width, int height){
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
int rgbaSize = 3;
if (x + 1 >= width || y + 1 >= height)
return;
const int planeSize = height * dstPitch;
unsigned char * y_plane = pNV;
unsigned char * u_plane = y_plane + planeSize;
unsigned char y_val, u_val, v_val;
unsigned char r, g, b;
int uv_off = 0;
// the (x, y)
r = pRGB[ y * srcPitch + x * rgbaSize + 0];
g = pRGB[ y * srcPitch + x * rgbaSize + 1];
b = pRGB[ y * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x] = y_val;
// the (x + 1, y)
r = pRGB[ y * srcPitch + (x + 1) * rgbaSize + 0];
g = pRGB[ y * srcPitch + (x + 1) * rgbaSize + 1];
b = pRGB[ y * srcPitch + (x + 1) * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[y * dstPitch + x + 1] = y_val;
// the (x , y + 1)
r = pRGB[ (y+1) * srcPitch + x * rgbaSize + 0];
g = pRGB[ (y+1) * srcPitch + x * rgbaSize + 1];
b = pRGB[ (y+1) * srcPitch + x * rgbaSize + 2];
rgb_to_y(b, g, r, y_val);
y_plane[ (y+1) * dstPitch + x] = y_val;
// the (x +1, y + 1)
r = pRGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 0];
g = pRGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 1];
b = pRGB[ (y+1) * srcPitch + (x+1) * rgbaSize + 2];
rgb_to_yuv(b, g, r, y_val, u_val, v_val);
y_plane[ (y + 1) * dstPitch + x + 1] = y_val;
uv_off = (y / 2) * dstPitch + x / 2 * 2;
u_plane[ uv_off ] = u_val;
u_plane[ uv_off + 1] = v_val;
}
|
23909e6fea6a4f13725773367ee5d9074097fad2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Building xyz -> idx sparse tensor mapping
Written by Jiageng Mao
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "build_mapping_gpu.h"
#include "votr_cuda_utils.h"
// 32 bit Murmur3 hash
// unsigned int -> int, k >= 0, hash_size >0, should be ok?
__device__ int murmur_hash(int k, int hash_size) {
k ^= k >> 16;
k *= 0x85ebca6b;
k ^= k >> 13;
k *= 0xc2b2ae35;
k ^= k >> 16;
//return k & (hash_size-1);
return k % hash_size;
}
__device__ int hash(int k, int hash_size) {
return k % hash_size;
}
__device__ void hash_table_insert(int &key, int &value, int &hash_size, int *xyz_to_vidx) {
/*
xyz_to_idx (hash_size, 2) NO BATCH SIZE
*/
int hash_idx = hash(key, hash_size);
int prob_cnt = 0;
while(true) {
int prev_key = atomicCAS(xyz_to_vidx + hash_idx*2 + 0, EMPTY_KEY, key); // insert key when empty
if (prev_key == EMPTY_KEY || prev_key == key) {
xyz_to_vidx[hash_idx*2 + 1] = value; // insert value
break;
}
// linear probing
hash_idx = (hash_idx + 1) % hash_size;
// security in case of dead loop
prob_cnt += 1;
if (prob_cnt >= hash_size) break;
}
}
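/*
 * Lookup sketch (illustrative only, not part of the original file): once the table has been
 * built, reading it mirrors the insert above -- probe linearly from hash(key) until either
 * the key or an EMPTY_KEY slot is found.
 *
 * __device__ int hash_table_lookup(int key, int hash_size, const int *xyz_to_vidx) {
 *     int hash_idx = hash(key, hash_size);
 *     int prob_cnt = 0;
 *     while (true) {
 *         if (xyz_to_vidx[hash_idx * 2 + 0] == key) return xyz_to_vidx[hash_idx * 2 + 1];
 *         if (xyz_to_vidx[hash_idx * 2 + 0] == EMPTY_KEY) return -1;
 *         hash_idx = (hash_idx + 1) % hash_size;
 *         if (++prob_cnt >= hash_size) return -1;
 *     }
 * }
 */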
__global__ void downsample_with_tensor_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
/*
v_indices: [num_voxels, 4] bs + zyx indices of voxels
ds_v_indices: [bs, num_ds_voxels, 3] downsampled voxels, -1 if not unique
xyz_to_vidx: [bs, x_max, y_max, z_max] downsampled dense map
vcount: [bs]
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
int ds_z_idx = z_idx / z_stride;
int ds_y_idx = y_idx / y_stride;
int ds_x_idx = x_idx / x_stride;
if (ds_x_idx >= x_max || ds_x_idx < 0 || ds_y_idx < 0 || ds_y_idx >= y_max || ds_z_idx < 0 || ds_z_idx >= z_max) return;
xyz_to_vidx += bs_idx * x_max * y_max * z_max;
ds_v_indices += bs_idx * num_ds_voxels * 3;
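// Claim the (ds_x, ds_y, ds_z) cell: the first thread to exchange BLK_SIGNAL into it wins
// and allocates a new downsampled voxel; a thread that reads back BLK_SIGNAL lost the race
// and returns; if the cell already held a valid index, the old value is swapped back in and
// the thread returns.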
int ret_v = atomicExch(xyz_to_vidx + ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx, BLK_SIGNAL);
if (ret_v == BLK_SIGNAL){ // kill all block threads
return;
} else if (ret_v != EMPTY_KEY) { // already occupied
ret_v = atomicExch(xyz_to_vidx + ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx, ret_v);
return;
} else if (ret_v == EMPTY_KEY) {
int v_idx = atomicAdd(vcount + bs_idx, 1);
ds_v_indices[v_idx * 3 + 0] = ds_z_idx;
ds_v_indices[v_idx * 3 + 1] = ds_y_idx;
ds_v_indices[v_idx * 3 + 2] = ds_x_idx;
ret_v = atomicExch(xyz_to_vidx + ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx, v_idx);
return;
}
}
void downsample_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
hipError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( downsample_with_tensor_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, x_stride, y_stride, z_stride,
num_voxels, num_ds_voxels,
v_indices, ds_v_indices, xyz_to_vidx, vcount);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void downsample_with_hash_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels, int hash_size,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
/*
v_indices: [num_voxels, 4] bs + zyx indices of voxels
ds_v_indices: [bs, num_ds_voxels, 3] downsampled voxels, -1 if not unique
xyz_to_vidx: [bs, hash_size, 2] downsampled dense map
vcount: [bs]
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
int ds_z_idx = z_idx / z_stride;
int ds_y_idx = y_idx / y_stride;
int ds_x_idx = x_idx / x_stride;
if (ds_x_idx >= x_max || ds_x_idx < 0 || ds_y_idx < 0 || ds_y_idx >= y_max || ds_z_idx < 0 || ds_z_idx >= z_max) return;
xyz_to_vidx += bs_idx * hash_size * 2;
ds_v_indices += bs_idx * num_ds_voxels * 3;
int key = ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx;
// hash table with force insert, reject duplicates
int hash_idx = hash(key, hash_size);
int prob_cnt = 0;
while(true) {
int prev_key = atomicCAS(xyz_to_vidx + hash_idx*2 + 0, EMPTY_KEY, key); // insert key when empty
if (prev_key == EMPTY_KEY) {
int v_idx = atomicAdd(vcount + bs_idx, 1);
ds_v_indices[v_idx * 3 + 0] = ds_z_idx; // insert zyx to ds_indices
ds_v_indices[v_idx * 3 + 1] = ds_y_idx;
ds_v_indices[v_idx * 3 + 2] = ds_x_idx;
xyz_to_vidx[hash_idx*2 + 1] = v_idx; // insert value to hash table
break;
} else if (prev_key == key) { // already occupied
break;
}
// linear probing
hash_idx = (hash_idx + 1) % hash_size;
// security in case of dead loop
prob_cnt += 1;
if (prob_cnt >= hash_size) break;
}
}
void downsample_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels, int hash_size,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
hipError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( downsample_with_hash_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, x_stride, y_stride, z_stride,
num_voxels, num_ds_voxels, hash_size,
v_indices, ds_v_indices, xyz_to_vidx, vcount);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void build_mapping_with_tensor_kernel(int x_max, int y_max, int z_max, int num_voxels,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx) {
/*
v_indices: [num_voxels, 4] bs + zyx indices of voxels
xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return;
int v_sum = 0;
int bs_cnt = bs_idx - 1;
while(bs_cnt >= 0){
v_sum += v_bs_cnt[bs_cnt];
bs_cnt--;
}
int v_idx = th_idx - v_sum; // v_idx for this sample
xyz_to_vidx[bs_idx * x_max * y_max * z_max + x_idx * y_max * z_max + y_idx * z_max + z_idx] = v_idx;
}
void build_mapping_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx){
hipError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( build_mapping_with_tensor_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, num_voxels, v_indices, v_bs_cnt, xyz_to_vidx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void build_mapping_with_hash_kernel(int x_max, int y_max, int z_max, int num_voxels, int hash_size,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx) {
/*
v_indices: [N1+N2, 4] bs zyx indices of voxels
v_bs_cnt: [bs] num_voxels in each sample
xyz_to_vidx: [B, hash_size, 2] hash table key-value for dim-2
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
int v_sum = 0;
int bs_cnt = bs_idx - 1;
while(bs_cnt >= 0){
v_sum += v_bs_cnt[bs_cnt];
bs_cnt--;
}
int v_idx = th_idx - v_sum; // v_idx for this sample
xyz_to_vidx += bs_idx * hash_size * 2;
if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; // out of bound
// key -> [x_max, y_max, z_max] value -> v_idx
int key = x_idx * y_max * z_max + y_idx * z_max + z_idx;
hash_table_insert(key, v_idx, hash_size, xyz_to_vidx);
return;
}
void build_mapping_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int hash_size,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx){
hipError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( build_mapping_with_hash_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, num_voxels, hash_size,
v_indices, v_bs_cnt, xyz_to_vidx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 23909e6fea6a4f13725773367ee5d9074097fad2.cu | /*
Building xyz -> idx sparse tensor mapping
Written by Jiageng Mao
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "build_mapping_gpu.h"
#include "votr_cuda_utils.h"
// 32 bit Murmur3 hash
// unsigned int -> int, k >= 0, hash_size >0, should be ok?
__device__ int murmur_hash(int k, int hash_size) {
k ^= k >> 16;
k *= 0x85ebca6b;
k ^= k >> 13;
k *= 0xc2b2ae35;
k ^= k >> 16;
//return k & (hash_size-1);
return k % hash_size;
}
__device__ int hash(int k, int hash_size) {
return k % hash_size;
}
__device__ void hash_table_insert(int &key, int &value, int &hash_size, int *xyz_to_vidx) {
/*
xyz_to_idx (hash_size, 2) NO BATCH SIZE
*/
int hash_idx = hash(key, hash_size);
int prob_cnt = 0;
while(true) {
int prev_key = atomicCAS(xyz_to_vidx + hash_idx*2 + 0, EMPTY_KEY, key); // insert key when empty
if (prev_key == EMPTY_KEY || prev_key == key) {
xyz_to_vidx[hash_idx*2 + 1] = value; // insert value
break;
}
// linear probing
hash_idx = (hash_idx + 1) % hash_size;
// security in case of dead loop
prob_cnt += 1;
if (prob_cnt >= hash_size) break;
}
}
__global__ void downsample_with_tensor_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
/*
v_indices: [num_voxels, 4] bs + zyx indices of voxels
ds_v_indices: [bs, num_ds_voxels, 3] downsampled voxels, -1 if not unique
xyz_to_vidx: [bs, x_max, y_max, z_max] downsampled dense map
vcount: [bs]
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
int ds_z_idx = z_idx / z_stride;
int ds_y_idx = y_idx / y_stride;
int ds_x_idx = x_idx / x_stride;
if (ds_x_idx >= x_max || ds_x_idx < 0 || ds_y_idx < 0 || ds_y_idx >= y_max || ds_z_idx < 0 || ds_z_idx >= z_max) return;
xyz_to_vidx += bs_idx * x_max * y_max * z_max;
ds_v_indices += bs_idx * num_ds_voxels * 3;
int ret_v = atomicExch(xyz_to_vidx + ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx, BLK_SIGNAL);
if (ret_v == BLK_SIGNAL){ // kill all block threads
return;
} else if (ret_v != EMPTY_KEY) { // already occupied
ret_v = atomicExch(xyz_to_vidx + ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx, ret_v);
return;
} else if (ret_v == EMPTY_KEY) {
int v_idx = atomicAdd(vcount + bs_idx, 1);
ds_v_indices[v_idx * 3 + 0] = ds_z_idx;
ds_v_indices[v_idx * 3 + 1] = ds_y_idx;
ds_v_indices[v_idx * 3 + 2] = ds_x_idx;
ret_v = atomicExch(xyz_to_vidx + ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx, v_idx);
return;
}
}
void downsample_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
cudaError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
downsample_with_tensor_kernel<<<blocks, threads>>>(x_max, y_max, z_max, x_stride, y_stride, z_stride,
num_voxels, num_ds_voxels,
v_indices, ds_v_indices, xyz_to_vidx, vcount);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void downsample_with_hash_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels, int hash_size,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
/*
v_indices: [num_voxels, 4] bs + zyx indices of voxels
ds_v_indices: [bs, num_ds_voxels, 3] downsampled voxels, -1 if not unique
xyz_to_vidx: [bs, hash_size, 2] downsampled dense map
vcount: [bs]
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
int ds_z_idx = z_idx / z_stride;
int ds_y_idx = y_idx / y_stride;
int ds_x_idx = x_idx / x_stride;
if (ds_x_idx >= x_max || ds_x_idx < 0 || ds_y_idx < 0 || ds_y_idx >= y_max || ds_z_idx < 0 || ds_z_idx >= z_max) return;
xyz_to_vidx += bs_idx * hash_size * 2;
ds_v_indices += bs_idx * num_ds_voxels * 3;
int key = ds_x_idx * y_max * z_max + ds_y_idx * z_max + ds_z_idx;
// hash table with force insert, reject duplicates
int hash_idx = hash(key, hash_size);
int prob_cnt = 0;
while(true) {
int prev_key = atomicCAS(xyz_to_vidx + hash_idx*2 + 0, EMPTY_KEY, key); // insert key when empty
if (prev_key == EMPTY_KEY) {
int v_idx = atomicAdd(vcount + bs_idx, 1);
ds_v_indices[v_idx * 3 + 0] = ds_z_idx; // insert zyx to ds_indices
ds_v_indices[v_idx * 3 + 1] = ds_y_idx;
ds_v_indices[v_idx * 3 + 2] = ds_x_idx;
xyz_to_vidx[hash_idx*2 + 1] = v_idx; // insert value to hash table
break;
} else if (prev_key == key) { // already occupied
break;
}
// linear probing
hash_idx = (hash_idx + 1) % hash_size;
// security in case of dead loop
prob_cnt += 1;
if (prob_cnt >= hash_size) break;
}
}
void downsample_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride,
int num_voxels, int num_ds_voxels, int hash_size,
const int *v_indices, int *ds_v_indices, int *xyz_to_vidx, int *vcount) {
cudaError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
downsample_with_hash_kernel<<<blocks, threads>>>(x_max, y_max, z_max, x_stride, y_stride, z_stride,
num_voxels, num_ds_voxels, hash_size,
v_indices, ds_v_indices, xyz_to_vidx, vcount);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
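/*
 * Host-side preparation sketch (illustrative only, not part of the original file): the hash
 * kernels above expect xyz_to_vidx to be pre-filled with the EMPTY_KEY sentinel and vcount
 * to be zeroed before launch. Assuming the buffers have the shapes given in the kernel
 * docstrings, something along these lines would do:
 *
 * #include <thrust/device_ptr.h>
 * #include <thrust/fill.h>
 * thrust::device_ptr<int> table(xyz_to_vidx);
 * thrust::fill(table, table + batch_size * hash_size * 2, EMPTY_KEY);
 * cudaMemset(vcount, 0, batch_size * sizeof(int));
 */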
__global__ void build_mapping_with_tensor_kernel(int x_max, int y_max, int z_max, int num_voxels,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx) {
/*
v_indices: [num_voxels, 4] bs + zyx indices of voxels
xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return;
int v_sum = 0;
int bs_cnt = bs_idx - 1;
while(bs_cnt >= 0){
v_sum += v_bs_cnt[bs_cnt];
bs_cnt--;
}
int v_idx = th_idx - v_sum; // v_idx for this sample
xyz_to_vidx[bs_idx * x_max * y_max * z_max + x_idx * y_max * z_max + y_idx * z_max + z_idx] = v_idx;
}
void build_mapping_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx){
cudaError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
build_mapping_with_tensor_kernel<<<blocks, threads>>>(x_max, y_max, z_max, num_voxels, v_indices, v_bs_cnt, xyz_to_vidx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void build_mapping_with_hash_kernel(int x_max, int y_max, int z_max, int num_voxels, int hash_size,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx) {
/*
v_indices: [N1+N2, 4] bs zyx indices of voxels
v_bs_cnt: [bs] num_voxels in each sample
xyz_to_vidx: [B, hash_size, 2] hash table key-value for dim-2
*/
int th_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (th_idx >= num_voxels) return;
int bs_idx = v_indices[th_idx * 4 + 0];
int z_idx = v_indices[th_idx * 4 + 1];
int y_idx = v_indices[th_idx * 4 + 2];
int x_idx = v_indices[th_idx * 4 + 3];
int v_sum = 0;
int bs_cnt = bs_idx - 1;
while(bs_cnt >= 0){
v_sum += v_bs_cnt[bs_cnt];
bs_cnt--;
}
int v_idx = th_idx - v_sum; // v_idx for this sample
xyz_to_vidx += bs_idx * hash_size * 2;
if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; // out of bound
// key -> [x_max, y_max, z_max] value -> v_idx
int key = x_idx * y_max * z_max + y_idx * z_max + z_idx;
hash_table_insert(key, v_idx, hash_size, xyz_to_vidx);
return;
}
void build_mapping_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int hash_size,
const int *v_indices, const int *v_bs_cnt, int *xyz_to_vidx){
cudaError_t err;
dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
build_mapping_with_hash_kernel<<<blocks, threads>>>(x_max, y_max, z_max, num_voxels, hash_size,
v_indices, v_bs_cnt, xyz_to_vidx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
e603e310e6d3a98f08456fdea19a7222067288c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iomanip>
#include <omp.h>
#include <types.h>
#include <my_errors.h>
#include <kernel.h>
#include <functions.h>
#include <utilis.h>
using namespace std;
//extern "C"
HostError Hermite6th(const double TTIME, double* GTIME, double* ATIME, double* local_time, double* step, const unsigned int N, const unsigned int M, double4* pos_PH, float4* vel_PH, double4* pos_CH, double4* vel_CH, double4* a_H0, const unsigned int MAXDIM, unsigned int NGPU, unsigned int TPB, int rank, int size, unsigned int BFMAX, double ETA6, double ETA4, double DTMAX, double DTMIN, double EPS, double DTPRINT, unsigned int FMAX, const bool warm, double GTW, unsigned int GPUMINTHREADS, double plummer_core, double plummer_mass, double rscale, double mscale, vector<unsigned int> devices, bool *cleanstop, string path){
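// High-level flow: allocate per-GPU buffers and upload the initial state, then loop until
// GTIME reaches TTIME. Each iteration selects the particles whose block time step expires
// next, predicts positions/velocities on the GPUs, evaluates forces (split over GPUs and,
// outside APE mode, reduced across MPI ranks), applies the 6th-order Hermite corrector
// (on GPU or CPU depending on GPUCORR/APE), and writes snapshots plus energy diagnostics
// every DTPRINT time units.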
int i = 0;
unsigned int ompthreads = 1; // N must be integer multiple of this number
omp_set_num_threads( ompthreads );
unsigned int* vetint = new unsigned int [N];
unsigned int *counter = new unsigned int [ompthreads];
int *next = new int [N];
unsigned long nextsize = N;
double NEXTOUT = DTPRINT;
double4 **pos_PD = new double4* [NGPU];
float4 **vel_PD = new float4* [NGPU];
float4 **acc_PD = new float4* [NGPU];
double4 **pos_CD = new double4* [NGPU];
double4 **vel_CD = new double4* [NGPU];
int **next_D = new int* [NGPU];
double **loc_D = new double* [NGPU];
#ifdef APE
double **step_D = new double* [NGPU];
#endif
#ifdef GPUCORR
double **step_D = new double* [NGPU];
#endif
double4 **a_D = new double4* [NGPU];
double4 **a1_D = new double4* [NGPU];
double4 **a2_D = new double4* [NGPU];
double4 **a_tot_D = new double4* [NGPU];
double4 **a1_tot_D = new double4* [NGPU];
double4 **a2_tot_D = new double4* [NGPU];
double4 **a3_D = new double4* [NGPU];
double4 **p_v_a3_Dev = new double4* [NGPU];
double4 **a_temp_Dev = new double4* [NGPU];
unsigned int malloc_size = MAXDIM*sizeof(double4); //it contains a, adot, a2dots sequentially
unsigned int malloc_db4 = nextsize*sizeof(double4);
unsigned int malloc_fl4 = nextsize*sizeof(float4);
unsigned int malloc_ui = nextsize*sizeof(unsigned int);
unsigned int malloc_db = nextsize*sizeof(double);
unsigned int malloc_db4_N = N*sizeof(double4);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
#ifdef GPUCORR
DeviceSafeCall(hipHostMalloc((void**)&step_D[i], malloc_db, hipHostMallocMapped));
#endif
DeviceSafeCall(hipMalloc((void **)&a_D[i], malloc_size));
DeviceSafeCall(hipMalloc((void **)&a1_D[i], malloc_size));
DeviceSafeCall(hipMalloc((void **)&a2_D[i], malloc_size));
DeviceSafeCall(hipMalloc((void **)&a_tot_D[i], malloc_db4_N));
DeviceSafeCall(hipMalloc((void **)&a1_tot_D[i], malloc_db4_N));
DeviceSafeCall(hipMalloc((void **)&a2_tot_D[i], malloc_db4_N));
DeviceSafeCall(hipMalloc((void **)&pos_PD[i], malloc_db4));
DeviceSafeCall(hipMalloc((void **)&pos_CD[i], malloc_db4));
DeviceSafeCall(hipMalloc((void **)&vel_CD[i], malloc_db4));
DeviceSafeCall(hipMalloc((void **)&vel_PD[i], malloc_fl4));
DeviceSafeCall(hipMalloc((void **)&acc_PD[i], malloc_fl4));
DeviceSafeCall(hipMalloc((void **)&next_D[i], malloc_ui));
DeviceSafeCall(hipMalloc((void **)&loc_D[i], malloc_db));
#ifdef APE
DeviceSafeCall(hipMalloc((void **)&step_D[i], malloc_db));
#endif
DeviceSafeCall(hipMalloc((void **)&a3_D[i], malloc_db4_N));
DeviceSafeCall(hipMalloc((void **)&p_v_a3_Dev[i], 3*malloc_db4_N));
DeviceSafeCall(hipMalloc((void **)&a_temp_Dev[i], 3*malloc_db4_N));
DeviceSafeCall(hipMemcpy( pos_PD[i], pos_PH, malloc_db4, hipMemcpyHostToDevice ));
DeviceSafeCall(hipMemcpy( vel_PD[i], vel_PH, malloc_fl4, hipMemcpyHostToDevice ));
DeviceSafeCall(hipMemcpy( pos_CD[i], pos_CH, malloc_db4, hipMemcpyHostToDevice ));
DeviceSafeCall(hipMemcpy( vel_CD[i], vel_CH, malloc_db4, hipMemcpyHostToDevice ));
DeviceSafeCall(hipMemcpy(a_tot_D[i], a_H0, malloc_db4_N, hipMemcpyHostToDevice));
DeviceSafeCall(hipMemcpy(a1_tot_D[i], &a_H0[N], malloc_db4_N, hipMemcpyHostToDevice));
DeviceSafeCall(hipMemcpy(a2_tot_D[i], &a_H0[2*N], malloc_db4_N, hipMemcpyHostToDevice));
DeviceSafeCall(hipMemcpy( loc_D[i], local_time, malloc_db, hipMemcpyHostToDevice));
#ifdef APE
DeviceSafeCall(hipMemcpy( step_D[i], step, malloc_db, hipMemcpyHostToDevice));
#endif
#ifdef APE
for(unsigned int j = 0; j < NGPU; j++){
if(j != i)
DeviceSafeCall(hipDeviceEnablePeerAccess(devices[j], 0));
}
#endif
}
#ifdef GPUCORR
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
for(unsigned int j = 0; j < N; j++)
step_D[i][j] = step[j];
}
#endif
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
int BL = ceil((double)N/TPB);
hipLaunchKernelGGL(( initvectors), dim3(BL), dim3(TPB), 0, 0, a3_D[i], acc_PD[i]);
}
unsigned int ppG = N/(NGPU*size);
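// ppG = particles handled by each GPU of each MPI process; the integer division appears to
// assume N is an exact multiple of NGPU*size.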
unsigned int Bfactor;
unsigned long BLOCKS;
unsigned int DIMENSION, THREADS, bfmax, SHARED;
double *mpi_red_aux = new double [3*N];
double *mpi_red = new double [3*N];
double4* a_H1 = new double4 [3*N];
double4* p_v_a3 = new double4 [3*N];
double4* a3_H = new double4 [N];
double4 *a_H = new double4 [NGPU*3*N];
for(unsigned int i = 0; i < 3*N; i++)
a_H1[i] = a_H0[i];
double E0, kk0, pp0;
int out_index = 1;
ofstream stream;
string temp;
char *output_name;
if(warm){
while(NEXTOUT <= GTW) NEXTOUT += DTPRINT;
while(NEXTOUT <= *GTIME) NEXTOUT += DTPRINT;
if(rank == 0)
HostSafeCall(AcquireEnergy(&E0, path));
}
else{
HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk0, &pp0, plummer_core, plummer_mass, rscale, mscale, devices));
E0 = kk0 + pp0;
temp = path + "energy.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<scientific<<setprecision(16);
stream<<0.0<<" "<<0.0<<" "<<kk0<<" "<<pp0<<" "<<2.*kk0/fabs(pp0)<<endl;
stream.close();
}
if(rank == 0){
temp = path + "HiGPUslog.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<"==============================================="<<endl;
stream<<scientific<<setprecision(16);
stream<<"#Initial Total Energy : #"<<E0<<"#"<<endl;
stream.close();
string str = to_string(FMAX) + ".dat";
stream.open(to_char(str), ios::out);
stream << "here!"<<endl;
for(unsigned int i = 0; i < M; i++)
stream<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
stream.close();
// int i=0;
cout<<"POST_ENERGy"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
}
MPISafeCall(MPI_Barrier(MPI_COMM_WORLD));
double start = 0.0;
double end = 0.0;
double start_program = 0.0;
double end_program = 0.0;
HiGPUsTimes *Times;
Times = new HiGPUsTimes [N+1];
for(unsigned int i = 0; i <= N; i++){
Times[i].next_time = 0.0;
Times[i].cpynext_time = 0.0;
Times[i].predictor_time = 0.0;
Times[i].evaluation_time = 0.0;
Times[i].reduce_time = 0.0;
Times[i].reposition_time = 0.0;
Times[i].memcpy2_time = 0.0;
Times[i].mpireduce_time = 0.0;
Times[i].corrector_time = 0.0;
Times[i].reconstruct_time = 0.0;
Times[i].energy_time = 0.0;
Times[i].rtime = 0.0;
Times[i].thr = 0.0;
Times[i].totthr = 0.0;
Times[i].bfac = 0.0;
}
// HostSafeCall(GPU_memcheck(NGPU, devices, __FILE__, __LINE__));
HostSafeCall(CPU_memcheck(__FILE__, __LINE__, path));
struct timeval tv;
gettimeofday(&tv, NULL);
int sec = tv.tv_sec;
int microsec = tv.tv_usec;
start_program = sec + microsec * 0.000001;
if(rank == 0)
HostSafeCall(CheckHDDMemory(cleanstop, path));
MPISafeCall(MPI_Barrier(MPI_COMM_WORLD));
MPISafeCall(MPI_Bcast(cleanstop, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD));
if(*cleanstop)
return HNoError;
#ifdef EXPANDING
double plum0 = plummer_core;
#endif
do{
if(*GTIME >= TTIME){
//ClearAll();
gettimeofday(&tv, NULL);
int sec = tv.tv_sec;
int microsec = tv.tv_usec;
end_program = sec + microsec * 0.000001;
delete [] p_v_a3;
delete [] a3_H;
delete [] a_H;
delete [] a_H1;
delete [] mpi_red_aux;
delete [] mpi_red;
delete [] next;
delete [] vetint;
delete [] counter;
delete [] Times;
if(rank == 0){
temp = path + "HiGPUslog.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<scientific<<setprecision(6);
stream<<" \n Total integration time : "<<end_program-start_program<<" seconds "<<endl;
stream.close();
}
return HNoError;
}
get_times(&start);
#ifdef GPUCORR
HostSafeCall(NextParticles(N, ompthreads, counter, vetint, *ATIME, local_time, step_D[0], next, &nextsize));
#else
HostSafeCall(NextParticles(N, ompthreads, counter, vetint, *ATIME, local_time, step, next, &nextsize));
#endif
*GTIME = *ATIME;
#ifdef EXPANDING
plummer_core = plum0*exp(GTIME);
#endif
unsigned int dim2 = ceil((double)nextsize/TPB)*TPB;
for(unsigned int i = nextsize; i < dim2; i++)
next[i] = -1;
get_times(&end);
set_times(end-start, &(Times[nextsize].next_time));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceSafeCall(hipMemcpy(next_D[i], next, dim2 * sizeof( int ), hipMemcpyHostToDevice));
}
get_times(&end);
set_times(end-start, &(Times[nextsize].cpynext_time));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
int BL = ppG/TPB + ceil((double)nextsize/TPB);
int istart = ppG*(i+rank*NGPU);
hipLaunchKernelGGL(( Predictor) , dim3(BL), dim3(TPB), 0, 0, *GTIME, pos_PD[i], vel_PD[i], acc_PD[i], pos_CD[i], vel_CD[i],
loc_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a3_D[i], istart,
next_D[i], ppG, N);
cout << istart<<" "<<BL<<" "<<TPB<<" ";
}
//int i = 0;
cout<<" Predictor"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
get_times(&end);
set_times(end-start, &(Times[nextsize].predictor_time));
THREADS = TPB;
bfmax = BFMAX;
*ATIME = 1.0e+10;
Bfactor = 1;
BLOCKS = ceil((double)nextsize/THREADS);
while(THREADS*BLOCKS < GPUMINTHREADS && THREADS > 32){
THREADS /= 2;
bfmax *= 2;
BLOCKS = ceil((double)nextsize/THREADS);
}
DIMENSION = THREADS*BLOCKS;
while(THREADS*BLOCKS < GPUMINTHREADS && Bfactor < bfmax){
BLOCKS *= 2;
Bfactor *= 2;
}
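// At this point the launch is sized so THREADS*BLOCKS stays above GPUMINTHREADS when few
// particles are active: the block size is shrunk first, then the force sum for each active
// particle is split into Bfactor partial sums computed by separate blocks, which are summed
// back together by the reduce passes further down.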
SHARED = THREADS * (sizeof(double4) + 2 * sizeof(float4));
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceSafeCall(hipDeviceSynchronize());
}
set_times((double)Bfactor, &(Times[nextsize].bfac));
set_times((double)THREADS, &(Times[nextsize].thr));
set_times((double)BLOCKS*THREADS, &(Times[nextsize].totthr));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceCheckErrors();
int istart = ppG*(i+rank*NGPU);
hipLaunchKernelGGL(( evaluation), dim3(BLOCKS), dim3(THREADS), SHARED , 0, N, pos_PD[i], vel_PD[i], acc_PD[i], a_D[i], a1_D[i], a2_D[i],
istart, ppG, Bfactor, DIMENSION, next_D[i], loc_D[i], *GTIME, EPS, plummer_core, plummer_mass, rscale, mscale);
cout<<"evaluation"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
}
get_times(&end);
set_times(end-start, &(Times[nextsize].evaluation_time));
int bl = BLOCKS;
int bf = Bfactor;
SHARED = THREADS * sizeof(double4);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceSafeCall(hipDeviceSynchronize());
DeviceCheckErrors();
}
get_times(&start);
while(bf != 1){
bl>>=1;
bf>>=1;
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
hipLaunchKernelGGL(( reduce), dim3(3*bl), dim3(THREADS), SHARED, 0, a_D[i], a1_D[i], a2_D[i], bf, DIMENSION);
}
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceSafeCall(hipDeviceSynchronize());
DeviceCheckErrors();
}
}
get_times(&end);
set_times(end-start, &(Times[nextsize].reduce_time));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
hipLaunchKernelGGL(( reposition), dim3(DIMENSION/THREADS), dim3(THREADS), 0, 0, a_D[i], a1_D[i], a2_D[i], a_temp_Dev[i], nextsize);
}
get_times(&end);
set_times(end-start, &(Times[nextsize].reposition_time));
unsigned int cpy_size = 3*nextsize;
#ifdef APE
for(unsigned int i = 1; i < NGPU; i++){
int SHRD = THREADS*sizeof(double4);
DeviceSafeCall(hipSetDevice(devices[0]));
DeviceSafeCall(hipMemcpyPeer(p_v_a3_Dev[0], devices[0], a_temp_Dev[i], devices[i], cpy_size*sizeof(double4)));
hipLaunchKernelGGL(( sum_partial), dim3(3*DIMENSION/THREADS), dim3(THREADS), SHRD, 0, p_v_a3_Dev[0], a_temp_Dev[0], 3*nextsize);
DeviceSafeCall(hipDeviceSynchronize());
}
// HERE YOU SHOULD ADD THE FUNCTION THAT REDUCES A_TEMP_DEV ON ALL GPU 0 AND THEN GIVES IT TO ALL GPUS OF ALL NODES: not yet implemented
#else
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceCheckErrors();
DeviceSafeCall(hipMemcpy(&a_H[i*cpy_size], a_temp_Dev[i], cpy_size*sizeof(double4), hipMemcpyDeviceToHost));
}
get_times(&end);
set_times(end-start, &(Times[nextsize].memcpy2_time));
get_times(&start);
HostSafeCall(ReduceAll(cpy_size, N, NGPU, nextsize, a_H, a_H1, mpi_red_aux, mpi_red, next));
get_times(&end);
set_times(end-start, &(Times[nextsize].mpireduce_time));
#endif
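// Three corrector variants follow: GPUCORR runs Corrector_gpu with the time-step array in
// mapped host memory, APE runs Corrector_gpu with device-resident steps that are copied back
// afterwards, and the default path runs the Hermite corrector on the CPU and pushes the
// corrected state back to the GPUs via Reconstruct.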
#ifdef GPUCORR
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceSafeCall(hipMemcpy(a_temp_Dev[i], a_H, 3*nextsize*sizeof( double4 ), hipMemcpyHostToDevice ));
}
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
hipLaunchKernelGGL(( Corrector_gpu), dim3(DIMENSION/THREADS), dim3(THREADS), 0, 0, *GTIME, loc_D[i], step_D[i], next_D[i], nextsize, pos_CD[i], vel_CD[i],
a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a_temp_Dev[i], a3_D[i], ETA6, ETA4, DTMAX, DTMIN, N); // call corrector directly
}
DeviceSafeCall(hipSetDevice(devices[0]));
DeviceSafeCall(hipDeviceSynchronize());
for(unsigned int i = 0; i < nextsize; i++){
int who = next[i];
local_time[who] = *GTIME;
*ATIME = min (local_time[who] + step_D[0][who], *ATIME);
}
get_times(&end);
set_times(end-start, &(Times[nextsize].corrector_time));
#elif APE
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
hipLaunchKernelGGL(( Corrector_gpu), dim3(DIMENSION/THREADS), dim3(THREADS), 0, 0, *GTIME, loc_D[i], step_D[i], next_D[i], nextsize, pos_CD[i], vel_CD[i],
a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a_temp_Dev[i], a3_D[i], ETA6, ETA4, DTMAX, DTMIN, N); // call corrector directly
}
DeviceSafeCall(hipSetDevice(devices[0]));
DeviceSafeCall(hipDeviceSynchronize());
DeviceSafeCall(hipMemcpy(step, step_D[0], N*sizeof(double), hipMemcpyDeviceToHost));
for(unsigned int i = 0; i < nextsize; i++){
int who = next[i];
local_time[who] = *GTIME;
*ATIME = min (local_time[who] + step[who], *ATIME);
}
#else
// corrector on CPU
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
hipLaunchKernelGGL(( update_local_time), dim3(DIMENSION/THREADS), dim3(THREADS), 0, 0, next_D[i], loc_D[i], *GTIME);
}
get_times(&start);
HostSafeCall(Corrector(GTIME, ATIME, local_time, step, next, nextsize, pos_CH, vel_CH, a_H0,
a_H1, a3_H, p_v_a3, ETA6, ETA4, DTMAX, DTMIN, N));
cout<<"Corrector"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
get_times(&end);
set_times(end-start, &(Times[nextsize].corrector_time));
#endif
#ifndef APE
#ifndef GPUCORR
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
DeviceSafeCall(hipMemcpy(p_v_a3_Dev[i], p_v_a3, 3*nextsize*sizeof( double4 ), hipMemcpyHostToDevice ));
DeviceSafeCall(hipMemcpy(a_temp_Dev[i], a_H, 3*nextsize*sizeof( double4 ), hipMemcpyHostToDevice ));
}
get_times(&end);
set_times(end-start, &(Times[nextsize].rtime));
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(hipSetDevice(devices[i]));
int BB = 6*DIMENSION/THREADS;
hipLaunchKernelGGL(( Reconstruct), dim3(BB), dim3(THREADS) , 0, 0, next_D[i], nextsize, pos_CD[i], vel_CD[i], a3_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], p_v_a3_Dev[i], a_temp_Dev[i]);
}
get_times(&end);
set_times(end-start, &(Times[nextsize].reconstruct_time));
#endif
#endif
cout<<"zeit_if" << *GTIME <<" "<<GTW<<" "<<NEXTOUT<<endl;
if((*GTIME+GTW) >= NEXTOUT ){
DeviceSafeCall(hipSetDevice(devices[0]));
DeviceSafeCall(hipMemcpy( pos_CH, pos_CD[0], malloc_db4, hipMemcpyDeviceToHost ));
DeviceSafeCall(hipMemcpy( vel_CH, vel_CD[0], malloc_db4, hipMemcpyDeviceToHost ));
CheckBlocks(step, M, path);
double kk,pp;
get_times(&start);
HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk, &pp, plummer_core, plummer_mass, rscale, mscale, devices));
get_times(&end);
set_times(end-start, &(Times[nextsize].energy_time));
if(rank == 0){
HostSafeCall(CheckHDDMemory(cleanstop, path));
#ifdef CHECK_TIMES
string ffff = to_string(out_index + FMAX);
ffff = path + "times_"+ffff+".dat";
stream.open(to_char(ffff), ios::out);
stream<<scientific<<setprecision(6);
stream<<"N "<<" NEXT "<<" CPY_NEXT"<<" PRED "<<" EVAL "<<" REDU "<<" REPOS "<<" CPY_ACC "<<" MPI "<<" CORR "<<" CPY_REC "<<" RECON "<<" THREADS "<<" TOTTHREAD "<<" BFACT "<<endl;
for(unsigned int i = 1; i <= N; i++){
if(Times[i].next_time != 0.0)
stream<<i<<" "<<
Times[i].next_time<<" "<<
Times[i].cpynext_time<<" "<<
Times[i].predictor_time<<" "<<
Times[i].evaluation_time<<" "<<
Times[i].reduce_time<<" "<<
Times[i].reposition_time<<" "<<
Times[i].memcpy2_time<<" "<<
Times[i].mpireduce_time<<" "<<
Times[i].corrector_time<<" "<<
Times[i].rtime<<" "<<
Times[i].reconstruct_time<<" "<<
Times[i].thr<<" "<<
Times[i].totthr<<" "<<
Times[i].bfac<<endl;
Times[i].next_time = 0.0;
}
stream.close();
#endif
double E = kk + pp;
temp = path + "energy.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<scientific<<setprecision(16);
stream<<*GTIME+GTW<<" "<<fabs((E-E0)/E0)<<" "<<kk<<" "<<pp<<" "<<2.*kk/fabs(pp)<<endl;
stream.close();
string file_name = path + to_string(out_index + FMAX);
file_name += ".dat";
stream.open(to_char(file_name), ios::out);
stream<<scientific<<setprecision(16);
for(unsigned int i = 0; i < M; i++)
stream<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
stream.close();
out_index++;
}
//int i = 0;
cout<<"short"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
MPISafeCall(MPI_Barrier(MPI_COMM_WORLD));
MPISafeCall(MPI_Bcast(cleanstop, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD));
if(*cleanstop)
return HNoError;
NEXTOUT+=DTPRINT;
}
}while(1);
}
| e603e310e6d3a98f08456fdea19a7222067288c7.cu | #include <iomanip>
#include <omp.h>
#include <types.h>
#include <my_errors.h>
#include <kernel.h>
#include <functions.h>
#include <utilis.h>
using namespace std;
//extern "C"
HostError Hermite6th(const double TTIME, double* GTIME, double* ATIME, double* local_time, double* step, const unsigned int N, const unsigned int M, double4* pos_PH, float4* vel_PH, double4* pos_CH, double4* vel_CH, double4* a_H0, const unsigned int MAXDIM, unsigned int NGPU, unsigned int TPB, int rank, int size, unsigned int BFMAX, double ETA6, double ETA4, double DTMAX, double DTMIN, double EPS, double DTPRINT, unsigned int FMAX, const bool warm, double GTW, unsigned int GPUMINTHREADS, double plummer_core, double plummer_mass, double rscale, double mscale, vector<unsigned int> devices, bool *cleanstop, string path){
int i = 0;
unsigned int ompthreads = 1; // N must be integer multiple of this number
omp_set_num_threads( ompthreads );
unsigned int* vetint = new unsigned int [N];
unsigned int *counter = new unsigned int [ompthreads];
int *next = new int [N];
unsigned long nextsize = N;
double NEXTOUT = DTPRINT;
double4 **pos_PD = new double4* [NGPU];
float4 **vel_PD = new float4* [NGPU];
float4 **acc_PD = new float4* [NGPU];
double4 **pos_CD = new double4* [NGPU];
double4 **vel_CD = new double4* [NGPU];
int **next_D = new int* [NGPU];
double **loc_D = new double* [NGPU];
#ifdef APE
double **step_D = new double* [NGPU];
#endif
#ifdef GPUCORR
double **step_D = new double* [NGPU];
#endif
double4 **a_D = new double4* [NGPU];
double4 **a1_D = new double4* [NGPU];
double4 **a2_D = new double4* [NGPU];
double4 **a_tot_D = new double4* [NGPU];
double4 **a1_tot_D = new double4* [NGPU];
double4 **a2_tot_D = new double4* [NGPU];
double4 **a3_D = new double4* [NGPU];
double4 **p_v_a3_Dev = new double4* [NGPU];
double4 **a_temp_Dev = new double4* [NGPU];
unsigned int malloc_size = MAXDIM*sizeof(double4); //it contains a, adot, a2dots sequentially
unsigned int malloc_db4 = nextsize*sizeof(double4);
unsigned int malloc_fl4 = nextsize*sizeof(float4);
unsigned int malloc_ui = nextsize*sizeof(unsigned int);
unsigned int malloc_db = nextsize*sizeof(double);
unsigned int malloc_db4_N = N*sizeof(double4);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
#ifdef GPUCORR
DeviceSafeCall(cudaHostAlloc((void**)&step_D[i], malloc_db, cudaHostAllocMapped));
#endif
DeviceSafeCall(cudaMalloc((void **)&a_D[i], malloc_size));
DeviceSafeCall(cudaMalloc((void **)&a1_D[i], malloc_size));
DeviceSafeCall(cudaMalloc((void **)&a2_D[i], malloc_size));
DeviceSafeCall(cudaMalloc((void **)&a_tot_D[i], malloc_db4_N));
DeviceSafeCall(cudaMalloc((void **)&a1_tot_D[i], malloc_db4_N));
DeviceSafeCall(cudaMalloc((void **)&a2_tot_D[i], malloc_db4_N));
DeviceSafeCall(cudaMalloc((void **)&pos_PD[i], malloc_db4));
DeviceSafeCall(cudaMalloc((void **)&pos_CD[i], malloc_db4));
DeviceSafeCall(cudaMalloc((void **)&vel_CD[i], malloc_db4));
DeviceSafeCall(cudaMalloc((void **)&vel_PD[i], malloc_fl4));
DeviceSafeCall(cudaMalloc((void **)&acc_PD[i], malloc_fl4));
DeviceSafeCall(cudaMalloc((void **)&next_D[i], malloc_ui));
DeviceSafeCall(cudaMalloc((void **)&loc_D[i], malloc_db));
#ifdef APE
DeviceSafeCall(cudaMalloc((void **)&step_D[i], malloc_db));
#endif
DeviceSafeCall(cudaMalloc((void **)&a3_D[i], malloc_db4_N));
DeviceSafeCall(cudaMalloc((void **)&p_v_a3_Dev[i], 3*malloc_db4_N));
DeviceSafeCall(cudaMalloc((void **)&a_temp_Dev[i], 3*malloc_db4_N));
DeviceSafeCall(cudaMemcpy( pos_PD[i], pos_PH, malloc_db4, cudaMemcpyHostToDevice ));
DeviceSafeCall(cudaMemcpy( vel_PD[i], vel_PH, malloc_fl4, cudaMemcpyHostToDevice ));
DeviceSafeCall(cudaMemcpy( pos_CD[i], pos_CH, malloc_db4, cudaMemcpyHostToDevice ));
DeviceSafeCall(cudaMemcpy( vel_CD[i], vel_CH, malloc_db4, cudaMemcpyHostToDevice ));
DeviceSafeCall(cudaMemcpy(a_tot_D[i], a_H0, malloc_db4_N, cudaMemcpyHostToDevice));
DeviceSafeCall(cudaMemcpy(a1_tot_D[i], &a_H0[N], malloc_db4_N, cudaMemcpyHostToDevice));
DeviceSafeCall(cudaMemcpy(a2_tot_D[i], &a_H0[2*N], malloc_db4_N, cudaMemcpyHostToDevice));
DeviceSafeCall(cudaMemcpy( loc_D[i], local_time, malloc_db, cudaMemcpyHostToDevice));
#ifdef APE
DeviceSafeCall(cudaMemcpy( step_D[i], step, malloc_db, cudaMemcpyHostToDevice));
#endif
#ifdef APE
for(unsigned int j = 0; j < NGPU; j++){
if(j != i)
DeviceSafeCall(cudaDeviceEnablePeerAccess(devices[j], 0));
}
#endif
}
#ifdef GPUCORR
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
for(unsigned int j = 0; j < N; j++)
step_D[i][j] = step[j];
}
#endif
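// With GPUCORR the per-particle time steps live in mapped (zero-copy) host memory
// (cudaHostAlloc with cudaHostAllocMapped above), so this host-side initialisation and the
// later updates written by Corrector_gpu are shared between host and device without explicit
// memcpys; passing step_D[i] straight to the kernel relies on unified addressing.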
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
int BL = ceil((double)N/TPB);
initvectors<<<BL, TPB>>>(a3_D[i], acc_PD[i]);
}
unsigned int ppG = N/(NGPU*size);
unsigned int Bfactor;
unsigned long BLOCKS;
unsigned int DIMENSION, THREADS, bfmax, SHARED;
double *mpi_red_aux = new double [3*N];
double *mpi_red = new double [3*N];
double4* a_H1 = new double4 [3*N];
double4* p_v_a3 = new double4 [3*N];
double4* a3_H = new double4 [N];
double4 *a_H = new double4 [NGPU*3*N];
for(unsigned int i = 0; i < 3*N; i++)
a_H1[i] = a_H0[i];
double E0, kk0, pp0;
int out_index = 1;
ofstream stream;
string temp;
char *output_name;
if(warm){
while(NEXTOUT <= GTW) NEXTOUT += DTPRINT;
while(NEXTOUT <= *GTIME) NEXTOUT += DTPRINT;
if(rank == 0)
HostSafeCall(AcquireEnergy(&E0, path));
}
else{
HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk0, &pp0, plummer_core, plummer_mass, rscale, mscale, devices));
E0 = kk0 + pp0;
temp = path + "energy.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<scientific<<setprecision(16);
stream<<0.0<<" "<<0.0<<" "<<kk0<<" "<<pp0<<" "<<2.*kk0/fabs(pp0)<<endl;
stream.close();
}
if(rank == 0){
temp = path + "HiGPUslog.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<"==============================================="<<endl;
stream<<scientific<<setprecision(16);
stream<<"#Initial Total Energy : #"<<E0<<"#"<<endl;
stream.close();
string str = to_string(FMAX) + ".dat";
stream.open(to_char(str), ios::out);
stream << "here!"<<endl;
for(unsigned int i = 0; i < M; i++)
stream<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
stream.close();
// int i=0;
cout<<"POST_ENERGy"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
}
MPISafeCall(MPI_Barrier(MPI_COMM_WORLD));
double start = 0.0;
double end = 0.0;
double start_program = 0.0;
double end_program = 0.0;
HiGPUsTimes *Times;
Times = new HiGPUsTimes [N+1];
for(unsigned int i = 0; i <= N; i++){
Times[i].next_time = 0.0;
Times[i].cpynext_time = 0.0;
Times[i].predictor_time = 0.0;
Times[i].evaluation_time = 0.0;
Times[i].reduce_time = 0.0;
Times[i].reposition_time = 0.0;
Times[i].memcpy2_time = 0.0;
Times[i].mpireduce_time = 0.0;
Times[i].corrector_time = 0.0;
Times[i].reconstruct_time = 0.0;
Times[i].energy_time = 0.0;
Times[i].rtime = 0.0;
Times[i].thr = 0.0;
Times[i].totthr = 0.0;
Times[i].bfac = 0.0;
}
// HostSafeCall(GPU_memcheck(NGPU, devices, __FILE__, __LINE__));
HostSafeCall(CPU_memcheck(__FILE__, __LINE__, path));
struct timeval tv;
gettimeofday(&tv, NULL);
int sec = tv.tv_sec;
int microsec = tv.tv_usec;
start_program = sec + microsec * 0.000001;
if(rank == 0)
HostSafeCall(CheckHDDMemory(cleanstop, path));
MPISafeCall(MPI_Barrier(MPI_COMM_WORLD));
MPISafeCall(MPI_Bcast(cleanstop, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD));
if(*cleanstop)
return HNoError;
#ifdef EXPANDING
double plum0 = plummer_core;
#endif
do{
if(*GTIME >= TTIME){
//ClearAll();
gettimeofday(&tv, NULL);
int sec = tv.tv_sec;
int microsec = tv.tv_usec;
end_program = sec + microsec * 0.000001;
delete [] p_v_a3;
delete [] a3_H;
delete [] a_H;
delete [] a_H1;
delete [] mpi_red_aux;
delete [] mpi_red;
delete [] next;
delete [] vetint;
delete [] counter;
delete [] Times;
if(rank == 0){
temp = path + "HiGPUslog.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<scientific<<setprecision(6);
stream<<" \n Total integration time : "<<end_program-start_program<<" seconds "<<endl;
stream.close();
}
return HNoError;
}
get_times(&start);
#ifdef GPUCORR
HostSafeCall(NextParticles(N, ompthreads, counter, vetint, *ATIME, local_time, step_D[0], next, &nextsize));
#else
HostSafeCall(NextParticles(N, ompthreads, counter, vetint, *ATIME, local_time, step, next, &nextsize));
#endif
*GTIME = *ATIME;
#ifdef EXPANDING
plummer_core = plum0*exp(GTIME);
#endif
unsigned int dim2 = ceil((double)nextsize/TPB)*TPB;
for(unsigned int i = nextsize; i < dim2; i++)
next[i] = -1;
get_times(&end);
set_times(end-start, &(Times[nextsize].next_time));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceSafeCall(cudaMemcpy(next_D[i], next, dim2 * sizeof( int ), cudaMemcpyHostToDevice));
}
get_times(&end);
set_times(end-start, &(Times[nextsize].cpynext_time));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
int BL = ppG/TPB + ceil((double)nextsize/TPB);
int istart = ppG*(i+rank*NGPU);
Predictor <<<BL, TPB>>> (*GTIME, pos_PD[i], vel_PD[i], acc_PD[i], pos_CD[i], vel_CD[i],
loc_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a3_D[i], istart,
next_D[i], ppG, N);
cout << istart<<" "<<BL<<" "<<TPB<<" ";
}
//int i = 0;
cout<<" Predictor"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
get_times(&end);
set_times(end-start, &(Times[nextsize].predictor_time));
THREADS = TPB;
bfmax = BFMAX;
*ATIME = 1.0e+10;
Bfactor = 1;
BLOCKS = ceil((double)nextsize/THREADS);
while(THREADS*BLOCKS < GPUMINTHREADS && THREADS > 32){
THREADS /= 2;
bfmax *= 2;
BLOCKS = ceil((double)nextsize/THREADS);
}
DIMENSION = THREADS*BLOCKS;
while(THREADS*BLOCKS < GPUMINTHREADS && Bfactor < bfmax){
BLOCKS *= 2;
Bfactor *= 2;
}
SHARED = THREADS * (sizeof(double4) + 2 * sizeof(float4));
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceSafeCall(cudaDeviceSynchronize());
}
set_times((double)Bfactor, &(Times[nextsize].bfac));
set_times((double)THREADS, &(Times[nextsize].thr));
set_times((double)BLOCKS*THREADS, &(Times[nextsize].totthr));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceCheckErrors();
int istart = ppG*(i+rank*NGPU);
evaluation<<< BLOCKS, THREADS, SHARED >>> ( N, pos_PD[i], vel_PD[i], acc_PD[i], a_D[i], a1_D[i], a2_D[i],
istart, ppG, Bfactor, DIMENSION, next_D[i], loc_D[i], *GTIME, EPS, plummer_core, plummer_mass, rscale, mscale);
cout<<"evaluation"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
}
get_times(&end);
set_times(end-start, &(Times[nextsize].evaluation_time));
int bl = BLOCKS;
int bf = Bfactor;
SHARED = THREADS * sizeof(double4);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceSafeCall(cudaDeviceSynchronize());
DeviceCheckErrors();
}
get_times(&start);
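// Reduce the Bfactor partial accelerations produced by the split evaluation blocks down to one value per active particle.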
while(bf != 1){
bl>>=1;
bf>>=1;
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
reduce<<< 3*bl, THREADS, SHARED>>>(a_D[i], a1_D[i], a2_D[i], bf, DIMENSION);
}
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceSafeCall(cudaDeviceSynchronize());
DeviceCheckErrors();
}
}
get_times(&end);
set_times(end-start, &(Times[nextsize].reduce_time));
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
reposition<<<DIMENSION/THREADS, THREADS>>>(a_D[i], a1_D[i], a2_D[i], a_temp_Dev[i], nextsize);
}
get_times(&end);
set_times(end-start, &(Times[nextsize].reposition_time));
unsigned int cpy_size = 3*nextsize;
#ifdef APE
for(unsigned int i = 1; i < NGPU; i++){
int SHRD = THREADS*sizeof(double4);
DeviceSafeCall(cudaSetDevice(devices[0]));
DeviceSafeCall(cudaMemcpyPeer(p_v_a3_Dev[0], devices[0], a_temp_Dev[i], devices[i], cpy_size*sizeof(double4)));
sum_partial<<<3*DIMENSION/THREADS, THREADS, SHRD>>>(p_v_a3_Dev[0], a_temp_Dev[0], 3*nextsize);
DeviceSafeCall(cudaDeviceSynchronize());
}
// TODO: add the function that reduces a_temp_Dev across GPU 0 of every node and then broadcasts it to all GPUs of all nodes: not yet implemented
#else
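// Copy the per-GPU accelerations back to the host and reduce them over the local GPUs and across MPI ranks.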
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceCheckErrors();
DeviceSafeCall(cudaMemcpy(&a_H[i*cpy_size], a_temp_Dev[i], cpy_size*sizeof(double4), cudaMemcpyDeviceToHost));
}
get_times(&end);
set_times(end-start, &(Times[nextsize].memcpy2_time));
get_times(&start);
HostSafeCall(ReduceAll(cpy_size, N, NGPU, nextsize, a_H, a_H1, mpi_red_aux, mpi_red, next));
get_times(&end);
set_times(end-start, &(Times[nextsize].mpireduce_time));
#endif
#ifdef GPUCORR
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceSafeCall(cudaMemcpy(a_temp_Dev[i], a_H, 3*nextsize*sizeof( double4 ), cudaMemcpyHostToDevice ));
}
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
Corrector_gpu<<<DIMENSION/THREADS, THREADS>>>(*GTIME, loc_D[i], step_D[i], next_D[i], nextsize, pos_CD[i], vel_CD[i],
a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a_temp_Dev[i], a3_D[i], ETA6, ETA4, DTMAX, DTMIN, N); // call the corrector kernel directly on the GPU
}
DeviceSafeCall(cudaSetDevice(devices[0]));
DeviceSafeCall(cudaDeviceSynchronize());
for(unsigned int i = 0; i < nextsize; i++){
int who = next[i];
local_time[who] = *GTIME;
*ATIME = min (local_time[who] + step_D[0][who], *ATIME);
}
get_times(&end);
set_times(end-start, &(Times[nextsize].corrector_time));
#elif APE
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
Corrector_gpu<<<DIMENSION/THREADS, THREADS>>>(*GTIME, loc_D[i], step_D[i], next_D[i], nextsize, pos_CD[i], vel_CD[i],
a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a_temp_Dev[i], a3_D[i], ETA6, ETA4, DTMAX, DTMIN, N); // call the corrector kernel directly on the GPU
}
DeviceSafeCall(cudaSetDevice(devices[0]));
DeviceSafeCall(cudaDeviceSynchronize());
DeviceSafeCall(cudaMemcpy(step, step_D[0], N*sizeof(double), cudaMemcpyDeviceToHost));
for(unsigned int i = 0; i < nextsize; i++){
int who = next[i];
local_time[who] = *GTIME;
*ATIME = min (local_time[who] + step[who], *ATIME);
}
#else
// corrector on the CPU
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
update_local_time<<<DIMENSION/THREADS, THREADS>>>(next_D[i], loc_D[i], *GTIME);
}
get_times(&start);
HostSafeCall(Corrector(GTIME, ATIME, local_time, step, next, nextsize, pos_CH, vel_CH, a_H0,
a_H1, a3_H, p_v_a3, ETA6, ETA4, DTMAX, DTMIN, N));
cout<<"Corrector"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
get_times(&end);
set_times(end-start, &(Times[nextsize].corrector_time));
#endif
#ifndef APE
#ifndef GPUCORR
get_times(&start);
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
DeviceSafeCall(cudaMemcpy(p_v_a3_Dev[i], p_v_a3, 3*nextsize*sizeof( double4 ), cudaMemcpyHostToDevice ));
DeviceSafeCall(cudaMemcpy(a_temp_Dev[i], a_H, 3*nextsize*sizeof( double4 ), cudaMemcpyHostToDevice ));
}
get_times(&end);
set_times(end-start, &(Times[nextsize].rtime));
for(unsigned int i = 0; i < NGPU; i++){
DeviceSafeCall(cudaSetDevice(devices[i]));
int BB = 6*DIMENSION/THREADS;
Reconstruct<<< BB, THREADS >>>(next_D[i], nextsize, pos_CD[i], vel_CD[i], a3_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], p_v_a3_Dev[i], a_temp_Dev[i]);
}
get_times(&end);
set_times(end-start, &(Times[nextsize].reconstruct_time));
#endif
#endif
cout<<"zeit_if" << *GTIME <<" "<<GTW<<" "<<NEXTOUT<<endl;
if((*GTIME+GTW) >= NEXTOUT ){
DeviceSafeCall(cudaSetDevice(devices[0]));
DeviceSafeCall(cudaMemcpy( pos_CH, pos_CD[0], malloc_db4, cudaMemcpyDeviceToHost ));
DeviceSafeCall(cudaMemcpy( vel_CH, vel_CD[0], malloc_db4, cudaMemcpyDeviceToHost ));
CheckBlocks(step, M, path);
double kk,pp;
get_times(&start);
HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk, &pp, plummer_core, plummer_mass, rscale, mscale, devices));
get_times(&end);
set_times(end-start, &(Times[nextsize].energy_time));
if(rank == 0){
HostSafeCall(CheckHDDMemory(cleanstop, path));
#ifdef CHECK_TIMES
string ffff = to_string(out_index + FMAX);
ffff = path + "times_"+ffff+".dat";
stream.open(to_char(ffff), ios::out);
stream<<scientific<<setprecision(6);
stream<<"N "<<" NEXT "<<" CPY_NEXT"<<" PRED "<<" EVAL "<<" REDU "<<" REPOS "<<" CPY_ACC "<<" MPI "<<" CORR "<<" CPY_REC "<<" RECON "<<" THREADS "<<" TOTTHREAD "<<" BFACT "<<endl;
for(unsigned int i = 1; i <= N; i++){
if(Times[i].next_time != 0.0)
stream<<i<<" "<<
Times[i].next_time<<" "<<
Times[i].cpynext_time<<" "<<
Times[i].predictor_time<<" "<<
Times[i].evaluation_time<<" "<<
Times[i].reduce_time<<" "<<
Times[i].reposition_time<<" "<<
Times[i].memcpy2_time<<" "<<
Times[i].mpireduce_time<<" "<<
Times[i].corrector_time<<" "<<
Times[i].rtime<<" "<<
Times[i].reconstruct_time<<" "<<
Times[i].thr<<" "<<
Times[i].totthr<<" "<<
Times[i].bfac<<endl;
Times[i].next_time = 0.0;
}
stream.close();
#endif
double E = kk + pp;
temp = path + "energy.dat";
output_name = to_char(temp);
stream.open(output_name, ios::app);
stream<<scientific<<setprecision(16);
stream<<*GTIME+GTW<<" "<<fabs((E-E0)/E0)<<" "<<kk<<" "<<pp<<" "<<2.*kk/fabs(pp)<<endl;
stream.close();
string file_name = path + to_string(out_index + FMAX);
file_name += ".dat";
stream.open(to_char(file_name), ios::out);
stream<<scientific<<setprecision(16);
for(unsigned int i = 0; i < M; i++)
stream<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
stream.close();
out_index++;
}
int i = 0; // debug index: print the host copy of particle 0 at output time
cout<<"short"<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl;
MPISafeCall(MPI_Barrier(MPI_COMM_WORLD));
MPISafeCall(MPI_Bcast(cleanstop, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD));
if(*cleanstop)
return HNoError;
NEXTOUT+=DTPRINT;
}
}while(1);
}
|
25433a29fd80fde815488d7fddd1dfe944be7adb.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM3DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM;
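// Explicit template instantiation of the 3D DEM force kernel driver for the WCA pair potential without friction.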
template hipError_t gpu_compute_dem3d_forces<Scalar, Scalar4, WCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const unsigned int *d_nextFaces,
const unsigned int *d_firstFaceVertices,
const unsigned int *d_nextVertices,
const unsigned int *d_realVertices,
const Scalar4 *d_vertices,
const Scalar *d_diam,
const Scalar4 *d_velocity,
const unsigned int maxFeatures, const unsigned int maxVertices,
const unsigned int numFaces,
const unsigned int numDegenerateVerts,
const unsigned int numVerts,
const unsigned int numEdges,
const unsigned int numTypes,
const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const WCADEM evaluator, const Scalar r_cutsq,
const unsigned int particlesPerBlock, const unsigned int *d_firstTypeVert,
const unsigned int *d_numTypeVerts, const unsigned int *d_firstTypeEdge,
const unsigned int *d_numTypeEdges, const unsigned int *d_numTypeFaces,
const unsigned int *d_vertexConnectivity, const unsigned int *d_edges);
| 25433a29fd80fde815488d7fddd1dfe944be7adb.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM3DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM;
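// Explicit template instantiation of the 3D DEM force kernel driver for the WCA pair potential without friction.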
template cudaError_t gpu_compute_dem3d_forces<Scalar, Scalar4, WCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const unsigned int *d_nextFaces,
const unsigned int *d_firstFaceVertices,
const unsigned int *d_nextVertices,
const unsigned int *d_realVertices,
const Scalar4 *d_vertices,
const Scalar *d_diam,
const Scalar4 *d_velocity,
const unsigned int maxFeatures, const unsigned int maxVertices,
const unsigned int numFaces,
const unsigned int numDegenerateVerts,
const unsigned int numVerts,
const unsigned int numEdges,
const unsigned int numTypes,
const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const WCADEM evaluator, const Scalar r_cutsq,
const unsigned int particlesPerBlock, const unsigned int *d_firstTypeVert,
const unsigned int *d_numTypeVerts, const unsigned int *d_firstTypeEdge,
const unsigned int *d_numTypeEdges, const unsigned int *d_numTypeFaces,
const unsigned int *d_vertexConnectivity, const unsigned int *d_edges);
|
2634b7efc6d61df566713aed4f818f5a62735a9d.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2011, T. Kroes <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Core.cuh"
texture<short, hipTextureType3D, hipReadModeNormalizedFloat> gTexDensity;
texture<short, hipTextureType3D, hipReadModeNormalizedFloat> gTexGradientMagnitude;
texture<float, hipTextureType3D, hipReadModeElementType> gTexExtinction;
texture<float, hipTextureType1D, hipReadModeElementType> gTexOpacity;
texture<float4, hipTextureType1D, hipReadModeElementType> gTexDiffuse;
texture<float4, hipTextureType1D, hipReadModeElementType> gTexSpecular;
texture<float, hipTextureType1D, hipReadModeElementType> gTexRoughness;
texture<float4, hipTextureType1D, hipReadModeElementType> gTexEmission;
texture<uchar4, hipTextureType2D, hipReadModeNormalizedFloat> gTexRunningEstimateRgba;
hipArray* gpDensityArray = NULL;
hipArray* gpGradientMagnitudeArray = NULL;
hipArray* gpOpacityArray = NULL;
hipArray* gpDiffuseArray = NULL;
hipArray* gpSpecularArray = NULL;
hipArray* gpRoughnessArray = NULL;
hipArray* gpEmissionArray = NULL;
CD float3 gAaBbMin;
CD float3 gAaBbMax;
CD float3 gInvAaBbMin;
CD float3 gInvAaBbMax;
CD float gIntensityMin;
CD float gIntensityMax;
CD float gIntensityRange;
CD float gIntensityInvRange;
CD float gStepSize;
CD float gStepSizeShadow;
CD float gDensityScale;
CD float gGradientDelta;
CD float gInvGradientDelta;
CD float3 gGradientDeltaX;
CD float3 gGradientDeltaY;
CD float3 gGradientDeltaZ;
CD int gFilmWidth;
CD int gFilmHeight;
CD int gFilmNoPixels;
CD int gFilterWidth;
CD float gFilterWeights[10];
CD float gExposure;
CD float gInvExposure;
CD float gGamma;
CD float gInvGamma;
CD float gDenoiseEnabled;
CD float gDenoiseWindowRadius;
CD float gDenoiseInvWindowArea;
CD float gDenoiseNoise;
CD float gDenoiseWeightThreshold;
CD float gDenoiseLerpThreshold;
CD float gDenoiseLerpC;
CD float gNoIterations;
CD float gInvNoIterations;
#define TF_NO_SAMPLES 128
#define INV_TF_NO_SAMPLES 1.0f / (float)TF_NO_SAMPLES
#include "Model.cuh"
#include "View.cuh"
#include "Blur.cuh"
#include "Denoise.cuh"
#include "Estimate.cuh"
#include "Utilities.cuh"
#include "SingleScattering.cuh"
#include "NearestIntersection.cuh"
#include "SpecularBloom.cuh"
#include "ToneMap.cuh"
CCudaModel gModel;
CCudaView gRenderCanvasView;
CCudaView gNavigatorView;
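// Upload the 16-bit density volume into a 3D array and bind it to the density texture with trilinear filtering and clamped addressing.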
void BindDensityBuffer(short* pBuffer, hipExtent Extent)
{
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<short>();
HandleCudaError(hipMalloc3DArray(&gpDensityArray, &ChannelDesc, Extent));
hipMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpDensityArray;
CopyParams.extent = Extent;
CopyParams.kind = hipMemcpyHostToDevice;
HandleCudaError(hipMemcpy3D(&CopyParams));
gTexDensity.normalized = true;
gTexDensity.filterMode = hipFilterModeLinear;
gTexDensity.addressMode[0] = hipAddressModeClamp;
gTexDensity.addressMode[1] = hipAddressModeClamp;
gTexDensity.addressMode[2] = hipAddressModeClamp;
HandleCudaError(hipBindTextureToArray(gTexDensity, gpDensityArray, ChannelDesc));
}
void BindGradientMagnitudeBuffer(short* pBuffer, hipExtent Extent)
{
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<short>();
HandleCudaError(hipMalloc3DArray(&gpGradientMagnitudeArray, &ChannelDesc, Extent));
hipMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpGradientMagnitudeArray;
CopyParams.extent = Extent;
CopyParams.kind = hipMemcpyHostToDevice;
HandleCudaError(hipMemcpy3D(&CopyParams));
gTexGradientMagnitude.normalized = true;
gTexGradientMagnitude.filterMode = hipFilterModeLinear;
gTexGradientMagnitude.addressMode[0] = hipAddressModeClamp;
gTexGradientMagnitude.addressMode[1] = hipAddressModeClamp;
gTexGradientMagnitude.addressMode[2] = hipAddressModeClamp;
HandleCudaError(hipBindTextureToArray(gTexGradientMagnitude, gpGradientMagnitudeArray, ChannelDesc));
}
void UnbindDensityBuffer(void)
{
HandleCudaError(hipFreeArray(gpDensityArray));
gpDensityArray = NULL;
HandleCudaError(hipUnbindTexture(gTexDensity));
}
void UnbindGradientMagnitudeBuffer(void)
{
HandleCudaError(hipFreeArray(gpGradientMagnitudeArray));
gpGradientMagnitudeArray = NULL;
HandleCudaError(hipUnbindTexture(gTexGradientMagnitude));
}
void BindRenderCanvasView(const CResolution2D& Resolution)
{
gRenderCanvasView.Resize(Resolution);
hipChannelFormatDesc Channel;
Channel = hipCreateChannelDesc<uchar4>();
HandleCudaError(hipBindTexture2D(0, gTexRunningEstimateRgba, gRenderCanvasView.m_EstimateRgbaLdr.GetPtr(), Channel, gRenderCanvasView.GetWidth(), gRenderCanvasView.GetHeight(), gRenderCanvasView.m_EstimateRgbaLdr.GetPitch()));
}
void ResetRenderCanvasView(void)
{
gRenderCanvasView.Reset();
}
void FreeRenderCanvasView(void)
{
gRenderCanvasView.Free();
}
unsigned char* GetDisplayEstimate(void)
{
return (unsigned char*)gRenderCanvasView.m_DisplayEstimateRgbLdr.GetPtr(0, 0);
}
void BindTransferFunctionOpacity(CTransferFunction& TransferFunctionOpacity)
{
gTexOpacity.normalized = true;
gTexOpacity.filterMode = hipFilterModeLinear;
gTexOpacity.addressMode[0] = hipAddressModeClamp;
float Opacity[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Opacity[i] = TransferFunctionOpacity.F((float)i * INV_TF_NO_SAMPLES).r;
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>();
if (gpOpacityArray == NULL)
HandleCudaError(hipMallocArray(&gpOpacityArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpOpacityArray, 0, 0, Opacity, TF_NO_SAMPLES * sizeof(float), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexOpacity, gpOpacityArray, ChannelDesc));
}
void UnbindTransferFunctionOpacity(void)
{
HandleCudaError(hipFreeArray(gpOpacityArray));
gpOpacityArray = NULL;
HandleCudaError(hipUnbindTexture(gTexOpacity));
}
void BindTransferFunctionDiffuse(CTransferFunction& TransferFunctionDiffuse)
{
gTexDiffuse.normalized = true;
gTexDiffuse.filterMode = hipFilterModeLinear;
gTexDiffuse.addressMode[0] = hipAddressModeClamp;
float4 Diffuse[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Diffuse[i].x = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).r;
Diffuse[i].y = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).g;
Diffuse[i].z = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).b;
}
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>();
if (gpDiffuseArray == NULL)
HandleCudaError(hipMallocArray(&gpDiffuseArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpDiffuseArray, 0, 0, Diffuse, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexDiffuse, gpDiffuseArray, ChannelDesc));
}
void UnbindTransferFunctionDiffuse(void)
{
HandleCudaError(hipFreeArray(gpDiffuseArray));
gpDiffuseArray = NULL;
HandleCudaError(hipUnbindTexture(gTexDiffuse));
}
void BindTransferFunctionSpecular(CTransferFunction& TransferFunctionSpecular)
{
gTexSpecular.normalized = true;
gTexSpecular.filterMode = hipFilterModeLinear;
gTexSpecular.addressMode[0] = hipAddressModeClamp;
float4 Specular[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Specular[i].x = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).r;
Specular[i].y = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).g;
Specular[i].z = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).b;
}
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>();
if (gpSpecularArray == NULL)
HandleCudaError(hipMallocArray(&gpSpecularArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpSpecularArray, 0, 0, Specular, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexSpecular, gpSpecularArray, ChannelDesc));
}
void UnbindTransferFunctionSpecular(void)
{
HandleCudaError(hipFreeArray(gpSpecularArray));
gpSpecularArray = NULL;
HandleCudaError(hipUnbindTexture(gTexSpecular));
}
void BindTransferFunctionRoughness(CTransferFunction& TransferFunctionRoughness)
{
gTexRoughness.normalized = true;
gTexRoughness.filterMode = hipFilterModeLinear;
gTexRoughness.addressMode[0] = hipAddressModeClamp;
float Roughness[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Roughness[i] = TransferFunctionRoughness.F((float)i * INV_TF_NO_SAMPLES).r;
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>();
if (gpRoughnessArray == NULL)
HandleCudaError(hipMallocArray(&gpRoughnessArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpRoughnessArray, 0, 0, Roughness, TF_NO_SAMPLES * sizeof(float), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexRoughness, gpRoughnessArray, ChannelDesc));
}
void UnbindTransferFunctionRoughness(void)
{
HandleCudaError(hipFreeArray(gpRoughnessArray));
gpRoughnessArray = NULL;
HandleCudaError(hipUnbindTexture(gTexRoughness));
}
void BindTransferFunctionEmission(CTransferFunction& TransferFunctionEmission)
{
gTexEmission.normalized = true;
gTexEmission.filterMode = hipFilterModeLinear;
gTexEmission.addressMode[0] = hipAddressModeClamp;
float4 Emission[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Emission[i].x = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).r;
Emission[i].y = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).g;
Emission[i].z = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).b;
}
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>();
if (gpEmissionArray == NULL)
HandleCudaError(hipMallocArray(&gpEmissionArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpEmissionArray, 0, 0, Emission, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexEmission, gpEmissionArray, ChannelDesc));
}
void UnbindTransferFunctionEmission(void)
{
HandleCudaError(hipFreeArray(gpEmissionArray));
gpEmissionArray = NULL;
HandleCudaError(hipUnbindTexture(gTexEmission));
}
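// Copy the per-frame scene parameters (bounding box, intensity range, step sizes, film, exposure/gamma and denoiser settings) into device constant memory.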
void BindConstants(CScene* pScene)
{
const float3 AaBbMin = make_float3(pScene->m_BoundingBox.GetMinP().x, pScene->m_BoundingBox.GetMinP().y, pScene->m_BoundingBox.GetMinP().z);
const float3 AaBbMax = make_float3(pScene->m_BoundingBox.GetMaxP().x, pScene->m_BoundingBox.GetMaxP().y, pScene->m_BoundingBox.GetMaxP().z);
HandleCudaError(hipMemcpyToSymbol("gAaBbMin", &AaBbMin, sizeof(float3)));
HandleCudaError(hipMemcpyToSymbol("gAaBbMax", &AaBbMax, sizeof(float3)));
const float3 InvAaBbMin = make_float3(pScene->m_BoundingBox.GetInvMinP().x, pScene->m_BoundingBox.GetInvMinP().y, pScene->m_BoundingBox.GetInvMinP().z);
const float3 InvAaBbMax = make_float3(pScene->m_BoundingBox.GetInvMaxP().x, pScene->m_BoundingBox.GetInvMaxP().y, pScene->m_BoundingBox.GetInvMaxP().z);
HandleCudaError(hipMemcpyToSymbol("gInvAaBbMin", &InvAaBbMin, sizeof(float3)));
HandleCudaError(hipMemcpyToSymbol("gInvAaBbMax", &InvAaBbMax, sizeof(float3)));
const float IntensityMin = pScene->m_IntensityRange.GetMin();
const float IntensityMax = pScene->m_IntensityRange.GetMax();
const float IntensityRange = pScene->m_IntensityRange.GetRange();
const float IntensityInvRange = 1.0f / IntensityRange;
HandleCudaError(hipMemcpyToSymbol("gIntensityMin", &IntensityMin, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gIntensityMax", &IntensityMax, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gIntensityRange", &IntensityRange, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gIntensityInvRange", &IntensityInvRange, sizeof(float)));
const float StepSize = pScene->m_StepSizeFactor * pScene->m_GradientDelta;
const float StepSizeShadow = pScene->m_StepSizeFactorShadow * pScene->m_GradientDelta;
HandleCudaError(hipMemcpyToSymbol("gStepSize", &StepSize, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gStepSizeShadow", &StepSizeShadow, sizeof(float)));
const float DensityScale = pScene->m_DensityScale;
HandleCudaError(hipMemcpyToSymbol("gDensityScale", &DensityScale, sizeof(float)));
const float GradientDelta = 1.0f * pScene->m_GradientDelta;
const float InvGradientDelta = 1.0f / GradientDelta;
const Vec3f GradientDeltaX(GradientDelta, 0.0f, 0.0f);
const Vec3f GradientDeltaY(0.0f, GradientDelta, 0.0f);
const Vec3f GradientDeltaZ(0.0f, 0.0f, GradientDelta);
HandleCudaError(hipMemcpyToSymbol("gGradientDelta", &GradientDelta, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvGradientDelta", &InvGradientDelta, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gGradientDeltaX", &GradientDeltaX, sizeof(Vec3f)));
HandleCudaError(hipMemcpyToSymbol("gGradientDeltaY", &GradientDeltaY, sizeof(Vec3f)));
HandleCudaError(hipMemcpyToSymbol("gGradientDeltaZ", &GradientDeltaZ, sizeof(Vec3f)));
const int FilmWidth = pScene->m_Camera.m_Film.GetWidth();
const int Filmheight = pScene->m_Camera.m_Film.GetHeight();
const int FilmNoPixels = pScene->m_Camera.m_Film.m_Resolution.GetNoElements();
HandleCudaError(hipMemcpyToSymbol("gFilmWidth", &FilmWidth, sizeof(int)));
HandleCudaError(hipMemcpyToSymbol("gFilmHeight", &Filmheight, sizeof(int)));
HandleCudaError(hipMemcpyToSymbol("gFilmNoPixels", &FilmNoPixels, sizeof(int)));
const int FilterWidth = 1;
HandleCudaError(hipMemcpyToSymbol("gFilterWidth", &FilterWidth, sizeof(int)));
const float FilterWeights[10] = { 0.11411459588254977f, 0.08176668094332218f, 0.03008028089187349f, 0.01f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
HandleCudaError(hipMemcpyToSymbol("gFilterWeights", &FilterWeights, 10 * sizeof(float)));
const float Gamma = pScene->m_Camera.m_Film.m_Gamma;
const float InvGamma = 1.0f / Gamma;
const float Exposure = pScene->m_Camera.m_Film.m_Exposure;
const float InvExposure = 1.0f / Exposure;
HandleCudaError(hipMemcpyToSymbol("gExposure", &Exposure, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvExposure", &InvExposure, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gGamma", &Gamma, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvGamma", &InvGamma, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseEnabled", &pScene->m_DenoiseParams.m_Enabled, sizeof(bool)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseWindowRadius", &pScene->m_DenoiseParams.m_WindowRadius, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseInvWindowArea", &pScene->m_DenoiseParams.m_InvWindowArea, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseNoise", &pScene->m_DenoiseParams.m_Noise, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseWeightThreshold", &pScene->m_DenoiseParams.m_WeightThreshold, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseLerpThreshold", &pScene->m_DenoiseParams.m_LerpThreshold, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseLerpC", &pScene->m_DenoiseParams.m_LerpC, sizeof(float)));
const float NoIterations = pScene->GetNoIterations();
const float InvNoIterations = 1.0f / max(1.0f, NoIterations);
HandleCudaError(hipMemcpyToSymbol("gNoIterations", &NoIterations, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvNoIterations", &InvNoIterations, sizeof(float)));
}
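// One progressive rendering iteration: scattering pass, blur, estimate update, tone mapping and denoising, with per-stage timing.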
void Render(const int& Type, CScene& Scene, CTiming& RenderImage, CTiming& BlurImage, CTiming& PostProcessImage, CTiming& DenoiseImage)
{
CScene* pDevScene = NULL;
HandleCudaError(hipMalloc(&pDevScene, sizeof(CScene)));
HandleCudaError(hipMemcpy(pDevScene, &Scene, sizeof(CScene), hipMemcpyHostToDevice));
if (Scene.m_Camera.m_Focus.m_Type == 0)
Scene.m_Camera.m_Focus.m_FocalDistance = NearestIntersection(pDevScene);
HandleCudaError(hipMemcpy(pDevScene, &Scene, sizeof(CScene), hipMemcpyHostToDevice));
CCudaView* pDevView = NULL;
HandleCudaError(hipMalloc(&pDevView, sizeof(CCudaView)));
HandleCudaError(hipMemcpy(pDevView, &gRenderCanvasView, sizeof(CCudaView), hipMemcpyHostToDevice));
CCudaTimer TmrRender;
switch (Type)
{
case 0:
{
SingleScattering(&Scene, pDevScene, pDevView);
break;
}
case 1:
{
// MultipleScattering(&Scene, pDevScene);
break;
}
}
RenderImage.AddDuration(TmrRender.ElapsedTime());
CCudaTimer TmrBlur;
Blur(&Scene, pDevScene, pDevView);
BlurImage.AddDuration(TmrBlur.ElapsedTime());
CCudaTimer TmrPostProcess;
Estimate(&Scene, pDevScene, pDevView);
PostProcessImage.AddDuration(TmrPostProcess.ElapsedTime());
ToneMap(&Scene, pDevScene, pDevView);
CCudaTimer TmrDenoise;
Denoise(&Scene, pDevScene, pDevView);
DenoiseImage.AddDuration(TmrDenoise.ElapsedTime());
HandleCudaError(hipFree(pDevScene));
HandleCudaError(hipFree(pDevView));
}
| 2634b7efc6d61df566713aed4f818f5a62735a9d.cu | /*
Copyright (c) 2011, T. Kroes <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Core.cuh"
texture<short, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexDensity;
texture<short, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexGradientMagnitude;
texture<float, cudaTextureType3D, cudaReadModeElementType> gTexExtinction;
texture<float, cudaTextureType1D, cudaReadModeElementType> gTexOpacity;
texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexDiffuse;
texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexSpecular;
texture<float, cudaTextureType1D, cudaReadModeElementType> gTexRoughness;
texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexEmission;
texture<uchar4, cudaTextureType2D, cudaReadModeNormalizedFloat> gTexRunningEstimateRgba;
cudaArray* gpDensityArray = NULL;
cudaArray* gpGradientMagnitudeArray = NULL;
cudaArray* gpOpacityArray = NULL;
cudaArray* gpDiffuseArray = NULL;
cudaArray* gpSpecularArray = NULL;
cudaArray* gpRoughnessArray = NULL;
cudaArray* gpEmissionArray = NULL;
CD float3 gAaBbMin;
CD float3 gAaBbMax;
CD float3 gInvAaBbMin;
CD float3 gInvAaBbMax;
CD float gIntensityMin;
CD float gIntensityMax;
CD float gIntensityRange;
CD float gIntensityInvRange;
CD float gStepSize;
CD float gStepSizeShadow;
CD float gDensityScale;
CD float gGradientDelta;
CD float gInvGradientDelta;
CD float3 gGradientDeltaX;
CD float3 gGradientDeltaY;
CD float3 gGradientDeltaZ;
CD int gFilmWidth;
CD int gFilmHeight;
CD int gFilmNoPixels;
CD int gFilterWidth;
CD float gFilterWeights[10];
CD float gExposure;
CD float gInvExposure;
CD float gGamma;
CD float gInvGamma;
CD float gDenoiseEnabled;
CD float gDenoiseWindowRadius;
CD float gDenoiseInvWindowArea;
CD float gDenoiseNoise;
CD float gDenoiseWeightThreshold;
CD float gDenoiseLerpThreshold;
CD float gDenoiseLerpC;
CD float gNoIterations;
CD float gInvNoIterations;
#define TF_NO_SAMPLES 128
#define INV_TF_NO_SAMPLES 1.0f / (float)TF_NO_SAMPLES
#include "Model.cuh"
#include "View.cuh"
#include "Blur.cuh"
#include "Denoise.cuh"
#include "Estimate.cuh"
#include "Utilities.cuh"
#include "SingleScattering.cuh"
#include "NearestIntersection.cuh"
#include "SpecularBloom.cuh"
#include "ToneMap.cuh"
CCudaModel gModel;
CCudaView gRenderCanvasView;
CCudaView gNavigatorView;
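// Upload the 16-bit density volume into a 3D array and bind it to the density texture with trilinear filtering and clamped addressing.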
void BindDensityBuffer(short* pBuffer, cudaExtent Extent)
{
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<short>();
HandleCudaError(cudaMalloc3DArray(&gpDensityArray, &ChannelDesc, Extent));
cudaMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpDensityArray;
CopyParams.extent = Extent;
CopyParams.kind = cudaMemcpyHostToDevice;
HandleCudaError(cudaMemcpy3D(&CopyParams));
gTexDensity.normalized = true;
gTexDensity.filterMode = cudaFilterModeLinear;
gTexDensity.addressMode[0] = cudaAddressModeClamp;
gTexDensity.addressMode[1] = cudaAddressModeClamp;
gTexDensity.addressMode[2] = cudaAddressModeClamp;
HandleCudaError(cudaBindTextureToArray(gTexDensity, gpDensityArray, ChannelDesc));
}
void BindGradientMagnitudeBuffer(short* pBuffer, cudaExtent Extent)
{
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<short>();
HandleCudaError(cudaMalloc3DArray(&gpGradientMagnitudeArray, &ChannelDesc, Extent));
cudaMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpGradientMagnitudeArray;
CopyParams.extent = Extent;
CopyParams.kind = cudaMemcpyHostToDevice;
HandleCudaError(cudaMemcpy3D(&CopyParams));
gTexGradientMagnitude.normalized = true;
gTexGradientMagnitude.filterMode = cudaFilterModeLinear;
gTexGradientMagnitude.addressMode[0] = cudaAddressModeClamp;
gTexGradientMagnitude.addressMode[1] = cudaAddressModeClamp;
gTexGradientMagnitude.addressMode[2] = cudaAddressModeClamp;
HandleCudaError(cudaBindTextureToArray(gTexGradientMagnitude, gpGradientMagnitudeArray, ChannelDesc));
}
void UnbindDensityBuffer(void)
{
HandleCudaError(cudaFreeArray(gpDensityArray));
gpDensityArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexDensity));
}
void UnbindGradientMagnitudeBuffer(void)
{
HandleCudaError(cudaFreeArray(gpGradientMagnitudeArray));
gpGradientMagnitudeArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexGradientMagnitude));
}
void BindRenderCanvasView(const CResolution2D& Resolution)
{
gRenderCanvasView.Resize(Resolution);
cudaChannelFormatDesc Channel;
Channel = cudaCreateChannelDesc<uchar4>();
HandleCudaError(cudaBindTexture2D(0, gTexRunningEstimateRgba, gRenderCanvasView.m_EstimateRgbaLdr.GetPtr(), Channel, gRenderCanvasView.GetWidth(), gRenderCanvasView.GetHeight(), gRenderCanvasView.m_EstimateRgbaLdr.GetPitch()));
}
void ResetRenderCanvasView(void)
{
gRenderCanvasView.Reset();
}
void FreeRenderCanvasView(void)
{
gRenderCanvasView.Free();
}
unsigned char* GetDisplayEstimate(void)
{
return (unsigned char*)gRenderCanvasView.m_DisplayEstimateRgbLdr.GetPtr(0, 0);
}
void BindTransferFunctionOpacity(CTransferFunction& TransferFunctionOpacity)
{
gTexOpacity.normalized = true;
gTexOpacity.filterMode = cudaFilterModeLinear;
gTexOpacity.addressMode[0] = cudaAddressModeClamp;
float Opacity[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Opacity[i] = TransferFunctionOpacity.F((float)i * INV_TF_NO_SAMPLES).r;
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>();
if (gpOpacityArray == NULL)
HandleCudaError(cudaMallocArray(&gpOpacityArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpOpacityArray, 0, 0, Opacity, TF_NO_SAMPLES * sizeof(float), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexOpacity, gpOpacityArray, ChannelDesc));
}
void UnbindTransferFunctionOpacity(void)
{
HandleCudaError(cudaFreeArray(gpOpacityArray));
gpOpacityArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexOpacity));
}
void BindTransferFunctionDiffuse(CTransferFunction& TransferFunctionDiffuse)
{
gTexDiffuse.normalized = true;
gTexDiffuse.filterMode = cudaFilterModeLinear;
gTexDiffuse.addressMode[0] = cudaAddressModeClamp;
float4 Diffuse[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Diffuse[i].x = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).r;
Diffuse[i].y = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).g;
Diffuse[i].z = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).b;
}
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>();
if (gpDiffuseArray == NULL)
HandleCudaError(cudaMallocArray(&gpDiffuseArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpDiffuseArray, 0, 0, Diffuse, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexDiffuse, gpDiffuseArray, ChannelDesc));
}
void UnbindTransferFunctionDiffuse(void)
{
HandleCudaError(cudaFreeArray(gpDiffuseArray));
gpDiffuseArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexDiffuse));
}
void BindTransferFunctionSpecular(CTransferFunction& TransferFunctionSpecular)
{
gTexSpecular.normalized = true;
gTexSpecular.filterMode = cudaFilterModeLinear;
gTexSpecular.addressMode[0] = cudaAddressModeClamp;
float4 Specular[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Specular[i].x = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).r;
Specular[i].y = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).g;
Specular[i].z = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).b;
}
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>();
if (gpSpecularArray == NULL)
HandleCudaError(cudaMallocArray(&gpSpecularArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpSpecularArray, 0, 0, Specular, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexSpecular, gpSpecularArray, ChannelDesc));
}
void UnbindTransferFunctionSpecular(void)
{
HandleCudaError(cudaFreeArray(gpSpecularArray));
gpSpecularArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexSpecular));
}
void BindTransferFunctionRoughness(CTransferFunction& TransferFunctionRoughness)
{
gTexRoughness.normalized = true;
gTexRoughness.filterMode = cudaFilterModeLinear;
gTexRoughness.addressMode[0] = cudaAddressModeClamp;
float Roughness[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Roughness[i] = TransferFunctionRoughness.F((float)i * INV_TF_NO_SAMPLES).r;
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>();
if (gpRoughnessArray == NULL)
HandleCudaError(cudaMallocArray(&gpRoughnessArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpRoughnessArray, 0, 0, Roughness, TF_NO_SAMPLES * sizeof(float), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexRoughness, gpRoughnessArray, ChannelDesc));
}
void UnbindTransferFunctionRoughness(void)
{
HandleCudaError(cudaFreeArray(gpRoughnessArray));
gpRoughnessArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexRoughness));
}
void BindTransferFunctionEmission(CTransferFunction& TransferFunctionEmission)
{
gTexEmission.normalized = true;
gTexEmission.filterMode = cudaFilterModeLinear;
gTexEmission.addressMode[0] = cudaAddressModeClamp;
float4 Emission[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Emission[i].x = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).r;
Emission[i].y = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).g;
Emission[i].z = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).b;
}
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>();
if (gpEmissionArray == NULL)
HandleCudaError(cudaMallocArray(&gpEmissionArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpEmissionArray, 0, 0, Emission, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexEmission, gpEmissionArray, ChannelDesc));
}
void UnbindTransferFunctionEmission(void)
{
HandleCudaError(cudaFreeArray(gpEmissionArray));
gpEmissionArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexEmission));
}
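// Copy the per-frame scene parameters (bounding box, intensity range, step sizes, film, exposure/gamma and denoiser settings) into device constant memory.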
void BindConstants(CScene* pScene)
{
const float3 AaBbMin = make_float3(pScene->m_BoundingBox.GetMinP().x, pScene->m_BoundingBox.GetMinP().y, pScene->m_BoundingBox.GetMinP().z);
const float3 AaBbMax = make_float3(pScene->m_BoundingBox.GetMaxP().x, pScene->m_BoundingBox.GetMaxP().y, pScene->m_BoundingBox.GetMaxP().z);
HandleCudaError(cudaMemcpyToSymbol("gAaBbMin", &AaBbMin, sizeof(float3)));
HandleCudaError(cudaMemcpyToSymbol("gAaBbMax", &AaBbMax, sizeof(float3)));
const float3 InvAaBbMin = make_float3(pScene->m_BoundingBox.GetInvMinP().x, pScene->m_BoundingBox.GetInvMinP().y, pScene->m_BoundingBox.GetInvMinP().z);
const float3 InvAaBbMax = make_float3(pScene->m_BoundingBox.GetInvMaxP().x, pScene->m_BoundingBox.GetInvMaxP().y, pScene->m_BoundingBox.GetInvMaxP().z);
HandleCudaError(cudaMemcpyToSymbol("gInvAaBbMin", &InvAaBbMin, sizeof(float3)));
HandleCudaError(cudaMemcpyToSymbol("gInvAaBbMax", &InvAaBbMax, sizeof(float3)));
const float IntensityMin = pScene->m_IntensityRange.GetMin();
const float IntensityMax = pScene->m_IntensityRange.GetMax();
const float IntensityRange = pScene->m_IntensityRange.GetRange();
const float IntensityInvRange = 1.0f / IntensityRange;
HandleCudaError(cudaMemcpyToSymbol("gIntensityMin", &IntensityMin, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gIntensityMax", &IntensityMax, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gIntensityRange", &IntensityRange, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gIntensityInvRange", &IntensityInvRange, sizeof(float)));
const float StepSize = pScene->m_StepSizeFactor * pScene->m_GradientDelta;
const float StepSizeShadow = pScene->m_StepSizeFactorShadow * pScene->m_GradientDelta;
HandleCudaError(cudaMemcpyToSymbol("gStepSize", &StepSize, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gStepSizeShadow", &StepSizeShadow, sizeof(float)));
const float DensityScale = pScene->m_DensityScale;
HandleCudaError(cudaMemcpyToSymbol("gDensityScale", &DensityScale, sizeof(float)));
const float GradientDelta = 1.0f * pScene->m_GradientDelta;
const float InvGradientDelta = 1.0f / GradientDelta;
const Vec3f GradientDeltaX(GradientDelta, 0.0f, 0.0f);
const Vec3f GradientDeltaY(0.0f, GradientDelta, 0.0f);
const Vec3f GradientDeltaZ(0.0f, 0.0f, GradientDelta);
HandleCudaError(cudaMemcpyToSymbol("gGradientDelta", &GradientDelta, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvGradientDelta", &InvGradientDelta, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gGradientDeltaX", &GradientDeltaX, sizeof(Vec3f)));
HandleCudaError(cudaMemcpyToSymbol("gGradientDeltaY", &GradientDeltaY, sizeof(Vec3f)));
HandleCudaError(cudaMemcpyToSymbol("gGradientDeltaZ", &GradientDeltaZ, sizeof(Vec3f)));
const int FilmWidth = pScene->m_Camera.m_Film.GetWidth();
const int Filmheight = pScene->m_Camera.m_Film.GetHeight();
const int FilmNoPixels = pScene->m_Camera.m_Film.m_Resolution.GetNoElements();
HandleCudaError(cudaMemcpyToSymbol("gFilmWidth", &FilmWidth, sizeof(int)));
HandleCudaError(cudaMemcpyToSymbol("gFilmHeight", &Filmheight, sizeof(int)));
HandleCudaError(cudaMemcpyToSymbol("gFilmNoPixels", &FilmNoPixels, sizeof(int)));
const int FilterWidth = 1;
HandleCudaError(cudaMemcpyToSymbol("gFilterWidth", &FilterWidth, sizeof(int)));
const float FilterWeights[10] = { 0.11411459588254977f, 0.08176668094332218f, 0.03008028089187349f, 0.01f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
HandleCudaError(cudaMemcpyToSymbol("gFilterWeights", &FilterWeights, 10 * sizeof(float)));
const float Gamma = pScene->m_Camera.m_Film.m_Gamma;
const float InvGamma = 1.0f / Gamma;
const float Exposure = pScene->m_Camera.m_Film.m_Exposure;
const float InvExposure = 1.0f / Exposure;
HandleCudaError(cudaMemcpyToSymbol("gExposure", &Exposure, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvExposure", &InvExposure, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gGamma", &Gamma, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvGamma", &InvGamma, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseEnabled", &pScene->m_DenoiseParams.m_Enabled, sizeof(bool)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseWindowRadius", &pScene->m_DenoiseParams.m_WindowRadius, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseInvWindowArea", &pScene->m_DenoiseParams.m_InvWindowArea, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseNoise", &pScene->m_DenoiseParams.m_Noise, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseWeightThreshold", &pScene->m_DenoiseParams.m_WeightThreshold, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseLerpThreshold", &pScene->m_DenoiseParams.m_LerpThreshold, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseLerpC", &pScene->m_DenoiseParams.m_LerpC, sizeof(float)));
const float NoIterations = pScene->GetNoIterations();
const float InvNoIterations = 1.0f / max(1.0f, NoIterations);
HandleCudaError(cudaMemcpyToSymbol("gNoIterations", &NoIterations, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvNoIterations", &InvNoIterations, sizeof(float)));
}
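// One progressive rendering iteration: scattering pass, blur, estimate update, tone mapping and denoising, with per-stage timing.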
void Render(const int& Type, CScene& Scene, CTiming& RenderImage, CTiming& BlurImage, CTiming& PostProcessImage, CTiming& DenoiseImage)
{
CScene* pDevScene = NULL;
HandleCudaError(cudaMalloc(&pDevScene, sizeof(CScene)));
HandleCudaError(cudaMemcpy(pDevScene, &Scene, sizeof(CScene), cudaMemcpyHostToDevice));
if (Scene.m_Camera.m_Focus.m_Type == 0)
Scene.m_Camera.m_Focus.m_FocalDistance = NearestIntersection(pDevScene);
HandleCudaError(cudaMemcpy(pDevScene, &Scene, sizeof(CScene), cudaMemcpyHostToDevice));
CCudaView* pDevView = NULL;
HandleCudaError(cudaMalloc(&pDevView, sizeof(CCudaView)));
HandleCudaError(cudaMemcpy(pDevView, &gRenderCanvasView, sizeof(CCudaView), cudaMemcpyHostToDevice));
CCudaTimer TmrRender;
switch (Type)
{
case 0:
{
SingleScattering(&Scene, pDevScene, pDevView);
break;
}
case 1:
{
// MultipleScattering(&Scene, pDevScene);
break;
}
}
RenderImage.AddDuration(TmrRender.ElapsedTime());
CCudaTimer TmrBlur;
Blur(&Scene, pDevScene, pDevView);
BlurImage.AddDuration(TmrBlur.ElapsedTime());
CCudaTimer TmrPostProcess;
Estimate(&Scene, pDevScene, pDevView);
PostProcessImage.AddDuration(TmrPostProcess.ElapsedTime());
ToneMap(&Scene, pDevScene, pDevView);
CCudaTimer TmrDenoise;
Denoise(&Scene, pDevScene, pDevView);
DenoiseImage.AddDuration(TmrDenoise.ElapsedTime());
HandleCudaError(cudaFree(pDevScene));
HandleCudaError(cudaFree(pDevView));
}
|
7975e50e6af74af68cb5c61b31ff6283c26fd993.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
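// Shared-memory accumulator for pairwise histograms over binary features: each warp writes into its own 512-float slice, and Reduce() sums the per-warp slices.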
template <int BlockSize>
struct TPairBinaryHist {
float* Slice;
__forceinline__ __device__ int HistSize() {
return BlockSize * 16;
}
__forceinline__ __device__ int SliceOffset() {
return 512 * (threadIdx.x >> 5);
}
__forceinline__ __device__ TPairBinaryHist(float* buff) {
Slice = buff;
for (int i = threadIdx.x; i < HistSize(); i += BlockSize) {
Slice[i] = 0;
}
Slice += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1, const ui32 ci2, const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll 1
for (int i = 0; i < 8; i++) {
uchar f = (((threadIdx.x >> 2) + i) & 7) << 2;
const ui32 bin1 = bfe(ci1, 28 - f, 4);
const ui32 bin2 = bfe(ci2, 28 - f, 4);
const ui32 invBin1 = (~bin1) & 15;
const ui32 invBin2 = (~bin2) & 15;
//00 01 10 11
const ui32 bins = (invBin1 & invBin2) | ((invBin1 & bin2) << 8) | ((bin1 & invBin2) << 16) | ((bin1 & bin2) << 24);
#pragma unroll 2
for (int currentHist = 0; currentHist < 4; ++currentHist) {
const int histOffset = (threadIdx.x + currentHist) & 3;
const int bin = (bins >> (histOffset << 3)) & 15;
// 32 * bin + 4 * featureId + histId
//512 floats per warp
syncTile.sync();
Slice[f + (bin << 5) + histOffset] += w;
}
}
}
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
__forceinline__ __device__ void Reduce() {
__syncthreads();
Slice -= SliceOffset();
float sum = 0.f;
if (threadIdx.x < 512) {
const int warpCount = BlockSize / 32;
int binId = threadIdx.x / 32;
const int x = threadIdx.x & 31;
Slice += 32 * binId + x;
for (int warpId = 0; warpId < warpCount; ++warpId) {
sum += Slice[warpId * 512];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Slice[0] = sum;
}
__syncthreads();
}
};
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBinaryPairs(const TCFeature* feature, int fCount, const ui32* cindex,
const uint2* pairs, const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
const int maxBlocksPerPart = gridDim.x / ((fCount + 31) / 32);
{
const int featureOffset = (blockIdx.x / maxBlocksPerPart) * 32;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 32);
}
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * ((ui64)histLineSize * 4ULL);
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * ((ui64)histLineSize) * 4ULL;
}
__shared__ float localHist[16 * BlockSize];
if (partition->Size == 0) {
return;
}
const int innerUnroll = 1;
const int outerUnroll = 1;
const int minDocsPerBlock = BlockSize * innerUnroll * 8;
const int localBlockIdx = blockIdx.x % maxBlocksPerPart;
const int activeBlockCount = min((partition->Size + minDocsPerBlock - 1) / minDocsPerBlock, maxBlocksPerPart);
if (localBlockIdx >= activeBlockCount) {
return;
}
{
using THist = TPairBinaryHist<BlockSize>;
THist hist(localHist);
ComputePairHistogram<BlockSize, innerUnroll, outerUnroll, THist >(partition->Offset, partition->Size,
cindex, pairs, weight,
localBlockIdx, activeBlockCount,
hist);
}
const int histId = threadIdx.x & 3;
const int fid = (threadIdx.x >> 2);
__syncthreads();
if (fid < fCount) {
float sum = 0;
const int groupId = fid / 4;
const int fixedBitId = 3 - fid % 4;
const int activeMask = (1 << fixedBitId);
//fix i'th bit and iterate through others
#pragma unroll 1
for (int i = 0; i < 16; ++i) {
if (i & activeMask) {
sum += localHist[32 * i + 4 * groupId + histId];
}
}
if (abs(sum) > 1e-20f) {
atomicAdd(histogram + feature[fid].FirstFoldIndex * 4 + histId, sum);
}
}
}
void ComputePairwiseHistogramBinary(const TCFeature* features,const TCFeature*,
const ui32 featureCount,
const ui32 binFeatureCount,
const ui32* compressedIndex,
const uint2* pairs,
ui32 /*pairCount*/,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
int parallelStreams,
TCudaStream stream) {
Y_ASSERT(featureCount == binFeatureCount);
if (featureCount > 0) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.x = (featureCount + 31) / 32;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int blockPerFeatureMultiplier = CeilDivide<int>(TArchProps::SMCount() * blocksPerSm * 2, (parallelStreams * numBlocks.x * numBlocks.y * numBlocks.z));
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL) \
ComputeSplitPropertiesBinaryPairs < blockSize, IS_FULL > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
if (fullPass) {
NB_HIST(true)
} else {
NB_HIST(false)
}
#undef NB_HIST
}
}
}
| 7975e50e6af74af68cb5c61b31ff6283c26fd993.cu | #include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
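// Shared-memory accumulator for pairwise histograms over binary features: each warp writes into its own 512-float slice, and Reduce() sums the per-warp slices.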
template <int BlockSize>
struct TPairBinaryHist {
float* Slice;
__forceinline__ __device__ int HistSize() {
return BlockSize * 16;
}
__forceinline__ __device__ int SliceOffset() {
return 512 * (threadIdx.x >> 5);
}
__forceinline__ __device__ TPairBinaryHist(float* buff) {
Slice = buff;
for (int i = threadIdx.x; i < HistSize(); i += BlockSize) {
Slice[i] = 0;
}
Slice += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1, const ui32 ci2, const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll 1
for (int i = 0; i < 8; i++) {
uchar f = (((threadIdx.x >> 2) + i) & 7) << 2;
const ui32 bin1 = bfe(ci1, 28 - f, 4);
const ui32 bin2 = bfe(ci2, 28 - f, 4);
const ui32 invBin1 = (~bin1) & 15;
const ui32 invBin2 = (~bin2) & 15;
//00 01 10 11
const ui32 bins = (invBin1 & invBin2) | ((invBin1 & bin2) << 8) | ((bin1 & invBin2) << 16) | ((bin1 & bin2) << 24);
#pragma unroll 2
for (int currentHist = 0; currentHist < 4; ++currentHist) {
const int histOffset = (threadIdx.x + currentHist) & 3;
const int bin = (bins >> (histOffset << 3)) & 15;
// 32 * bin + 4 * featureId + histId
//512 floats per warp
syncTile.sync();
Slice[f + (bin << 5) + histOffset] += w;
}
}
}
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
__forceinline__ __device__ void Reduce() {
__syncthreads();
Slice -= SliceOffset();
float sum = 0.f;
if (threadIdx.x < 512) {
const int warpCount = BlockSize / 32;
int binId = threadIdx.x / 32;
const int x = threadIdx.x & 31;
Slice += 32 * binId + x;
for (int warpId = 0; warpId < warpCount; ++warpId) {
sum += Slice[warpId * 512];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Slice[0] = sum;
}
__syncthreads();
}
};
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBinaryPairs(const TCFeature* feature, int fCount, const ui32* cindex,
const uint2* pairs, const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
const int maxBlocksPerPart = gridDim.x / ((fCount + 31) / 32);
{
const int featureOffset = (blockIdx.x / maxBlocksPerPart) * 32;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 32);
}
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * ((ui64)histLineSize * 4ULL);
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * ((ui64)histLineSize) * 4ULL;
}
__shared__ float localHist[16 * BlockSize];
if (partition->Size == 0) {
return;
}
const int innerUnroll = 1;
const int outerUnroll = 1;
const int minDocsPerBlock = BlockSize * innerUnroll * 8;
const int localBlockIdx = blockIdx.x % maxBlocksPerPart;
const int activeBlockCount = min((partition->Size + minDocsPerBlock - 1) / minDocsPerBlock, maxBlocksPerPart);
if (localBlockIdx >= activeBlockCount) {
return;
}
{
using THist = TPairBinaryHist<BlockSize>;
THist hist(localHist);
ComputePairHistogram<BlockSize, innerUnroll, outerUnroll, THist >(partition->Offset, partition->Size,
cindex, pairs, weight,
localBlockIdx, activeBlockCount,
hist);
}
const int histId = threadIdx.x & 3;
const int fid = (threadIdx.x >> 2);
__syncthreads();
if (fid < fCount) {
float sum = 0;
const int groupId = fid / 4;
const int fixedBitId = 3 - fid % 4;
const int activeMask = (1 << fixedBitId);
//fix i'th bit and iterate through others
#pragma unroll 1
for (int i = 0; i < 16; ++i) {
if (i & activeMask) {
sum += localHist[32 * i + 4 * groupId + histId];
}
}
if (abs(sum) > 1e-20f) {
atomicAdd(histogram + feature[fid].FirstFoldIndex * 4 + histId, sum);
}
}
}
void ComputePairwiseHistogramBinary(const TCFeature* features,const TCFeature*,
const ui32 featureCount,
const ui32 binFeatureCount,
const ui32* compressedIndex,
const uint2* pairs,
ui32 /*pairCount*/,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
int parallelStreams,
TCudaStream stream) {
Y_ASSERT(featureCount == binFeatureCount);
if (featureCount > 0) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.x = (featureCount + 31) / 32;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int blockPerFeatureMultiplier = CeilDivide<int>(TArchProps::SMCount() * blocksPerSm * 2, (parallelStreams * numBlocks.x * numBlocks.y * numBlocks.z));
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL) \
ComputeSplitPropertiesBinaryPairs < blockSize, IS_FULL > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
if (fullPass) {
NB_HIST(true)
} else {
NB_HIST(false)
}
#undef NB_HIST
}
}
}
|
a8b650c09fdc36df8866c5582399c547f0dcdcc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coords_manager.hpp"
#include <pybind11/pybind11.h>
namespace py = pybind11;
namespace minkowski {
namespace detail {
template <typename SrcType, typename DstType>
__global__ void dtypeCopy(SrcType const *src, DstType *dst, size_t n) {
CUDA_KERNEL_LOOP(index, n) { dst[index] = src[index]; }
}
} // namespace detail
template <typename MapType>
const pInOutMaps<int>
CoordsManager<MapType>::copyInOutMapToGPU(const InOutMaps<int> &map) {
pInOutMaps<int> d_map;
const int n = getInOutMapsSize(map);
int *d_scr = (int *)gpu_memory_manager.gpuMalloc(n * sizeof(int));
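// A single allocation covers every kernel element; d_scr is advanced below to carve out one contiguous sub-span per element.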
for (const auto &cmap : map) {
// Copy (*p_in_maps)[k] to GPU
CUDA_CHECK(hipMemcpy(d_scr, cmap.data(), cmap.size() * sizeof(int),
hipMemcpyHostToDevice));
d_map.push_back(pVector<int>(d_scr, cmap.size()));
d_scr += cmap.size();
}
return d_map;
}
template <typename MapType>
void CoordsManager<MapType>::copyInOutMapsToGPU(const InOutMapKey &map_key) {
if (d_in_maps.find(map_key) == d_in_maps.end()) {
ASSERT(in_maps.find(map_key) != in_maps.end(),
"The InOutMap doesn't exists.");
d_in_maps[map_key] = copyInOutMapToGPU(in_maps[map_key]);
d_out_maps[map_key] = copyInOutMapToGPU(out_maps[map_key]);
}
}
template <typename MapType>
const pInOutMapsRefPair<int> CoordsManager<MapType>::getInOutMapsGPU(
const vector<int> &tensor_strides, const vector<int> &strides,
const vector<int> &kernel_sizes, const vector<int> &dilations,
int region_type, const at::Tensor &offsets, py::object py_in_coords_key,
py::object py_out_coords_key, bool is_transpose, bool is_pool,
bool force_creation) {
const auto &in_out =
getInOutMaps(tensor_strides, strides, kernel_sizes, dilations,
region_type, offsets, py_in_coords_key, py_out_coords_key,
is_transpose, is_pool, force_creation);
const InOutMapKey map_key = getMapHashKey(
tensor_strides, strides, kernel_sizes, dilations, region_type,
py_in_coords_key, py_out_coords_key, is_transpose, is_pool);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
template <typename MapType>
const pInOutMapsRefPair<int>
CoordsManager<MapType>::getOriginInOutMapsGPU(py::object py_in_coords_key,
py::object py_glob_coords_key) {
const auto &in_out = getOriginInOutMaps(py_in_coords_key, py_glob_coords_key);
const InOutMapKey map_key =
getOriginMapHashKey(py_in_coords_key, py_glob_coords_key);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
template <typename MapType>
const pInOutMapsRefPair<int>
CoordsManager<MapType>::getPruningInOutMapsGPU(at::Tensor use_feat,
py::object py_in_coords_key,
py::object py_out_coords_key) {
const auto &in_out =
getPruningInOutMaps(use_feat, py_in_coords_key, py_out_coords_key);
const InOutMapKey map_key =
getOriginMapHashKey(py_in_coords_key, py_out_coords_key);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
template <typename MapType>
const pInOutMapsRefPair<int> CoordsManager<MapType>::getUnionInOutMapsGPU(
vector<py::object> py_in_coords_keys, py::object py_out_coords_key) {
const auto &in_out = getUnionInOutMaps(py_in_coords_keys, py_out_coords_key);
const InOutMapKey map_key =
getUnionMapHashKey(py_in_coords_keys, py_out_coords_key);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
/*
* Given tensor_stride_src and tensor_stride_dst, find the respective coord_maps
* and return the indices of the coord_map_ind in coord_map_dst
*/
template <typename MapType>
vector<vector<at::Tensor>> CoordsManager<MapType>::getKernelMapGPU(
vector<int> tensor_strides, vector<int> strides, vector<int> kernel_sizes,
vector<int> dilations, int region_type, at::Tensor offsets,
py::object py_in_coords_key, py::object py_out_coords_key,
bool is_transpose, bool is_pool) {
// WARNING: This function will not work properly with custom region types.
ASSERT(region_type != 2,
"Currently, it does not support the custom region type.");
const InOutMapKey map_key = getMapHashKey(
tensor_strides, strides, kernel_sizes, dilations, region_type,
py_in_coords_key, py_out_coords_key, is_transpose, is_pool);
const auto &in_out = getInOutMapsGPU(
tensor_strides, strides, kernel_sizes, dilations, region_type, offsets,
py_in_coords_key, py_out_coords_key, false);
const pInOutMaps<int> &in_maps = in_out.first;
const pInOutMaps<int> &out_maps = in_out.second;
int all_volume = 0, kernel_volume = in_maps.size();
for (int k = 0; k < kernel_volume; k++)
all_volume += in_maps[k].size();
// CUDA_CHECK(hipGetDevice(&device_id));
torch::TensorOptions options =
torch::TensorOptions()
.dtype(torch::kInt64)
// .device(torch::kCUDA)
.device(torch::kCUDA, gpu_memory_manager.device_id)
.requires_grad(false);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
vector<at::Tensor> in_tensors, out_tensors;
for (int k = 0; k < kernel_volume; k++) {
auto curr_volume = in_maps[k].size();
if (curr_volume <= 0)
continue;
at::Tensor in_kernel_map =
torch::empty({(long)curr_volume}, options).contiguous();
at::Tensor out_kernel_map =
torch::empty({(long)curr_volume}, options).contiguous();
// Wait until both memory chunks are allocated
CUDA_CHECK(hipStreamSynchronize(stream));
hipLaunchKernelGGL(( detail::dtypeCopy<int, long>)
, dim3(GET_BLOCKS(curr_volume)), dim3(CUDA_NUM_THREADS), 0, stream,
in_maps[k].data(), in_kernel_map.data<long>(), curr_volume);
hipLaunchKernelGGL(( detail::dtypeCopy<int, long>)
, dim3(GET_BLOCKS(curr_volume)), dim3(CUDA_NUM_THREADS), 0, stream,
out_maps[k].data(), out_kernel_map.data<long>(), curr_volume);
in_tensors.push_back(move(in_kernel_map));
out_tensors.push_back(move(out_kernel_map));
}
return {in_tensors, out_tensors};
}
template class CoordsManager<CoordsToIndexMap>;
// template class CoordsManager<CoordsToVectorMap>;
} // end namespace minkowski
| a8b650c09fdc36df8866c5582399c547f0dcdcc1.cu | /* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coords_manager.hpp"
#include <pybind11/pybind11.h>
namespace py = pybind11;
namespace minkowski {
namespace detail {
template <typename SrcType, typename DstType>
__global__ void dtypeCopy(SrcType const *src, DstType *dst, size_t n) {
CUDA_KERNEL_LOOP(index, n) { dst[index] = src[index]; }
}
} // namespace detail
template <typename MapType>
const pInOutMaps<int>
CoordsManager<MapType>::copyInOutMapToGPU(const InOutMaps<int> &map) {
pInOutMaps<int> d_map;
const int n = getInOutMapsSize(map);
int *d_scr = (int *)gpu_memory_manager.gpuMalloc(n * sizeof(int));
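// A single allocation covers every kernel element; d_scr is advanced below to carve out one contiguous sub-span per element.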
for (const auto &cmap : map) {
// Copy (*p_in_maps)[k] to GPU
CUDA_CHECK(cudaMemcpy(d_scr, cmap.data(), cmap.size() * sizeof(int),
cudaMemcpyHostToDevice));
d_map.push_back(pVector<int>(d_scr, cmap.size()));
d_scr += cmap.size();
}
return d_map;
}
template <typename MapType>
void CoordsManager<MapType>::copyInOutMapsToGPU(const InOutMapKey &map_key) {
if (d_in_maps.find(map_key) == d_in_maps.end()) {
ASSERT(in_maps.find(map_key) != in_maps.end(),
"The InOutMap doesn't exists.");
d_in_maps[map_key] = copyInOutMapToGPU(in_maps[map_key]);
d_out_maps[map_key] = copyInOutMapToGPU(out_maps[map_key]);
}
}
template <typename MapType>
const pInOutMapsRefPair<int> CoordsManager<MapType>::getInOutMapsGPU(
const vector<int> &tensor_strides, const vector<int> &strides,
const vector<int> &kernel_sizes, const vector<int> &dilations,
int region_type, const at::Tensor &offsets, py::object py_in_coords_key,
py::object py_out_coords_key, bool is_transpose, bool is_pool,
bool force_creation) {
const auto &in_out =
getInOutMaps(tensor_strides, strides, kernel_sizes, dilations,
region_type, offsets, py_in_coords_key, py_out_coords_key,
is_transpose, is_pool, force_creation);
const InOutMapKey map_key = getMapHashKey(
tensor_strides, strides, kernel_sizes, dilations, region_type,
py_in_coords_key, py_out_coords_key, is_transpose, is_pool);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
template <typename MapType>
const pInOutMapsRefPair<int>
CoordsManager<MapType>::getOriginInOutMapsGPU(py::object py_in_coords_key,
py::object py_glob_coords_key) {
const auto &in_out = getOriginInOutMaps(py_in_coords_key, py_glob_coords_key);
const InOutMapKey map_key =
getOriginMapHashKey(py_in_coords_key, py_glob_coords_key);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
template <typename MapType>
const pInOutMapsRefPair<int>
CoordsManager<MapType>::getPruningInOutMapsGPU(at::Tensor use_feat,
py::object py_in_coords_key,
py::object py_out_coords_key) {
const auto &in_out =
getPruningInOutMaps(use_feat, py_in_coords_key, py_out_coords_key);
const InOutMapKey map_key =
getOriginMapHashKey(py_in_coords_key, py_out_coords_key);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
template <typename MapType>
const pInOutMapsRefPair<int> CoordsManager<MapType>::getUnionInOutMapsGPU(
vector<py::object> py_in_coords_keys, py::object py_out_coords_key) {
const auto &in_out = getUnionInOutMaps(py_in_coords_keys, py_out_coords_key);
const InOutMapKey map_key =
getUnionMapHashKey(py_in_coords_keys, py_out_coords_key);
copyInOutMapsToGPU(map_key);
return make_pair(ref(d_in_maps[map_key]), ref(d_out_maps[map_key]));
}
/*
* Given tensor_stride_src and tensor_stride_dst, find the respective coord_maps
* and return the indices of the coord_map_ind in coord_map_dst
*/
template <typename MapType>
vector<vector<at::Tensor>> CoordsManager<MapType>::getKernelMapGPU(
vector<int> tensor_strides, vector<int> strides, vector<int> kernel_sizes,
vector<int> dilations, int region_type, at::Tensor offsets,
py::object py_in_coords_key, py::object py_out_coords_key,
bool is_transpose, bool is_pool) {
// WARNING: This function will not work properly with custom region types.
ASSERT(region_type != 2,
"Currently, it does not support the custom region type.");
const InOutMapKey map_key = getMapHashKey(
tensor_strides, strides, kernel_sizes, dilations, region_type,
py_in_coords_key, py_out_coords_key, is_transpose, is_pool);
const auto &in_out = getInOutMapsGPU(
tensor_strides, strides, kernel_sizes, dilations, region_type, offsets,
py_in_coords_key, py_out_coords_key, false);
const pInOutMaps<int> &in_maps = in_out.first;
const pInOutMaps<int> &out_maps = in_out.second;
int all_volume = 0, kernel_volume = in_maps.size();
for (int k = 0; k < kernel_volume; k++)
all_volume += in_maps[k].size();
// CUDA_CHECK(cudaGetDevice(&device_id));
torch::TensorOptions options =
torch::TensorOptions()
.dtype(torch::kInt64)
// .device(torch::kCUDA)
.device(torch::kCUDA, gpu_memory_manager.device_id)
.requires_grad(false);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
vector<at::Tensor> in_tensors, out_tensors;
for (int k = 0; k < kernel_volume; k++) {
auto curr_volume = in_maps[k].size();
if (curr_volume <= 0)
continue;
at::Tensor in_kernel_map =
torch::empty({(long)curr_volume}, options).contiguous();
at::Tensor out_kernel_map =
torch::empty({(long)curr_volume}, options).contiguous();
// Wait until both memory chunks are allocated
CUDA_CHECK(cudaStreamSynchronize(stream));
detail::dtypeCopy<int, long>
<<<GET_BLOCKS(curr_volume), CUDA_NUM_THREADS, 0, stream>>>(
in_maps[k].data(), in_kernel_map.data<long>(), curr_volume);
detail::dtypeCopy<int, long>
<<<GET_BLOCKS(curr_volume), CUDA_NUM_THREADS, 0, stream>>>(
out_maps[k].data(), out_kernel_map.data<long>(), curr_volume);
in_tensors.push_back(move(in_kernel_map));
out_tensors.push_back(move(out_kernel_map));
}
return {in_tensors, out_tensors};
}
template class CoordsManager<CoordsToIndexMap>;
// template class CoordsManager<CoordsToVectorMap>;
} // end namespace minkowski
|
232bd36351ce81d74f1103f13c2446cf2f109547.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "BondedGroupData.cuh"
#include "ParticleData.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
/*! \file BondedGroupData.cu
\brief Implements the helper functions (GPU version) for updating the GPU bonded group tables
*/
namespace hoomd
{
template<unsigned int group_size, typename group_t>
__global__ void gpu_count_groups_kernel(const unsigned int n_groups,
const group_t* d_group_table,
const unsigned int* d_rtag,
unsigned int* d_scratch_idx,
unsigned int* d_scratch_g,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_groups)
return;
group_t g = d_group_table[group_idx];
for (unsigned int i = 0; i < group_size; ++i)
{
unsigned int tag_i = g.tag[i];
unsigned int pidx_i = d_rtag[tag_i];
// detect incomplete groups
if (pidx_i == NOT_LOCAL)
atomicMax(d_condition, next_flag + 1 + group_idx);
// write out group_idx to temporary array
d_scratch_g[i * n_groups + group_idx] = group_idx;
d_scratch_idx[i * n_groups + group_idx] = pidx_i;
// atomically increment number of groups
unsigned int n = 0;
if (pidx_i != NOT_LOCAL)
n = atomicInc(&d_n_groups[pidx_i], 0xffffffff);
if (n >= max_n_groups)
// set flag to indicate we need to grow the output array
atomicMax(d_condition, next_flag);
}
}
template<unsigned int group_size, typename group_t>
__global__ void gpu_group_scatter_kernel(unsigned int n_scratch,
const unsigned int* d_scratch_g,
const unsigned int* d_scratch_idx,
const unsigned int* d_offset,
const group_t* d_members,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
unsigned int pidx_group_table_pitch,
bool has_type_mapping)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_scratch)
return;
unsigned int pidx = d_scratch_idx[i];
unsigned int offset = d_offset[i] * pidx_group_table_pitch + pidx;
// load group
unsigned int group_idx = d_scratch_g[i];
group_t g = d_members[group_idx];
// construct compact group representation, excluding particle pidx
group_t p;
if (has_type_mapping)
{
// last element = group type
p.idx[group_size - 1] = d_group_typeval[group_idx].type;
}
else
{
// last element = group index
p.idx[group_size - 1] = group_idx;
}
unsigned int j = 0;
// position in group
unsigned int gpos = 0;
for (unsigned int k = 0; k < group_size; ++k)
{
unsigned int tag_k = g.tag[k];
unsigned int pidx_k = d_rtag[tag_k];
if (pidx_k == pidx)
{
gpos = k;
continue;
}
p.idx[j++] = pidx_k;
}
d_pidx_group_table[offset] = p;
d_pidx_gpos_table[offset] = gpos;
}
template<unsigned int group_size, typename group_t>
void gpu_update_group_table(const unsigned int n_groups,
const unsigned int N,
const group_t* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc)
{
// construct scratch table by expanding the group table by particle index
unsigned int block_size = 256;
unsigned n_blocks = n_groups / block_size + 1;
// reset number of groups
hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int) * N);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_count_groups_kernel<group_size>),
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups,
d_group_table,
d_rtag,
d_scratch_idx,
d_scratch_g,
d_n_groups,
max_n_groups,
d_condition,
next_flag);
// read back flag
hipMemcpy(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (!(flag >= next_flag) && n_groups)
{
// we are good, fill group table
// sort groups by particle idx
thrust::device_ptr<unsigned int> scratch_idx(d_scratch_idx);
thrust::device_ptr<unsigned int> scratch_g(d_scratch_g);
#ifdef __HIP_PLATFORM_HCC__
thrust::sort_by_key(thrust::hip::par(alloc),
#else
thrust::sort_by_key(thrust::hip::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
scratch_g);
// perform a segmented scan of d_scratch_idx
thrust::device_ptr<unsigned int> offsets(d_offsets);
thrust::constant_iterator<unsigned int> const_it(1);
#ifdef __HIP_PLATFORM_HCC__
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#else
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
const_it,
offsets);
// scatter groups to destinations
block_size = 256;
n_blocks = (group_size * n_groups) / block_size + 1;
hipLaunchKernelGGL(gpu_group_scatter_kernel<group_size>,
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups * group_size,
d_scratch_g,
d_scratch_idx,
d_offsets,
d_group_table,
d_group_typeval,
d_rtag,
d_pidx_group_table,
d_pidx_gpos_table,
pidx_group_table_pitch,
has_type_mapping);
}
}
/*
* Explicit template instantiations
*/
//! BondData
template void gpu_update_group_table<2>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<2>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<2>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! AngleData
template void gpu_update_group_table<3>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<3>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<3>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! DihedralData and ImproperData
template void gpu_update_group_table<4>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<4>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<4>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
} // end namespace hoomd
| 232bd36351ce81d74f1103f13c2446cf2f109547.cu | // Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "BondedGroupData.cuh"
#include "ParticleData.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
/*! \file BondedGroupData.cu
\brief Implements the helper functions (GPU version) for updating the GPU bonded group tables
*/
namespace hoomd
{
template<unsigned int group_size, typename group_t>
__global__ void gpu_count_groups_kernel(const unsigned int n_groups,
const group_t* d_group_table,
const unsigned int* d_rtag,
unsigned int* d_scratch_idx,
unsigned int* d_scratch_g,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_groups)
return;
group_t g = d_group_table[group_idx];
for (unsigned int i = 0; i < group_size; ++i)
{
unsigned int tag_i = g.tag[i];
unsigned int pidx_i = d_rtag[tag_i];
// detect incomplete groups
if (pidx_i == NOT_LOCAL)
atomicMax(d_condition, next_flag + 1 + group_idx);
// write out group_idx to temporary array
d_scratch_g[i * n_groups + group_idx] = group_idx;
d_scratch_idx[i * n_groups + group_idx] = pidx_i;
// atomically increment number of groups
unsigned int n = 0;
if (pidx_i != NOT_LOCAL)
n = atomicInc(&d_n_groups[pidx_i], 0xffffffff);
if (n >= max_n_groups)
// set flag to indicate we need to grow the output array
atomicMax(d_condition, next_flag);
}
}
template<unsigned int group_size, typename group_t>
__global__ void gpu_group_scatter_kernel(unsigned int n_scratch,
const unsigned int* d_scratch_g,
const unsigned int* d_scratch_idx,
const unsigned int* d_offset,
const group_t* d_members,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
unsigned int pidx_group_table_pitch,
bool has_type_mapping)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_scratch)
return;
unsigned int pidx = d_scratch_idx[i];
unsigned int offset = d_offset[i] * pidx_group_table_pitch + pidx;
// load group
unsigned int group_idx = d_scratch_g[i];
group_t g = d_members[group_idx];
// construct compact group representation, excluding particle pidx
group_t p;
if (has_type_mapping)
{
// last element = group type
p.idx[group_size - 1] = d_group_typeval[group_idx].type;
}
else
{
// last element = group index
p.idx[group_size - 1] = group_idx;
}
unsigned int j = 0;
// position in group
unsigned int gpos = 0;
for (unsigned int k = 0; k < group_size; ++k)
{
unsigned int tag_k = g.tag[k];
unsigned int pidx_k = d_rtag[tag_k];
if (pidx_k == pidx)
{
gpos = k;
continue;
}
p.idx[j++] = pidx_k;
}
d_pidx_group_table[offset] = p;
d_pidx_gpos_table[offset] = gpos;
}
template<unsigned int group_size, typename group_t>
void gpu_update_group_table(const unsigned int n_groups,
const unsigned int N,
const group_t* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc)
{
// construct scratch table by expanding the group table by particle index
unsigned int block_size = 256;
unsigned n_blocks = n_groups / block_size + 1;
// reset number of groups
hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int) * N);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_count_groups_kernel<group_size>),
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups,
d_group_table,
d_rtag,
d_scratch_idx,
d_scratch_g,
d_n_groups,
max_n_groups,
d_condition,
next_flag);
// read back flag
hipMemcpy(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (!(flag >= next_flag) && n_groups)
{
// we are good, fill group table
// sort groups by particle idx
thrust::device_ptr<unsigned int> scratch_idx(d_scratch_idx);
thrust::device_ptr<unsigned int> scratch_g(d_scratch_g);
#ifdef __HIP_PLATFORM_HCC__
thrust::sort_by_key(thrust::hip::par(alloc),
#else
thrust::sort_by_key(thrust::cuda::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
scratch_g);
// perform a segmented scan of d_scratch_idx
thrust::device_ptr<unsigned int> offsets(d_offsets);
thrust::constant_iterator<unsigned int> const_it(1);
#ifdef __HIP_PLATFORM_HCC__
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#else
thrust::exclusive_scan_by_key(thrust::cuda::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
const_it,
offsets);
// scatter groups to destinations
block_size = 256;
n_blocks = (group_size * n_groups) / block_size + 1;
hipLaunchKernelGGL(gpu_group_scatter_kernel<group_size>,
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups * group_size,
d_scratch_g,
d_scratch_idx,
d_offsets,
d_group_table,
d_group_typeval,
d_rtag,
d_pidx_group_table,
d_pidx_gpos_table,
pidx_group_table_pitch,
has_type_mapping);
}
}
/*
* Explicit template instantiations
*/
//! BondData
template void gpu_update_group_table<2>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<2>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<2>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! AngleData
template void gpu_update_group_table<3>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<3>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<3>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! DihedralData and ImproperData
template void gpu_update_group_table<4>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<4>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<4>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
} // end namespace hoomd
|
41649cbb84452cbff19e857cec98cddb5a9560d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <ctype.h>
#include <errno.h>
/* find the appropriate way to define explicitly sized types */
#if (__STDC_VERSION__ >= 199900) || defined(__GLIBC__) /* C99 or GNU libc */
#include <stdint.h>
#elif defined(__unix__) || defined(unix) || defined(__MACH__)
#include <sys/types.h>
#elif defined(_MSC_VER) /* the nameless one */
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
void render1(int xsz, int ysz, u_int32_t *host_fb, int samples);
__global__ void render2(u_int32_t *device_fb, int samples, int xsz, int ysz);
#define cudaErrorCheck(call) { cudaAssert(call,__FILE__,__LINE__); }
void cudaAssert(const hipError_t err, const char *file, const int line)
{
if( hipSuccess != err) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",
file, line, hipGetErrorString(err) );
exit(1);
}
}
int main(int argc, char **argv) {
int xres = 100;
int yres = 100;
int samples = 1;
u_int32_t *array;
if(!(array = (u_int32_t *)malloc(xres * yres * sizeof(u_int32_t)))) {
perror("pixel buffer allocation failed");
return EXIT_FAILURE;
}
render1(xres, yres, array, samples);
for(int i = 0; i<xres; i++) {
for(int j = 0; j<yres; j++) {
printf("%i, %i: %i\n", i, j, array[i + sizeof(u_int32_t)*j]);
}
}
free(array);
return 0;
}
void render1(int xsz, int ysz, u_int32_t *host_fb, int samples) {
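// Launch one thread per pixel; the grid is rounded up so partially filled edge blocks still cover the image.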
dim3 threads_per_block(16, 16);
int whole_blocks_x = xsz/threads_per_block.x;
int whole_blocks_y = ysz/threads_per_block.y;
int remainder_threads_x = xsz % threads_per_block.x;
int remainder_threads_y = ysz % threads_per_block.y;
int extra_block_x = 0;
int extra_block_y = 0;
if (remainder_threads_x > 0) {
extra_block_x = 1;
}
if (remainder_threads_y > 0) {
extra_block_y = 1;
}
int num_blocks_x = whole_blocks_x + extra_block_x;
int num_blocks_y = whole_blocks_y + extra_block_y;
dim3 num_blocks(num_blocks_x, num_blocks_y);
size_t arr_size = xsz * ysz * sizeof(u_int32_t);
u_int32_t *device_fb = 0;
cudaErrorCheck(hipMalloc((void **)&device_fb, arr_size));
cudaErrorCheck(hipMemcpy(device_fb, host_fb, arr_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( render2), dim3(num_blocks),dim3(threads_per_block), 0, 0, device_fb, samples, xsz, ysz);
hipPeekAtLastError(); // Checks for launch error
cudaErrorCheck( hipDeviceSynchronize() );
cudaErrorCheck(hipMemcpy(host_fb, device_fb, arr_size, hipMemcpyDeviceToHost));
cudaErrorCheck( hipFree(device_fb) );
}
__global__ void render2(u_int32_t *device_fb, int samples, int xsz, int ysz) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i >= xsz) || (j >= ysz)) {
return;
} else {
device_fb[i + j*xsz] = i*j; // row-major index into the xsz*ysz buffer
return;
}
}
| 41649cbb84452cbff19e857cec98cddb5a9560d7.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <ctype.h>
#include <errno.h>
/* find the appropriate way to define explicitly sized types */
#if (__STDC_VERSION__ >= 199900) || defined(__GLIBC__) /* C99 or GNU libc */
#include <stdint.h>
#elif defined(__unix__) || defined(unix) || defined(__MACH__)
#include <sys/types.h>
#elif defined(_MSC_VER) /* the nameless one */
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
void render1(int xsz, int ysz, u_int32_t *host_fb, int samples);
__global__ void render2(u_int32_t *device_fb, int samples, int xsz, int ysz);
#define cudaErrorCheck(call) { cudaAssert(call,__FILE__,__LINE__); }
void cudaAssert(const cudaError err, const char *file, const int line)
{
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",
file, line, cudaGetErrorString(err) );
exit(1);
}
}
int main(int argc, char **argv) {
int xres = 100;
int yres = 100;
int samples = 1;
u_int32_t *array;
if(!(array = (u_int32_t *)malloc(xres * yres * sizeof(u_int32_t)))) {
perror("pixel buffer allocation failed");
return EXIT_FAILURE;
}
render1(xres, yres, array, samples);
for(int i = 0; i<xres; i++) {
for(int j = 0; j<yres; j++) {
printf("%i, %i: %i\n", i, j, array[i + sizeof(u_int32_t)*j]);
}
}
free(array);
return 0;
}
void render1(int xsz, int ysz, u_int32_t *host_fb, int samples) {
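// Launch one thread per pixel; the grid is rounded up so partially filled edge blocks still cover the image.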
dim3 threads_per_block(16, 16);
int whole_blocks_x = xsz/threads_per_block.x;
int whole_blocks_y = ysz/threads_per_block.y;
int remainder_threads_x = xsz % threads_per_block.x;
int remainder_threads_y = ysz % threads_per_block.y;
int extra_block_x = 0;
int extra_block_y = 0;
if (remainder_threads_x > 0) {
extra_block_x = 1;
}
if (remainder_threads_y > 0) {
extra_block_y = 1;
}
int num_blocks_x = whole_blocks_x + extra_block_x;
int num_blocks_y = whole_blocks_y + extra_block_y;
dim3 num_blocks(num_blocks_x, num_blocks_y);
size_t arr_size = xsz * ysz * sizeof(u_int32_t);
u_int32_t *device_fb = 0;
cudaErrorCheck(cudaMalloc((void **)&device_fb, arr_size));
cudaErrorCheck(cudaMemcpy(device_fb, host_fb, arr_size, cudaMemcpyHostToDevice));
render2<<<num_blocks,threads_per_block>>>(device_fb, samples, xsz, ysz);
cudaPeekAtLastError(); // Checks for launch error
cudaErrorCheck( cudaThreadSynchronize() );
cudaErrorCheck(cudaMemcpy(host_fb, device_fb, arr_size, cudaMemcpyDeviceToHost));
cudaErrorCheck( cudaFree(device_fb) );
}
__global__ void render2(u_int32_t *device_fb, int samples, int xsz, int ysz) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i >= xsz) || (j >= ysz)) {
return;
} else {
device_fb[i + j*xsz] = i*j; // row-major index into the xsz*ysz buffer
return;
}
}
|
fa083d4f2e345d29d6d71713b685af9c74cd212d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
/* derivative of helva, from Mathematica */
__device__ hipComplex helvp(hipComplex z)
{
hipComplex out(jnf(2,z.r),jnf(1,z.i));
return out;
}
__device__ hipComplex lanna(hipComplex z)
{
hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r)));
return out;
}
__device__ hipComplex harva(hipComplex z)
{
hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i));
return out;
}
__device__ hipComplex herve(hipComplex z)
{
hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r));
return out;
}
__device__ hipComplex alver(hipComplex z)
{
hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ hipComplex alvir(hipComplex z)
{
hipComplex out(j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ hipComplex hexva(int m, hipComplex z)
{
hipComplex out(jnf(m,z.r),jnf(m,z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halvi(hipComplex z)
{
hipComplex out(j1f(z.r),-j0f(z.i));
return out;
}
__device__ hipComplex ahilv(hipComplex z)
{
hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex aciwa(hipComplex z)
{
hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thy(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex origo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale =10;
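// Map the pixel (c, r) onto a scale-by-scale window of the complex plane, shifted by +10 on both axes.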
float fx = -scale * (float)(DIM/2 - c)/(DIM/2)+10;
float fy = scale * (float)(DIM/2 - r)/(DIM/2)+10;
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex tin(1/8.0,0.0);
hipComplex aon = expc(tin*ai*moux);
hipComplex uon= expc(tin*mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex vue = cue;
hipComplex rhuva(3.0,0.0);
hipComplex rarva(3.0,0.0);
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<5;v++)
{
nue = cue;
cue = cue - (conj(irigo(cue,uon*faxon))-thy(cue,aon*fixon))/(irigo(cue+irigo(cue,aon*fixon),uon*fixon)/(irigo(cue,aon*fixon)-unity));
accume = accume * expc(irigo(cue,aon*faxon)) * (nue/cue);
}
cue = accume;
/*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | fa083d4f2e345d29d6d71713b685af9c74cd212d.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
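// thess: truncated Jacobi triple product (theta_3 style), ten factors of
// (1 - q^(2n)) * (1 + 2 q^(2n-1) cos(2z) + q^(4n-2)).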
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
 cuComplex pai(3.14159265358979,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
 cuComplex pai(3.14159265358979,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
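// Componentwise special-function helpers (mostly Bessel j0/j1/jn/y0/y1 applied to the
// real and imaginary parts separately); used to perturb the theta-style products below.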
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
/* derivative of helva, from Mathematica */
__device__ cuComplex helvp(cuComplex z)
{
cuComplex out(jnf(2,z.r),jnf(1,z.i));
return out;
}
__device__ cuComplex lanna(cuComplex z)
{
cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r)));
return out;
}
__device__ cuComplex harva(cuComplex z)
{
cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i));
return out;
}
__device__ cuComplex herve(cuComplex z)
{
cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r));
return out;
}
__device__ cuComplex alver(cuComplex z)
{
cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ cuComplex alvir(cuComplex z)
{
cuComplex out(j0f(z.r),1.0/j1f(z.i));
return out;
}
__device__ cuComplex hexva(int m, cuComplex z)
{
cuComplex out(jnf(m,z.r),jnf(m,z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halvi(cuComplex z)
{
cuComplex out(j1f(z.r),-j0f(z.i));
return out;
}
__device__ cuComplex ahilv(cuComplex z)
{
cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex aciwa(cuComplex z)
{
cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thy(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex origo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
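// Per-pixel kernel: maps (c, r) to a point q in the complex plane, runs a few
// Newton-style iterations of the Bessel-weighted theta products above while
// accumulating a product, and colors the pixel from the phase of the result.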
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
  const int r = blockIdx.y*blockDim.y + threadIdx.y;
  if ((c >= w) || (r >= h)) return; // guard threads that fall outside the image bounds
  const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale =10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2)+10;
float fy = scale * (float)(DIM/2 - r)/(DIM/2)+10;
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex tin(1/8.0,0.0);
cuComplex aon = expc(tin*ai*moux);
cuComplex uon= expc(tin*mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex vue = cue;
cuComplex rhuva(3.0,0.0);
cuComplex rarva(3.0,0.0);
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<5;v++)
{
nue = cue;
cue = cue - (conj(irigo(cue,uon*faxon))-thy(cue,aon*fixon))/(irigo(cue+irigo(cue,aon*fixon),uon*fixon)/(irigo(cue,aon*fixon)-unity));
accume = accume * expc(irigo(cue,aon*faxon)) * (nue/cue);
}
cue = accume;
/*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/
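  // color the pixel from the phase of the accumulated value, using phase-shifted sin^2 channels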
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
67ebc3a954498e41a66af6504ce6f6613a93f716.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VX3_SimulationManager.cuh"
#include "ctool.h"
#include <boost/algorithm/string/case_conv.hpp>
#include <queue>
#include <stack>
#include <utility>
#include "VX3_VoxelyzeKernel.cuh"
#include "VX_Sim.h" //readVXA
__global__ void CUDA_Simulation(VX3_VoxelyzeKernel *d_voxelyze_3, int num_simulation, int device_index) {
int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_index < num_simulation) {
VX3_VoxelyzeKernel *d_v3 = &d_voxelyze_3[thread_index];
if (d_v3->num_d_links == 0 and d_v3->num_d_voxels == 0) {
printf(COLORCODE_BOLD_RED "No links and no voxels. Simulation %d (%s) abort.\n" COLORCODE_RESET, thread_index,
d_v3->vxa_filename);
return;
}
        d_v3->syncVectors(); // Every time we pass a class with VX3_vectors in
// it, we should sync hd_vector to d_vector first.
d_v3->saveInitialPosition();
d_v3->isSurfaceChanged = true; // trigger surface regenerating and calculate normal thrust for the first time
d_v3->registerTargets();
printf(COLORCODE_GREEN "%d) Simulation %d runs: %s.\n" COLORCODE_RESET, device_index, thread_index, d_v3->vxa_filename);
// printf("%d) Simulation %d: links %d, voxels %d.\n", device_index, i,
// d_v3->num_d_links, d_v3->num_d_voxels); printf("%d) Simulation %d
// enableAttach %d.\n", device_index, i, d_v3->enableAttach);
//
// print check regenerateSurfaceVoxels() is correct. (TODO: shouldn't
// this be tested in seperate test code? :) printf("all voxels:"); for
// (int j=0;j<d_v3->num_d_voxels;j++) {
// printf(" [%d]%p ", j, &d_v3->d_voxels[j]);
// }
// printf("\nsurface:");
// for (int j=0;j<d_v3->num_d_surface_voxels;j++) {
// printf(" [%d]%p ", j, d_v3->d_surface_voxels[j]);
// }
//
if (d_v3->RecordStepSize) { // output History file
// rescale the whole space. so history file can contain less digits. ( e.g. not 0.000221, but 2.21 )
printf("\n{{{setting}}}<rescale>0.001</rescale>\n");
// materials' color
for (int i = 0; i < d_v3->num_d_voxelMats; i++) {
auto &mat = d_v3->d_voxelMats[i];
printf("{{{setting}}}<matcolor><id>%d</id><r>%.2f</r><g>%.2f</g><b>%.2f</b><a>%.2f</a></matcolor>\n", mat.matid,
mat.r / 255., mat.g / 255., mat.b / 255., mat.a / 255.);
}
printf("\n{{{setting}}}<voxel_size>%f</voxel_size>\n", d_v3->voxSize);
}
double vs = 1 / 0.001;
d_v3->updateCurrentCenterOfMass();
d_v3->InitializeCenterOfMass();
int real_stepsize = int(d_v3->RecordStepSize / (10000 * d_v3->recommendedTimeStep() * d_v3->DtFrac))+1;
printf("real_stepsize: %d ; recommendedTimeStep %f; d_v3->DtFrac %f . \n", real_stepsize, d_v3->recommendedTimeStep(),
d_v3->DtFrac);
// printf("Initial CoM: %f %f %f mm\n",
// d_v3->initialCenterOfMass.x*1000, d_v3->initialCenterOfMass.y*1000,
// d_v3->initialCenterOfMass.z*1000);
for (int j = 0; j < 1000000; j++) { // Maximum Steps 1000000
if (d_v3->StopConditionMet())
break;
if (!d_v3->doTimeStep()) {
printf(COLORCODE_BOLD_RED "\n%d) Simulation %d Diverged: %s.\n" COLORCODE_RESET, device_index, thread_index,
d_v3->vxa_filename);
break;
}
if (d_v3->RecordStepSize) { // output History file
if (j % real_stepsize == 0) {
if (d_v3->RecordVoxel) {
// Voxels
printf("<<<Step%d Time:%f>>>", j, d_v3->currentTime);
for (int i = 0; i < d_v3->num_d_surface_voxels; i++) {
auto v = d_v3->d_surface_voxels[i];
if (v->removed)
continue;
if (v->isSurface()) {
printf("%.1f,%.1f,%.1f,", v->pos.x * vs, v->pos.y * vs, v->pos.z * vs);
printf("%.1f,%.2f,%.2f,%.2f,", v->orient.AngleDegrees(), v->orient.x, v->orient.y, v->orient.z);
VX3_Vec3D<double> ppp, nnn;
nnn = v->cornerOffset(NNN);
ppp = v->cornerOffset(PPP);
printf("%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,", nnn.x * vs, nnn.y * vs, nnn.z * vs, ppp.x * vs, ppp.y * vs,
ppp.z * vs);
printf("%d,", v->mat->matid); // for coloring
printf("%.1f,", v->localSignal); // for coloring as well.
printf(";");
}
}
printf("<<<>>>");
}
if (d_v3->RecordLink) {
// Links
printf("|[[[%d]]]", j);
for (int i = 0; i < d_v3->d_v_links.size(); i++) {
auto l = d_v3->d_v_links[i];
if (l->removed)
continue;
// only draw links that are not detached.
if (!l->isDetached) {
auto v1 = l->pVPos;
printf("%.4f,%.4f,%.4f,", v1->pos.x, v1->pos.y, v1->pos.z);
auto v2 = l->pVNeg;
printf("%.4f,%.4f,%.4f,", v2->pos.x, v2->pos.y, v2->pos.z);
printf(";");
}
}
printf("[[[]]]");
}
printf("\n");
}
}
}
d_v3->updateCurrentCenterOfMass();
d_v3->computeFitness();
printf(COLORCODE_BLUE "%d) Simulation %d ends: %s Time: %f, angleSampleTimes: %d.\n" COLORCODE_RESET, device_index, thread_index,
d_v3->vxa_filename, d_v3->currentTime, d_v3->angleSampleTimes);
}
}
VX3_SimulationManager::VX3_SimulationManager(std::vector<std::vector<fs::path>> in_sub_batches, fs::path in_base, fs::path in_input_dir,
int in_num_of_devices)
: sub_batches(in_sub_batches), base(in_base), num_of_devices(in_num_of_devices), input_dir(in_input_dir) {
d_voxelyze_3s.resize(num_of_devices);
for (int i = 0; i < num_of_devices; i++) {
d_voxelyze_3s[i] = NULL;
}
}
VX3_SimulationManager::~VX3_SimulationManager() {
for (auto d : d_voxelyze_3s) {
if (d)
VcudaFree(d);
}
}
void VX3_SimulationManager::start() {
for (int device_index = 0; device_index < num_of_devices; device_index++) { // multi GPUs
auto files = sub_batches[device_index];
if (files.size()) {
VcudaSetDevice(device_index);
printf("=== set device to %d for %ld simulations ===\n", device_index, files.size());
// readVXA(base)
readVXD(base, files, device_index);
startKernel(files.size(), device_index);
}
}
VcudaDeviceSynchronize();
for (int device_index = 0; device_index < num_of_devices; device_index++) { // multi GPUs
auto files = sub_batches[device_index];
collectResults(files.size(), device_index);
}
sortResults();
}
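// Flattens the expression subtree at node_address into a token array for
// on-device evaluation: a BFS over the ptree gathers (op, value) pairs, and
// popping the stack writes them back in reverse order, with mtEND (pushed first)
// terminating the array.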
void VX3_SimulationManager::ParseMathTree(VX3_MathTreeToken *field_ptr, size_t max_length, std::string node_address, pt::ptree &tree) {
// Classic BFS, push all token into stack
std::queue<pt::ptree> frontier;
std::stack<std::pair<std::string, std::string>> tokens;
tokens.push(make_pair((std::string) "mtEND", (std::string) ""));
auto root = tree.get_child_optional(node_address);
if (!root) {
// printf(COLORCODE_BOLD_RED "ERROR: No ParseMathTree %s in VXA.\n", node_address.c_str());
return;
}
frontier.push(tree.get_child(node_address));
while (!frontier.empty()) {
std::queue<pt::ptree> next_frontier;
auto t = frontier.front();
frontier.pop();
BOOST_FOREACH (pt::ptree::value_type &v_child, t.get_child("")) {
std::string value = v_child.second.data();
boost::trim_right(value);
std::string op = v_child.first.data();
boost::trim_right(op);
// std::cout << op << ":" << value << "\n";
tokens.push(make_pair(op, value));
frontier.push(v_child.second);
}
}
// pop from stack to VX3_MathTreeToken* (so we get a reversed order)
int i = 0;
while (!tokens.empty()) {
        if (i >= max_length) {
printf(COLORCODE_BOLD_RED "ERROR: Token size overflow.\n");
return;
}
std::pair<std::string, std::string> tok = tokens.top();
VX3_MathTreeToken *p = &field_ptr[i];
if (tok.first == "mtEND") {
p->op = mtEND;
} else if (tok.first == "mtVAR") {
p->op = mtVAR;
if (tok.second == "x") {
p->value = 0;
} else if (tok.second == "y") {
p->value = 1;
} else if (tok.second == "z") {
p->value = 2;
} else if (tok.second == "hit") {
p->value = 3;
} else if (tok.second == "t") {
p->value = 4;
} else if (tok.second == "angle") {
p->value = 5;
} else if (tok.second == "targetCloseness") {
p->value = 6;
} else if (tok.second == "numClosePairs") {
p->value = 7;
} else if (tok.second == "num_voxel") {
p->value = 8;
} else {
printf(COLORCODE_BOLD_RED "ERROR: No such variable.\n");
break;
}
} else if (tok.first == "mtCONST") {
p->op = mtCONST;
try {
p->value = std::stod(tok.second);
} catch(...) {
printf(COLORCODE_BOLD_RED "ERROR: mtCONST with no number.\n");
break;
}
} else if (tok.first == "mtADD") {
p->op = mtADD;
} else if (tok.first == "mtSUB") {
p->op = mtSUB;
} else if (tok.first == "mtMUL") {
p->op = mtMUL;
} else if (tok.first == "mtDIV") {
p->op = mtDIV;
} else if (tok.first == "mtPOW") {
p->op = mtPOW;
} else if (tok.first == "mtSQRT") {
p->op = mtSQRT;
} else if (tok.first == "mtE") {
p->op = mtE;
} else if (tok.first == "mtPI") {
p->op = mtPI;
} else if (tok.first == "mtSIN") {
p->op = mtSIN;
} else if (tok.first == "mtCOS") {
p->op = mtCOS;
} else if (tok.first == "mtTAN") {
p->op = mtTAN;
} else if (tok.first == "mtATAN") {
p->op = mtATAN;
} else if (tok.first == "mtLOG") {
p->op = mtLOG;
} else if (tok.first == "mtINT") {
p->op = mtINT;
} else if (tok.first == "mtABS") {
p->op = mtABS;
} else if (tok.first == "mtNOT") {
p->op = mtNOT;
} else if (tok.first == "mtGREATERTHAN") {
p->op = mtGREATERTHAN;
} else if (tok.first == "mtLESSTHAN") {
p->op = mtLESSTHAN;
} else if (tok.first == "mtAND") {
p->op = mtAND;
} else if (tok.first == "mtOR") {
p->op = mtOR;
} else if (tok.first == "mtNORMALCDF") {
p->op = mtNORMALCDF;
} else {
printf(COLORCODE_BOLD_RED "ERROR: Token Operation not implemented.\n");
break;
}
i++;
tokens.pop();
}
}
void VX3_SimulationManager::readVXD(fs::path base, std::vector<fs::path> files, int device_index) {
pt::ptree pt_baseVXA;
pt::read_xml(base.string(), pt_baseVXA);
int num_simulation = files.size();
VcudaMalloc((void **)&d_voxelyze_3s[device_index], num_simulation * sizeof(VX3_VoxelyzeKernel));
int i = 0;
for (auto &file : files) {
// Read VXD file, clone base VXA, replace parts specified in VXD, send
// to MainSim.ReadVXA to process. printf("reading %s\n",
// (input_dir/file).c_str());
pt::ptree pt_VXD;
pt::read_xml((input_dir / file).string(), pt_VXD);
pt::ptree pt_merged = pt_baseVXA;
ctool::ptree_merge(pt_VXD, pt_merged);
std::ostringstream stream_merged;
std::string str_merged;
pt::write_xml(stream_merged, pt_merged);
str_merged = stream_merged.str();
CXML_Rip XML;
XML.fromXMLText(&str_merged);
CVX_Environment MainEnv;
CVX_Sim MainSim;
CVX_Object MainObj;
MainEnv.pObj = &MainObj; // connect environment to object
        MainSim.pEnv = &MainEnv;   // connect Simulation to environment
std::string RetMessage;
// std::cout<<str_merged;
MainSim.ReadVXA(&XML, &RetMessage);
MainSim.Import(NULL, NULL, &RetMessage);
if (!RetMessage.empty()) {
printf(COLORCODE_BOLD_RED "%s\n" COLORCODE_RESET, RetMessage.c_str());
}
// for (auto m:MainSim.Vx.voxelMats) {
// int i=0;
// for (auto mm:m->dependentMaterials) {
// printf("m:%p %d/%ld -> mm: %p\n", m, i,
// m->dependentMaterials.size(), mm); i++;
// }
// }
VX3_VoxelyzeKernel h_d_tmp(&MainSim);
// More VXA settings which is new in VX3
strcpy(h_d_tmp.vxa_filename, file.filename().c_str());
std::string RawPrint = pt_merged.get<std::string>("VXA.RawPrint", "");
std::cout << RawPrint << "\n";
ParseMathTree(h_d_tmp.StopConditionFormula, sizeof(h_d_tmp.StopConditionFormula),
"VXA.Simulator.StopCondition.StopConditionFormula", pt_merged);
h_d_tmp.EnableCollision = pt_merged.get<bool>("VXA.Simulator.AttachDetach.EnableCollision", true);
h_d_tmp.enableAttach = pt_merged.get<bool>("VXA.Simulator.AttachDetach.EnableAttach", false);
h_d_tmp.enableDetach = pt_merged.get<bool>("VXA.Simulator.AttachDetach.EnableDetach", false);
h_d_tmp.watchDistance = pt_merged.get<double>("VXA.Simulator.AttachDetach.watchDistance", 1.0);
h_d_tmp.boundingRadius = pt_merged.get<double>("VXA.Simulator.AttachDetach.boundingRadius", 0.75);
h_d_tmp.SafetyGuard = pt_merged.get<int>("VXA.Simulator.AttachDetach.SafetyGuard", 500);
ParseMathTree(h_d_tmp.AttachCondition[0], sizeof(h_d_tmp.AttachCondition[0]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_0", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[1], sizeof(h_d_tmp.AttachCondition[1]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_1", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[2], sizeof(h_d_tmp.AttachCondition[2]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_2", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[3], sizeof(h_d_tmp.AttachCondition[3]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_3", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[4], sizeof(h_d_tmp.AttachCondition[4]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_4", pt_merged);
h_d_tmp.RecordStepSize = pt_merged.get<int>("VXA.Simulator.RecordHistory.RecordStepSize", 0);
h_d_tmp.RecordLink = pt_merged.get<int>("VXA.Simulator.RecordHistory.RecordLink", 0);
h_d_tmp.RecordVoxel = pt_merged.get<int>("VXA.Simulator.RecordHistory.RecordVoxel", 1);
ParseMathTree(h_d_tmp.fitness_function, sizeof(h_d_tmp.fitness_function), "VXA.Simulator.FitnessFunction", pt_merged);
ParseMathTree(h_d_tmp.force_field.token_x_forcefield, sizeof(h_d_tmp.force_field.token_x_forcefield),
"VXA.Simulator.ForceField.x_forcefield", pt_merged);
ParseMathTree(h_d_tmp.force_field.token_y_forcefield, sizeof(h_d_tmp.force_field.token_y_forcefield),
"VXA.Simulator.ForceField.y_forcefield", pt_merged);
ParseMathTree(h_d_tmp.force_field.token_z_forcefield, sizeof(h_d_tmp.force_field.token_z_forcefield),
"VXA.Simulator.ForceField.z_forcefield", pt_merged);
// h_d_tmp.EnableTargetCloseness = pt_merged.get<int>("VXA.Simulator.EnableTargetCloseness", 0); abandoned.
h_d_tmp.SavePositionOfAllVoxels = pt_merged.get<int>("VXA.Simulator.SavePositionOfAllVoxels", 0);
h_d_tmp.MaxDistInVoxelLengthsToCountAsPair = pt_merged.get<double>("VXA.Simulator.MaxDistInVoxelLengthsToCountAsPair", 0);
h_d_tmp.EnableCilia = pt_merged.get<int>("VXA.Simulator.EnableCilia", 0);
h_d_tmp.EnableSignals = pt_merged.get<int>("VXA.Simulator.EnableSignals", 0);
// for Secondary Experiment
h_d_tmp.SecondaryExperiment = pt_merged.get<int>("VXA.Simulator.SecondaryExperiment", 0);
h_d_tmp.ReinitializeInitialPositionAfterThisManySeconds = pt_merged.get<double>("VXA.Simulator.ReinitializeInitialPositionAfterThisManySeconds", 0.0);
h_d_tmp.EnableExpansion = pt_merged.get<int>("VXA.Simulator.EnableExpansion", 0);
HeapSize = pt_merged.get<double>("VXA.GPU.HeapSize", 0.5);
if (HeapSize > 1.0) {
HeapSize = 0.99;
}
if (HeapSize < 0.01) {
HeapSize = 0.01;
}
PrintfFIFOSize = pt_merged.get<double>("VXA.GPU.PrintfFIFOSize", 50);
VcudaMemcpy(d_voxelyze_3s[device_index] + i, &h_d_tmp, sizeof(VX3_VoxelyzeKernel), hipMemcpyHostToDevice);
i++;
}
}
// GPU Heap is for in-kernel malloc(). Refer to
// https://stackoverflow.com/a/34795830/7001199
void VX3_SimulationManager::enlargeGPUHeapSize() {
size_t HeapSizeInBytes;
size_t free, total;
VcudaMemGetInfo(&free, &total);
printf("Total GPU memory %ld bytes.\n", total);
HeapSizeInBytes = HeapSize * total; // add some additional size
printf("Set GPU heap size to be %ld bytes.\n", HeapSizeInBytes);
VcudaDeviceSetLimit(hipLimitMallocHeapSize,
HeapSizeInBytes); // Set Heap Memory to 1G, instead of merely 8M.
// if "Lane User Stack Overflow" ocurs, maybe Stack Size too small, can try this:
// VcudaDeviceSetLimit(hipLimitStackSize, 2048);
}
/* gets the current printfFifo buffer size and increases it PrintfFIFOSize times */
void VX3_SimulationManager::enlargeGPUPrintfFIFOSize()
{
size_t PrintfFIFOSizeInBytes;
VcudaDeviceGetLimit(&PrintfFIFOSizeInBytes, hipLimitPrintfFifoSize);
VcudaDeviceSetLimit(hipLimitPrintfFifoSize, PrintfFIFOSizeInBytes*PrintfFIFOSize);
printf("set GPU printfFIFO size to be %ld bytes.\n", PrintfFIFOSizeInBytes*PrintfFIFOSize);
}
void VX3_SimulationManager::startKernel(int num_simulation, int device_index) {
int threadsPerBlock = 512;
int numBlocks = (num_simulation + threadsPerBlock - 1) / threadsPerBlock;
if (numBlocks == 1)
threadsPerBlock = num_simulation;
// printf("Starting kernel on device %d. passing d_voxelyze_3s[device_index]
// %p.\n", device_index, d_voxelyze_3s[device_index]);
// VX3_VoxelyzeKernel *result_voxelyze_kernel = (VX3_VoxelyzeKernel
// *)malloc(
// num_simulation * sizeof(VX3_VoxelyzeKernel));
// VcudaMemcpy(result_voxelyze_kernel, d_voxelyze_3s[device_index],
// num_simulation * sizeof(VX3_VoxelyzeKernel),
// hipMemcpyDeviceToHost);
enlargeGPUHeapSize();
enlargeGPUPrintfFIFOSize();
hipLaunchKernelGGL(( CUDA_Simulation), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_voxelyze_3s[device_index], num_simulation, device_index);
CUDA_CHECK_AFTER_CALL();
}
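// Copies each simulation's VX3_VoxelyzeKernel back from the device, then gathers
// voxel initial/final positions and sums the displacement of every measured voxel
// into an entry of h_results.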
void VX3_SimulationManager::collectResults(int num_simulation, int device_index) {
// insert results to h_results
VX3_VoxelyzeKernel *result_voxelyze_kernel = (VX3_VoxelyzeKernel *)malloc(num_simulation * sizeof(VX3_VoxelyzeKernel));
VcudaMemcpy(result_voxelyze_kernel, d_voxelyze_3s[device_index], num_simulation * sizeof(VX3_VoxelyzeKernel), hipMemcpyDeviceToHost);
for (int i = 0; i < num_simulation; i++) {
VX3_SimulationResult tmp;
tmp.currentTime = result_voxelyze_kernel[i].currentTime;
tmp.fitness_score = result_voxelyze_kernel[i].fitness_score;
tmp.x = result_voxelyze_kernel[i].currentCenterOfMass.x;
tmp.y = result_voxelyze_kernel[i].currentCenterOfMass.y;
tmp.z = result_voxelyze_kernel[i].currentCenterOfMass.z;
result_voxelyze_kernel[i].initialCenterOfMass.copyTo(tmp.initialCenterOfMass);
result_voxelyze_kernel[i].currentCenterOfMass.copyTo(tmp.currentCenterOfMass);
tmp.numClosePairs = result_voxelyze_kernel[i].numClosePairs;
tmp.voxSize = result_voxelyze_kernel[i].voxSize;
tmp.num_voxel = result_voxelyze_kernel[i].num_d_voxels;
tmp.vxa_filename = result_voxelyze_kernel[i].vxa_filename;
VX3_Voxel *tmp_v;
tmp_v = (VX3_Voxel *)malloc(result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Voxel));
VcudaMemcpy(tmp_v, result_voxelyze_kernel[i].d_voxels, result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Voxel),
hipMemcpyDeviceToHost);
tmp.SavePositionOfAllVoxels = result_voxelyze_kernel[i].SavePositionOfAllVoxels;
VX3_Vec3D<>* tmp_init;
tmp_init = (VX3_Vec3D<>*)malloc(result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Vec3D<>));
VcudaMemcpy(tmp_init, result_voxelyze_kernel[i].d_initialPosition, result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Vec3D<>), hipMemcpyDeviceToHost);
tmp.num_measured_voxel = 0;
tmp.total_distance_of_all_voxels = 0.0;
for (int j = 0; j < result_voxelyze_kernel[i].num_d_voxels; j++) {
tmp.voxel_init_pos.push_back(Vec3D<>(tmp_init[j].x, tmp_init[j].y, tmp_init[j].z));
tmp.voxel_position.push_back(Vec3D<>(tmp_v[j].pos.x, tmp_v[j].pos.y, tmp_v[j].pos.z));
tmp.voxel_mats.push_back(tmp_v[j].matid);
if (tmp_v[j].isMeasured) {
tmp.num_measured_voxel ++;
tmp.total_distance_of_all_voxels += tmp.voxel_position.back().Dist(tmp.voxel_init_pos.back());
}
}
        free(tmp_v); // allocated with malloc, so release with free
        free(tmp_init);
// tmp.computeFitness();
h_results.push_back(tmp);
}
}
void VX3_SimulationManager::sortResults() { sort(h_results.begin(), h_results.end(), VX3_SimulationResult::compareFitnessScore); }
| 67ebc3a954498e41a66af6504ce6f6613a93f716.cu | #include "VX3_SimulationManager.cuh"
#include "ctool.h"
#include <boost/algorithm/string/case_conv.hpp>
#include <queue>
#include <stack>
#include <utility>
#include "VX3_VoxelyzeKernel.cuh"
#include "VX_Sim.h" //readVXA
__global__ void CUDA_Simulation(VX3_VoxelyzeKernel *d_voxelyze_3, int num_simulation, int device_index) {
int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_index < num_simulation) {
VX3_VoxelyzeKernel *d_v3 = &d_voxelyze_3[thread_index];
if (d_v3->num_d_links == 0 and d_v3->num_d_voxels == 0) {
printf(COLORCODE_BOLD_RED "No links and no voxels. Simulation %d (%s) abort.\n" COLORCODE_RESET, thread_index,
d_v3->vxa_filename);
return;
}
        d_v3->syncVectors(); // Every time we pass a class with VX3_vectors in
// it, we should sync hd_vector to d_vector first.
d_v3->saveInitialPosition();
d_v3->isSurfaceChanged = true; // trigger surface regenerating and calculate normal thrust for the first time
d_v3->registerTargets();
printf(COLORCODE_GREEN "%d) Simulation %d runs: %s.\n" COLORCODE_RESET, device_index, thread_index, d_v3->vxa_filename);
// printf("%d) Simulation %d: links %d, voxels %d.\n", device_index, i,
// d_v3->num_d_links, d_v3->num_d_voxels); printf("%d) Simulation %d
// enableAttach %d.\n", device_index, i, d_v3->enableAttach);
//
// print check regenerateSurfaceVoxels() is correct. (TODO: shouldn't
// this be tested in seperate test code? :) printf("all voxels:"); for
// (int j=0;j<d_v3->num_d_voxels;j++) {
// printf(" [%d]%p ", j, &d_v3->d_voxels[j]);
// }
// printf("\nsurface:");
// for (int j=0;j<d_v3->num_d_surface_voxels;j++) {
// printf(" [%d]%p ", j, d_v3->d_surface_voxels[j]);
// }
//
if (d_v3->RecordStepSize) { // output History file
// rescale the whole space. so history file can contain less digits. ( e.g. not 0.000221, but 2.21 )
printf("\n{{{setting}}}<rescale>0.001</rescale>\n");
// materials' color
for (int i = 0; i < d_v3->num_d_voxelMats; i++) {
auto &mat = d_v3->d_voxelMats[i];
printf("{{{setting}}}<matcolor><id>%d</id><r>%.2f</r><g>%.2f</g><b>%.2f</b><a>%.2f</a></matcolor>\n", mat.matid,
mat.r / 255., mat.g / 255., mat.b / 255., mat.a / 255.);
}
printf("\n{{{setting}}}<voxel_size>%f</voxel_size>\n", d_v3->voxSize);
}
double vs = 1 / 0.001;
d_v3->updateCurrentCenterOfMass();
d_v3->InitializeCenterOfMass();
int real_stepsize = int(d_v3->RecordStepSize / (10000 * d_v3->recommendedTimeStep() * d_v3->DtFrac))+1;
printf("real_stepsize: %d ; recommendedTimeStep %f; d_v3->DtFrac %f . \n", real_stepsize, d_v3->recommendedTimeStep(),
d_v3->DtFrac);
// printf("Initial CoM: %f %f %f mm\n",
// d_v3->initialCenterOfMass.x*1000, d_v3->initialCenterOfMass.y*1000,
// d_v3->initialCenterOfMass.z*1000);
for (int j = 0; j < 1000000; j++) { // Maximum Steps 1000000
if (d_v3->StopConditionMet())
break;
if (!d_v3->doTimeStep()) {
printf(COLORCODE_BOLD_RED "\n%d) Simulation %d Diverged: %s.\n" COLORCODE_RESET, device_index, thread_index,
d_v3->vxa_filename);
break;
}
if (d_v3->RecordStepSize) { // output History file
if (j % real_stepsize == 0) {
if (d_v3->RecordVoxel) {
// Voxels
printf("<<<Step%d Time:%f>>>", j, d_v3->currentTime);
for (int i = 0; i < d_v3->num_d_surface_voxels; i++) {
auto v = d_v3->d_surface_voxels[i];
if (v->removed)
continue;
if (v->isSurface()) {
printf("%.1f,%.1f,%.1f,", v->pos.x * vs, v->pos.y * vs, v->pos.z * vs);
printf("%.1f,%.2f,%.2f,%.2f,", v->orient.AngleDegrees(), v->orient.x, v->orient.y, v->orient.z);
VX3_Vec3D<double> ppp, nnn;
nnn = v->cornerOffset(NNN);
ppp = v->cornerOffset(PPP);
printf("%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,", nnn.x * vs, nnn.y * vs, nnn.z * vs, ppp.x * vs, ppp.y * vs,
ppp.z * vs);
printf("%d,", v->mat->matid); // for coloring
printf("%.1f,", v->localSignal); // for coloring as well.
printf(";");
}
}
printf("<<<>>>");
}
if (d_v3->RecordLink) {
// Links
printf("|[[[%d]]]", j);
for (int i = 0; i < d_v3->d_v_links.size(); i++) {
auto l = d_v3->d_v_links[i];
if (l->removed)
continue;
// only draw links that are not detached.
if (!l->isDetached) {
auto v1 = l->pVPos;
printf("%.4f,%.4f,%.4f,", v1->pos.x, v1->pos.y, v1->pos.z);
auto v2 = l->pVNeg;
printf("%.4f,%.4f,%.4f,", v2->pos.x, v2->pos.y, v2->pos.z);
printf(";");
}
}
printf("[[[]]]");
}
printf("\n");
}
}
}
d_v3->updateCurrentCenterOfMass();
d_v3->computeFitness();
printf(COLORCODE_BLUE "%d) Simulation %d ends: %s Time: %f, angleSampleTimes: %d.\n" COLORCODE_RESET, device_index, thread_index,
d_v3->vxa_filename, d_v3->currentTime, d_v3->angleSampleTimes);
}
}
VX3_SimulationManager::VX3_SimulationManager(std::vector<std::vector<fs::path>> in_sub_batches, fs::path in_base, fs::path in_input_dir,
int in_num_of_devices)
: sub_batches(in_sub_batches), base(in_base), num_of_devices(in_num_of_devices), input_dir(in_input_dir) {
d_voxelyze_3s.resize(num_of_devices);
for (int i = 0; i < num_of_devices; i++) {
d_voxelyze_3s[i] = NULL;
}
}
VX3_SimulationManager::~VX3_SimulationManager() {
for (auto d : d_voxelyze_3s) {
if (d)
VcudaFree(d);
}
}
void VX3_SimulationManager::start() {
for (int device_index = 0; device_index < num_of_devices; device_index++) { // multi GPUs
auto files = sub_batches[device_index];
if (files.size()) {
VcudaSetDevice(device_index);
printf("=== set device to %d for %ld simulations ===\n", device_index, files.size());
// readVXA(base)
readVXD(base, files, device_index);
startKernel(files.size(), device_index);
}
}
VcudaDeviceSynchronize();
for (int device_index = 0; device_index < num_of_devices; device_index++) { // multi GPUs
auto files = sub_batches[device_index];
collectResults(files.size(), device_index);
}
sortResults();
}
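// Flattens the expression subtree at node_address into a token array for
// on-device evaluation: a BFS over the ptree gathers (op, value) pairs, and
// popping the stack writes them back in reverse order, with mtEND (pushed first)
// terminating the array.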
void VX3_SimulationManager::ParseMathTree(VX3_MathTreeToken *field_ptr, size_t max_length, std::string node_address, pt::ptree &tree) {
// Classic BFS, push all token into stack
std::queue<pt::ptree> frontier;
std::stack<std::pair<std::string, std::string>> tokens;
tokens.push(make_pair((std::string) "mtEND", (std::string) ""));
auto root = tree.get_child_optional(node_address);
if (!root) {
// printf(COLORCODE_BOLD_RED "ERROR: No ParseMathTree %s in VXA.\n", node_address.c_str());
return;
}
frontier.push(tree.get_child(node_address));
while (!frontier.empty()) {
std::queue<pt::ptree> next_frontier;
auto t = frontier.front();
frontier.pop();
BOOST_FOREACH (pt::ptree::value_type &v_child, t.get_child("")) {
std::string value = v_child.second.data();
boost::trim_right(value);
std::string op = v_child.first.data();
boost::trim_right(op);
// std::cout << op << ":" << value << "\n";
tokens.push(make_pair(op, value));
frontier.push(v_child.second);
}
}
// pop from stack to VX3_MathTreeToken* (so we get a reversed order)
int i = 0;
while (!tokens.empty()) {
        if (i >= max_length) {
printf(COLORCODE_BOLD_RED "ERROR: Token size overflow.\n");
return;
}
std::pair<std::string, std::string> tok = tokens.top();
VX3_MathTreeToken *p = &field_ptr[i];
if (tok.first == "mtEND") {
p->op = mtEND;
} else if (tok.first == "mtVAR") {
p->op = mtVAR;
if (tok.second == "x") {
p->value = 0;
} else if (tok.second == "y") {
p->value = 1;
} else if (tok.second == "z") {
p->value = 2;
} else if (tok.second == "hit") {
p->value = 3;
} else if (tok.second == "t") {
p->value = 4;
} else if (tok.second == "angle") {
p->value = 5;
} else if (tok.second == "targetCloseness") {
p->value = 6;
} else if (tok.second == "numClosePairs") {
p->value = 7;
} else if (tok.second == "num_voxel") {
p->value = 8;
} else {
printf(COLORCODE_BOLD_RED "ERROR: No such variable.\n");
break;
}
} else if (tok.first == "mtCONST") {
p->op = mtCONST;
try {
p->value = std::stod(tok.second);
} catch(...) {
printf(COLORCODE_BOLD_RED "ERROR: mtCONST with no number.\n");
break;
}
} else if (tok.first == "mtADD") {
p->op = mtADD;
} else if (tok.first == "mtSUB") {
p->op = mtSUB;
} else if (tok.first == "mtMUL") {
p->op = mtMUL;
} else if (tok.first == "mtDIV") {
p->op = mtDIV;
} else if (tok.first == "mtPOW") {
p->op = mtPOW;
} else if (tok.first == "mtSQRT") {
p->op = mtSQRT;
} else if (tok.first == "mtE") {
p->op = mtE;
} else if (tok.first == "mtPI") {
p->op = mtPI;
} else if (tok.first == "mtSIN") {
p->op = mtSIN;
} else if (tok.first == "mtCOS") {
p->op = mtCOS;
} else if (tok.first == "mtTAN") {
p->op = mtTAN;
} else if (tok.first == "mtATAN") {
p->op = mtATAN;
} else if (tok.first == "mtLOG") {
p->op = mtLOG;
} else if (tok.first == "mtINT") {
p->op = mtINT;
} else if (tok.first == "mtABS") {
p->op = mtABS;
} else if (tok.first == "mtNOT") {
p->op = mtNOT;
} else if (tok.first == "mtGREATERTHAN") {
p->op = mtGREATERTHAN;
} else if (tok.first == "mtLESSTHAN") {
p->op = mtLESSTHAN;
} else if (tok.first == "mtAND") {
p->op = mtAND;
} else if (tok.first == "mtOR") {
p->op = mtOR;
} else if (tok.first == "mtNORMALCDF") {
p->op = mtNORMALCDF;
} else {
printf(COLORCODE_BOLD_RED "ERROR: Token Operation not implemented.\n");
break;
}
i++;
tokens.pop();
}
}
void VX3_SimulationManager::readVXD(fs::path base, std::vector<fs::path> files, int device_index) {
pt::ptree pt_baseVXA;
pt::read_xml(base.string(), pt_baseVXA);
int num_simulation = files.size();
VcudaMalloc((void **)&d_voxelyze_3s[device_index], num_simulation * sizeof(VX3_VoxelyzeKernel));
int i = 0;
for (auto &file : files) {
// Read VXD file, clone base VXA, replace parts specified in VXD, send
// to MainSim.ReadVXA to process. printf("reading %s\n",
// (input_dir/file).c_str());
pt::ptree pt_VXD;
pt::read_xml((input_dir / file).string(), pt_VXD);
pt::ptree pt_merged = pt_baseVXA;
ctool::ptree_merge(pt_VXD, pt_merged);
std::ostringstream stream_merged;
std::string str_merged;
pt::write_xml(stream_merged, pt_merged);
str_merged = stream_merged.str();
CXML_Rip XML;
XML.fromXMLText(&str_merged);
CVX_Environment MainEnv;
CVX_Sim MainSim;
CVX_Object MainObj;
MainEnv.pObj = &MainObj; // connect environment to object
        MainSim.pEnv = &MainEnv;   // connect Simulation to environment
std::string RetMessage;
// std::cout<<str_merged;
MainSim.ReadVXA(&XML, &RetMessage);
MainSim.Import(NULL, NULL, &RetMessage);
if (!RetMessage.empty()) {
printf(COLORCODE_BOLD_RED "%s\n" COLORCODE_RESET, RetMessage.c_str());
}
// for (auto m:MainSim.Vx.voxelMats) {
// int i=0;
// for (auto mm:m->dependentMaterials) {
// printf("m:%p %d/%ld -> mm: %p\n", m, i,
// m->dependentMaterials.size(), mm); i++;
// }
// }
VX3_VoxelyzeKernel h_d_tmp(&MainSim);
// More VXA settings which is new in VX3
strcpy(h_d_tmp.vxa_filename, file.filename().c_str());
std::string RawPrint = pt_merged.get<std::string>("VXA.RawPrint", "");
std::cout << RawPrint << "\n";
ParseMathTree(h_d_tmp.StopConditionFormula, sizeof(h_d_tmp.StopConditionFormula),
"VXA.Simulator.StopCondition.StopConditionFormula", pt_merged);
h_d_tmp.EnableCollision = pt_merged.get<bool>("VXA.Simulator.AttachDetach.EnableCollision", true);
h_d_tmp.enableAttach = pt_merged.get<bool>("VXA.Simulator.AttachDetach.EnableAttach", false);
h_d_tmp.enableDetach = pt_merged.get<bool>("VXA.Simulator.AttachDetach.EnableDetach", false);
h_d_tmp.watchDistance = pt_merged.get<double>("VXA.Simulator.AttachDetach.watchDistance", 1.0);
h_d_tmp.boundingRadius = pt_merged.get<double>("VXA.Simulator.AttachDetach.boundingRadius", 0.75);
h_d_tmp.SafetyGuard = pt_merged.get<int>("VXA.Simulator.AttachDetach.SafetyGuard", 500);
ParseMathTree(h_d_tmp.AttachCondition[0], sizeof(h_d_tmp.AttachCondition[0]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_0", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[1], sizeof(h_d_tmp.AttachCondition[1]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_1", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[2], sizeof(h_d_tmp.AttachCondition[2]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_2", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[3], sizeof(h_d_tmp.AttachCondition[3]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_3", pt_merged);
ParseMathTree(h_d_tmp.AttachCondition[4], sizeof(h_d_tmp.AttachCondition[4]),
"VXA.Simulator.AttachDetach.AttachCondition.Condition_4", pt_merged);
h_d_tmp.RecordStepSize = pt_merged.get<int>("VXA.Simulator.RecordHistory.RecordStepSize", 0);
h_d_tmp.RecordLink = pt_merged.get<int>("VXA.Simulator.RecordHistory.RecordLink", 0);
h_d_tmp.RecordVoxel = pt_merged.get<int>("VXA.Simulator.RecordHistory.RecordVoxel", 1);
ParseMathTree(h_d_tmp.fitness_function, sizeof(h_d_tmp.fitness_function), "VXA.Simulator.FitnessFunction", pt_merged);
ParseMathTree(h_d_tmp.force_field.token_x_forcefield, sizeof(h_d_tmp.force_field.token_x_forcefield),
"VXA.Simulator.ForceField.x_forcefield", pt_merged);
ParseMathTree(h_d_tmp.force_field.token_y_forcefield, sizeof(h_d_tmp.force_field.token_y_forcefield),
"VXA.Simulator.ForceField.y_forcefield", pt_merged);
ParseMathTree(h_d_tmp.force_field.token_z_forcefield, sizeof(h_d_tmp.force_field.token_z_forcefield),
"VXA.Simulator.ForceField.z_forcefield", pt_merged);
// h_d_tmp.EnableTargetCloseness = pt_merged.get<int>("VXA.Simulator.EnableTargetCloseness", 0); abandoned.
h_d_tmp.SavePositionOfAllVoxels = pt_merged.get<int>("VXA.Simulator.SavePositionOfAllVoxels", 0);
h_d_tmp.MaxDistInVoxelLengthsToCountAsPair = pt_merged.get<double>("VXA.Simulator.MaxDistInVoxelLengthsToCountAsPair", 0);
h_d_tmp.EnableCilia = pt_merged.get<int>("VXA.Simulator.EnableCilia", 0);
h_d_tmp.EnableSignals = pt_merged.get<int>("VXA.Simulator.EnableSignals", 0);
// for Secondary Experiment
h_d_tmp.SecondaryExperiment = pt_merged.get<int>("VXA.Simulator.SecondaryExperiment", 0);
h_d_tmp.ReinitializeInitialPositionAfterThisManySeconds = pt_merged.get<double>("VXA.Simulator.ReinitializeInitialPositionAfterThisManySeconds", 0.0);
h_d_tmp.EnableExpansion = pt_merged.get<int>("VXA.Simulator.EnableExpansion", 0);
HeapSize = pt_merged.get<double>("VXA.GPU.HeapSize", 0.5);
if (HeapSize > 1.0) {
HeapSize = 0.99;
}
if (HeapSize < 0.01) {
HeapSize = 0.01;
}
PrintfFIFOSize = pt_merged.get<double>("VXA.GPU.PrintfFIFOSize", 50);
VcudaMemcpy(d_voxelyze_3s[device_index] + i, &h_d_tmp, sizeof(VX3_VoxelyzeKernel), cudaMemcpyHostToDevice);
i++;
}
}
// GPU Heap is for in-kernel malloc(). Refer to
// https://stackoverflow.com/a/34795830/7001199
void VX3_SimulationManager::enlargeGPUHeapSize() {
size_t HeapSizeInBytes;
size_t free, total;
VcudaMemGetInfo(&free, &total);
printf("Total GPU memory %ld bytes.\n", total);
HeapSizeInBytes = HeapSize * total; // add some additional size
printf("Set GPU heap size to be %ld bytes.\n", HeapSizeInBytes);
VcudaDeviceSetLimit(cudaLimitMallocHeapSize,
HeapSizeInBytes); // Set Heap Memory to 1G, instead of merely 8M.
// if "Lane User Stack Overflow" ocurs, maybe Stack Size too small, can try this:
// VcudaDeviceSetLimit(cudaLimitStackSize, 2048);
}
/* gets the current printfFifo buffer size and increases it PrintfFIFOSize times */
void VX3_SimulationManager::enlargeGPUPrintfFIFOSize()
{
size_t PrintfFIFOSizeInBytes;
VcudaDeviceGetLimit(&PrintfFIFOSizeInBytes, cudaLimitPrintfFifoSize);
VcudaDeviceSetLimit(cudaLimitPrintfFifoSize, PrintfFIFOSizeInBytes*PrintfFIFOSize);
printf("set GPU printfFIFO size to be %ld bytes.\n", PrintfFIFOSizeInBytes*PrintfFIFOSize);
}
void VX3_SimulationManager::startKernel(int num_simulation, int device_index) {
int threadsPerBlock = 512;
int numBlocks = (num_simulation + threadsPerBlock - 1) / threadsPerBlock;
if (numBlocks == 1)
threadsPerBlock = num_simulation;
// printf("Starting kernel on device %d. passing d_voxelyze_3s[device_index]
// %p.\n", device_index, d_voxelyze_3s[device_index]);
// VX3_VoxelyzeKernel *result_voxelyze_kernel = (VX3_VoxelyzeKernel
// *)malloc(
// num_simulation * sizeof(VX3_VoxelyzeKernel));
// VcudaMemcpy(result_voxelyze_kernel, d_voxelyze_3s[device_index],
// num_simulation * sizeof(VX3_VoxelyzeKernel),
// cudaMemcpyDeviceToHost);
enlargeGPUHeapSize();
enlargeGPUPrintfFIFOSize();
CUDA_Simulation<<<numBlocks, threadsPerBlock>>>(d_voxelyze_3s[device_index], num_simulation, device_index);
CUDA_CHECK_AFTER_CALL();
}
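// Copies each simulation's VX3_VoxelyzeKernel back from the device, then gathers
// voxel initial/final positions and sums the displacement of every measured voxel
// into an entry of h_results.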
void VX3_SimulationManager::collectResults(int num_simulation, int device_index) {
// insert results to h_results
VX3_VoxelyzeKernel *result_voxelyze_kernel = (VX3_VoxelyzeKernel *)malloc(num_simulation * sizeof(VX3_VoxelyzeKernel));
VcudaMemcpy(result_voxelyze_kernel, d_voxelyze_3s[device_index], num_simulation * sizeof(VX3_VoxelyzeKernel), cudaMemcpyDeviceToHost);
for (int i = 0; i < num_simulation; i++) {
VX3_SimulationResult tmp;
tmp.currentTime = result_voxelyze_kernel[i].currentTime;
tmp.fitness_score = result_voxelyze_kernel[i].fitness_score;
tmp.x = result_voxelyze_kernel[i].currentCenterOfMass.x;
tmp.y = result_voxelyze_kernel[i].currentCenterOfMass.y;
tmp.z = result_voxelyze_kernel[i].currentCenterOfMass.z;
result_voxelyze_kernel[i].initialCenterOfMass.copyTo(tmp.initialCenterOfMass);
result_voxelyze_kernel[i].currentCenterOfMass.copyTo(tmp.currentCenterOfMass);
tmp.numClosePairs = result_voxelyze_kernel[i].numClosePairs;
tmp.voxSize = result_voxelyze_kernel[i].voxSize;
tmp.num_voxel = result_voxelyze_kernel[i].num_d_voxels;
tmp.vxa_filename = result_voxelyze_kernel[i].vxa_filename;
VX3_Voxel *tmp_v;
tmp_v = (VX3_Voxel *)malloc(result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Voxel));
VcudaMemcpy(tmp_v, result_voxelyze_kernel[i].d_voxels, result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Voxel),
cudaMemcpyDeviceToHost);
tmp.SavePositionOfAllVoxels = result_voxelyze_kernel[i].SavePositionOfAllVoxels;
VX3_Vec3D<>* tmp_init;
tmp_init = (VX3_Vec3D<>*)malloc(result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Vec3D<>));
VcudaMemcpy(tmp_init, result_voxelyze_kernel[i].d_initialPosition, result_voxelyze_kernel[i].num_d_voxels * sizeof(VX3_Vec3D<>), cudaMemcpyDeviceToHost);
tmp.num_measured_voxel = 0;
tmp.total_distance_of_all_voxels = 0.0;
for (int j = 0; j < result_voxelyze_kernel[i].num_d_voxels; j++) {
tmp.voxel_init_pos.push_back(Vec3D<>(tmp_init[j].x, tmp_init[j].y, tmp_init[j].z));
tmp.voxel_position.push_back(Vec3D<>(tmp_v[j].pos.x, tmp_v[j].pos.y, tmp_v[j].pos.z));
tmp.voxel_mats.push_back(tmp_v[j].matid);
if (tmp_v[j].isMeasured) {
tmp.num_measured_voxel ++;
tmp.total_distance_of_all_voxels += tmp.voxel_position.back().Dist(tmp.voxel_init_pos.back());
}
}
        free(tmp_v); // allocated with malloc, so release with free
        free(tmp_init);
// tmp.computeFitness();
h_results.push_back(tmp);
}
}
void VX3_SimulationManager::sortResults() { sort(h_results.begin(), h_results.end(), VX3_SimulationResult::compareFitnessScore); }
|
0736be7e168fca9d32dad5826d8d232fa33f043c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pow_array_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
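// Benchmark driver: for each (block shape, matrix size) pair, allocate a buffer,
// warm the kernel up with 10 launches, time 1000 more, and print
// [microseconds, (block dims), (matrix dims)].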
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(float));
int power = 1;
int array_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(pow_array_gpu, dim3(gridBlock), dim3(threadBlock), 0, 0, a, power, array_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(pow_array_gpu, dim3(gridBlock), dim3(threadBlock), 0, 0, a, power, array_size);
}
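// time 1000 launches of the kernel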
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((pow_array_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, a,power,array_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0736be7e168fca9d32dad5826d8d232fa33f043c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pow_array_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(float));
int power = 1;
int array_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
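// round the launch extents up to multiples of the block dimensions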
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pow_array_gpu<<<gridBlock,threadBlock>>>(a,power,array_size);
cudaDeviceSynchronize();
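// 10 untimed launches before measurement (warm-up)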
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pow_array_gpu<<<gridBlock,threadBlock>>>(a,power,array_size);
}
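// time 1000 launches of the kernel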
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pow_array_gpu<<<gridBlock,threadBlock>>>(a,power,array_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b01184e9f662703f18c7cf752aedbda58dd24b5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -S -o /dev/null -verify \
// RUN: -verify-ignore-unexpected=note %s
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -S -o /dev/null -fcuda-is-device \
// RUN: -verify -verify-ignore-unexpected=note %s
#include "Inputs/cuda.h"
// FIXME: Merge into function-overload.cu once deferred errors can be emitted
// when non-deferred errors are present.
#if !defined(__CUDA_ARCH__)
//expected-no-diagnostics
#endif
typedef void (*GlobalFnPtr)(); // __global__ functions must return void.
__global__ void g() {}
__host__ __device__ void hd() {
GlobalFnPtr fp_g = g;
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif
hipLaunchKernelGGL(( g), dim3(0),dim3(0), 0, 0, );
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif // __CUDA_ARCH__
}
| b01184e9f662703f18c7cf752aedbda58dd24b5e.cu | // REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -S -o /dev/null -verify \
// RUN: -verify-ignore-unexpected=note %s
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -S -o /dev/null -fcuda-is-device \
// RUN: -verify -verify-ignore-unexpected=note %s
#include "Inputs/cuda.h"
// FIXME: Merge into function-overload.cu once deferred errors can be emitted
// when non-deferred errors are present.
#if !defined(__CUDA_ARCH__)
//expected-no-diagnostics
#endif
typedef void (*GlobalFnPtr)(); // __global__ functions must return void.
__global__ void g() {}
__host__ __device__ void hd() {
GlobalFnPtr fp_g = g;
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif
g<<<0,0>>>();
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif // __CUDA_ARCH__
}
|
c16c9ddd20219abe9825810e0e37ca2204ead213.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#include <unistd.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define ISLAND 6
#define POPULATION 20
#define FACILITY 20
#define GENERATION 1
#define CROSSOVER 0.6
#define MUTATION 0.03
#define MIGRATION 15
#define INDIVIDUAL 5
#define H 15 // BAY height
#define W 10 // BAY width
__global__ void init(unsigned int seed, hiprandState_t* states) {
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[x]);
}
__global__ void randomData(hiprandState_t* states, short* GA){
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
for(int j=0;j<FACILITY;j++){ // setup
GA[x*FACILITY + j] = j;
}
int i; // shuffle
for(i = 0; i < FACILITY; i++) {
short k = hiprand(&states[x]) % FACILITY;
int tmp = GA[x*FACILITY + i];
GA[x*FACILITY + i] = GA[x*FACILITY + k];
GA[x*FACILITY + k] = tmp;
}
}
__global__ void randomBay(hiprandState_t* states, bool* GB){
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
int i; // shuffle
for(i = 0; i < FACILITY-1; i++) {
GB[x*(FACILITY-1) + i] = hiprand(&states[x]) % 2;
}
}
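// Compute each facility's (x, y) center: consecutive facilities share a bay (a strip of width W) until a 1 in bay[] closes it, and the bay's height H is split evenly among the facilities placed in it.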
__global__ void calPosition(short *data, bool *bay, float *position){
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
short posit = x * FACILITY;
short bayposit = x * (FACILITY-1);
  // int posit=b*POPULATION*FACILITY+t*FACILITY; // thread's corresponding position in the array
// int posofposit = b*POPULATION*FACILITY*2+t*FACILITY*2;
for(int i=0;i<ISLAND*POPULATION*FACILITY*2;i++){
position[i] = 0;
}
short len = 1;
short next = 0;
for(short f=0;f<FACILITY;f++){
if(bay[bayposit+f] == 0){
len = len + 1;
}
if(bay[bayposit+f] == 1 || f == FACILITY - 1 ){
if(f == FACILITY - 1 && bay[bayposit+f] == 0){
len = len - 1;
}
float x = W / 2.0 + next;
for(short j=0;j<len;j++){
position[posit*2+(f+j-len+1)*2] = x;
float y = H / (len * 2.0) * ( (j * 2) + 1) ;
position[posit*2+(f+j-len+1)*2+1] = y;
}
len = 1;
next = next + W;
}
}
}
int main(){
float START, END;
START = clock();
hiprandState_t* states;
hipMalloc((void**) &states, ISLAND * POPULATION * sizeof(hiprandState_t));
// init seed
hipLaunchKernelGGL(( init), dim3(ISLAND), dim3(POPULATION), 0, 0, time(NULL), states);
// generate random data
short *GA;
hipMalloc((void**)&GA, ISLAND*POPULATION*FACILITY*sizeof(short));
bool *GB;
hipMalloc((void**)&GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool));
hipLaunchKernelGGL(( randomData), dim3(ISLAND), dim3(POPULATION), 0, 0, states, GA);
hipLaunchKernelGGL(( randomBay), dim3(ISLAND), dim3(POPULATION), 0, 0, states, GB);
short data[ISLAND][POPULATION][FACILITY];
bool bay[ISLAND][POPULATION][FACILITY-1];
hipMemcpy(data, GA, ISLAND*POPULATION*FACILITY*sizeof(short), hipMemcpyDeviceToHost);
hipMemcpy(bay, GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), hipMemcpyDeviceToHost);
// print data
for(int i=0;i<ISLAND;i++){
for(int j=0;j<POPULATION;j++){
for(int k=0;k<FACILITY;k++){
printf("%hu ", data[i][j][k]);
}
printf("\n");
}
}
// print bay
for(int i=0;i<ISLAND;i++){
for(int j=0;j<POPULATION;j++){
for(int k=0;k<FACILITY-1;k++){
printf("%d ", bay[i][j][k]);
}
printf("\n");
}
}
FILE *fPtr;
int ttt = FACILITY * (FACILITY-1) ;
fPtr=fopen("cost.txt","r");
int cost[FACILITY][FACILITY] = {0};
int temp[ttt][3]; // cost
for(int i=0;i<ttt;i++){
fscanf(fPtr , "%d %d %d" , &temp[i][0], &temp[i][1], &temp[i][2]);
}
fclose(fPtr);
for(int i=0;i<ttt;i++){ // 2-dimensional cost matrix
cost[ temp[i][0]-1 ][ temp[i][1]-1] = temp[i][2];
}
for(int i=0;i<FACILITY;i++){
for(int j=0;j<FACILITY;j++){
printf("%d ", cost[i][j]);
}
printf("\n");
}
int *Gcost;
hipMalloc((void**)&Gcost, FACILITY*FACILITY*sizeof(int));
hipMemcpy(Gcost, cost, FACILITY*FACILITY*sizeof(int), hipMemcpyHostToDevice);
for(int gggggg=0;gggggg<GENERATION;gggggg++){ // generation start
float *Gposition;
hipMalloc((void**)&Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float));
// calculate position
hipLaunchKernelGGL(( calPosition), dim3(ISLAND), dim3(POPULATION), 0, 0, GA, GB, Gposition);
float position[ISLAND][POPULATION][FACILITY][2];
hipMemcpy(position, Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float), hipMemcpyDeviceToHost);
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY;f++){
for(int t=0;t<2;t++){
printf("%.2f ", position[i][p][f][t]);
}
printf("\n");
}
}
}
} // generation end
END = clock();
printf("%f\n", (END - START) / CLOCKS_PER_SEC);
return 0;
} | c16c9ddd20219abe9825810e0e37ca2204ead213.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#define ISLAND 6
#define POPULATION 20
#define FACILITY 20
#define GENERATION 1
#define CROSSOVER 0.6
#define MUTATION 0.03
#define MIGRATION 15
#define INDIVIDUAL 5
#define H 15 // BAY height
#define W 10 // BAY width
__global__ void init(unsigned int seed, curandState_t* states) {
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[x]);
}
__global__ void randomData(curandState_t* states, short* GA){
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
for(int j=0;j<FACILITY;j++){ // setup
GA[x*FACILITY + j] = j;
}
int i; // shuffle
for(i = 0; i < FACILITY; i++) {
short k = curand(&states[x]) % FACILITY;
int tmp = GA[x*FACILITY + i];
GA[x*FACILITY + i] = GA[x*FACILITY + k];
GA[x*FACILITY + k] = tmp;
}
}
__global__ void randomBay(curandState_t* states, bool* GB){
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
int i; // shuffle
for(i = 0; i < FACILITY-1; i++) {
GB[x*(FACILITY-1) + i] = curand(&states[x]) % 2;
}
}
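// Compute each facility's (x, y) center: consecutive facilities share a bay (a strip of width W) until a 1 in bay[] closes it, and the bay's height H is split evenly among the facilities placed in it.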
__global__ void calPosition(short *data, bool *bay, float *position){
  short b=blockIdx.x; // block index == ISLAND
  short t=threadIdx.x; // thread index == POPULATION
  short n=blockDim.x; // number of threads per block == num of ISLAND
short x=b*n+t;
short posit = x * FACILITY;
short bayposit = x * (FACILITY-1);
  // int posit=b*POPULATION*FACILITY+t*FACILITY; // thread's corresponding position in the array
// int posofposit = b*POPULATION*FACILITY*2+t*FACILITY*2;
for(int i=0;i<ISLAND*POPULATION*FACILITY*2;i++){
position[i] = 0;
}
short len = 1;
short next = 0;
for(short f=0;f<FACILITY;f++){
if(bay[bayposit+f] == 0){
len = len + 1;
}
if(bay[bayposit+f] == 1 || f == FACILITY - 1 ){
if(f == FACILITY - 1 && bay[bayposit+f] == 0){
len = len - 1;
}
float x = W / 2.0 + next;
for(short j=0;j<len;j++){
position[posit*2+(f+j-len+1)*2] = x;
float y = H / (len * 2.0) * ( (j * 2) + 1) ;
position[posit*2+(f+j-len+1)*2+1] = y;
}
len = 1;
next = next + W;
}
}
}
int main(){
float START, END;
START = clock();
curandState_t* states;
cudaMalloc((void**) &states, ISLAND * POPULATION * sizeof(curandState_t));
// init seed
init<<<ISLAND, POPULATION>>>(time(NULL), states);
// generate random data
short *GA;
cudaMalloc((void**)&GA, ISLAND*POPULATION*FACILITY*sizeof(short));
bool *GB;
cudaMalloc((void**)&GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool));
randomData<<<ISLAND, POPULATION>>>(states, GA);
randomBay<<<ISLAND, POPULATION>>>(states, GB);
short data[ISLAND][POPULATION][FACILITY];
bool bay[ISLAND][POPULATION][FACILITY-1];
cudaMemcpy(data, GA, ISLAND*POPULATION*FACILITY*sizeof(short), cudaMemcpyDeviceToHost);
cudaMemcpy(bay, GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), cudaMemcpyDeviceToHost);
// print data
for(int i=0;i<ISLAND;i++){
for(int j=0;j<POPULATION;j++){
for(int k=0;k<FACILITY;k++){
printf("%hu ", data[i][j][k]);
}
printf("\n");
}
}
// print bay
for(int i=0;i<ISLAND;i++){
for(int j=0;j<POPULATION;j++){
for(int k=0;k<FACILITY-1;k++){
printf("%d ", bay[i][j][k]);
}
printf("\n");
}
}
FILE *fPtr;
int ttt = FACILITY * (FACILITY-1) ;
fPtr=fopen("cost.txt","r");
int cost[FACILITY][FACILITY] = {0};
int temp[ttt][3]; // cost
for(int i=0;i<ttt;i++){
fscanf(fPtr , "%d %d %d" , &temp[i][0], &temp[i][1], &temp[i][2]);
}
fclose(fPtr);
for(int i=0;i<ttt;i++){ // 2-dimensional cost matrix
cost[ temp[i][0]-1 ][ temp[i][1]-1] = temp[i][2];
}
for(int i=0;i<FACILITY;i++){
for(int j=0;j<FACILITY;j++){
printf("%d ", cost[i][j]);
}
printf("\n");
}
int *Gcost;
cudaMalloc((void**)&Gcost, FACILITY*FACILITY*sizeof(int));
cudaMemcpy(Gcost, cost, FACILITY*FACILITY*sizeof(int), cudaMemcpyHostToDevice);
for(int gggggg=0;gggggg<GENERATION;gggggg++){ // generation start
float *Gposition;
cudaMalloc((void**)&Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float));
// calculate position
calPosition<<<ISLAND, POPULATION>>>(GA, GB, Gposition);
float position[ISLAND][POPULATION][FACILITY][2];
cudaMemcpy(position, Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float), cudaMemcpyDeviceToHost);
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY;f++){
for(int t=0;t<2;t++){
printf("%.2f ", position[i][p][f][t]);
}
printf("\n");
}
}
}
} // generation end
END = clock();
printf("%f\n", (END - START) / CLOCKS_PER_SEC);
return 0;
} |