rapidsai_public_repos/cudf/cpp/src/search/contains_table.cu
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <cuco/static_set.cuh>
#include <type_traits>
namespace cudf::detail {
namespace {
using cudf::experimental::row::lhs_index_type;
using cudf::experimental::row::rhs_index_type;
/**
* @brief A hasher adapter wrapping both the haystack hasher and the needles hasher
*/
template <typename HaystackHasher, typename NeedleHasher>
struct hasher_adapter {
hasher_adapter(HaystackHasher const& haystack_hasher, NeedleHasher const& needle_hasher)
: _haystack_hasher{haystack_hasher}, _needle_hasher{needle_hasher}
{
}
__device__ constexpr auto operator()(lhs_index_type idx) const noexcept
{
return _haystack_hasher(static_cast<size_type>(idx));
}
__device__ constexpr auto operator()(rhs_index_type idx) const noexcept
{
return _needle_hasher(static_cast<size_type>(idx));
}
private:
HaystackHasher const _haystack_hasher;
NeedleHasher const _needle_hasher;
};
/**
* @brief A comparator adapter wrapping both the self comparator and the two-table comparator
*/
template <typename SelfEqual, typename TwoTableEqual>
struct comparator_adapter {
comparator_adapter(SelfEqual const& self_equal, TwoTableEqual const& two_table_equal)
: _self_equal{self_equal}, _two_table_equal{two_table_equal}
{
}
__device__ constexpr auto operator()(lhs_index_type lhs_index,
lhs_index_type rhs_index) const noexcept
{
auto const lhs = static_cast<size_type>(lhs_index);
auto const rhs = static_cast<size_type>(rhs_index);
return _self_equal(lhs, rhs);
}
__device__ constexpr auto operator()(lhs_index_type lhs_index,
rhs_index_type rhs_index) const noexcept
{
return _two_table_equal(lhs_index, rhs_index);
}
private:
SelfEqual const _self_equal;
TwoTableEqual const _two_table_equal;
};
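// Note: the strong index types (lhs_index_type for haystack rows, rhs_index_type for
// needle rows) let overload resolution select the right functor: hashing or comparing
// two haystack rows uses the self (haystack) path, while comparing a haystack row
// against a needle row uses the two-table path. This is what allows a single
// cuco::static_set built from the haystack to be probed with the needles below.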
/**
* @brief Build a row bitmask for the input table.
*
* The output bitmask has an invalid bit for each input row that contains a null (at
* any nested level), and a valid bit otherwise.
*
* @param input The input table
* @param stream CUDA stream used for device memory operations and kernel launches
* @return A pair of pointer to the output bitmask and the buffer containing the bitmask
*/
std::pair<rmm::device_buffer, bitmask_type const*> build_row_bitmask(table_view const& input,
rmm::cuda_stream_view stream)
{
auto const nullable_columns = get_nullable_columns(input);
CUDF_EXPECTS(nullable_columns.size() > 0,
"The input table has nulls thus it should have nullable columns.");
// If there are more than one nullable column, we compute `bitmask_and` of their null masks.
// Otherwise, we have only one nullable column and can use its null mask directly.
if (nullable_columns.size() > 1) {
auto row_bitmask =
cudf::detail::bitmask_and(
table_view{nullable_columns}, stream, rmm::mr::get_current_device_resource())
.first;
auto const row_bitmask_ptr = static_cast<bitmask_type const*>(row_bitmask.data());
return std::pair(std::move(row_bitmask), row_bitmask_ptr);
}
return std::pair(rmm::device_buffer{0, stream}, nullable_columns.front().null_mask());
}
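// Illustrative example (not from the original source): for two nullable columns with
// per-row validity {1, 1, 0, 1} and {1, 0, 1, 1}, `bitmask_and` yields {1, 0, 0, 1},
// i.e. rows 1 and 2 are marked invalid because they contain a null in at least one
// column.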
/**
* @brief Invokes the given `func` with desired comparators based on the specified `compare_nans`
* parameter
*
* @tparam HasNested Flag indicating whether there are nested columns in haystack or needles
* @tparam Hasher Type of device hash function
* @tparam Func Type of the helper function doing `contains` check
*
* @param compare_nulls Control whether nulls should be compared as equal or not
* @param compare_nans Control whether floating-point NaN values should be compared as equal or not
* @param haystack_has_nulls Flag indicating whether haystack has nulls or not
* @param has_any_nulls Flag indicating whether there are nested nulls in either haystack or needles
* @param self_equal Self table comparator
* @param two_table_equal Two table comparator
* @param d_hasher Device hash functor
* @param func The input functor to invoke
*/
template <bool HasNested, typename Hasher, typename Func>
void dispatch_nan_comparator(
null_equality compare_nulls,
nan_equality compare_nans,
bool haystack_has_nulls,
bool has_any_nulls,
cudf::experimental::row::equality::self_comparator self_equal,
cudf::experimental::row::equality::two_table_comparator two_table_equal,
Hasher const& d_hasher,
Func&& func)
{
// Distinguish probing scheme CG sizes between nested and flat types for better performance
auto const probing_scheme = [&]() {
if constexpr (HasNested) {
return cuco::experimental::linear_probing<4, Hasher>{d_hasher};
} else {
return cuco::experimental::linear_probing<1, Hasher>{d_hasher};
}
}();
if (compare_nans == nan_equality::ALL_EQUAL) {
using nan_equal_comparator =
cudf::experimental::row::equality::nan_equal_physical_equality_comparator;
auto const d_self_equal = self_equal.equal_to<HasNested>(
nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, nan_equal_comparator{});
auto const d_two_table_equal = two_table_equal.equal_to<HasNested>(
nullate::DYNAMIC{has_any_nulls}, compare_nulls, nan_equal_comparator{});
func(d_self_equal, d_two_table_equal, probing_scheme);
} else {
using nan_unequal_comparator = cudf::experimental::row::equality::physical_equality_comparator;
auto const d_self_equal = self_equal.equal_to<HasNested>(
nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, nan_unequal_comparator{});
auto const d_two_table_equal = two_table_equal.equal_to<HasNested>(
nullate::DYNAMIC{has_any_nulls}, compare_nulls, nan_unequal_comparator{});
func(d_self_equal, d_two_table_equal, probing_scheme);
}
}
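// Note: `dispatch_nan_comparator` fixes, at compile time, both the NaN-equality policy
// (via the comparator type) and the probing CG size (via `HasNested`), then hands the
// concrete comparators and probing scheme to `func`. The hash-set construction below is
// therefore written once but instantiated only for the combinations actually used.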
} // namespace
rmm::device_uvector<bool> contains(table_view const& haystack,
table_view const& needles,
null_equality compare_nulls,
nan_equality compare_nans,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(cudf::have_same_types(haystack, needles), "Column types mismatch");
auto const haystack_has_nulls = has_nested_nulls(haystack);
auto const needles_has_nulls = has_nested_nulls(needles);
auto const has_any_nulls = haystack_has_nulls || needles_has_nulls;
auto const preprocessed_needles =
cudf::experimental::row::equality::preprocessed_table::create(needles, stream);
auto const preprocessed_haystack =
cudf::experimental::row::equality::preprocessed_table::create(haystack, stream);
auto const haystack_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_haystack);
auto const d_haystack_hasher = haystack_hasher.device_hasher(nullate::DYNAMIC{has_any_nulls});
auto const needle_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_needles);
auto const d_needle_hasher = needle_hasher.device_hasher(nullate::DYNAMIC{has_any_nulls});
auto const d_hasher = hasher_adapter{d_haystack_hasher, d_needle_hasher};
auto const self_equal = cudf::experimental::row::equality::self_comparator(preprocessed_haystack);
auto const two_table_equal = cudf::experimental::row::equality::two_table_comparator(
preprocessed_haystack, preprocessed_needles);
// The output vector.
auto contained = rmm::device_uvector<bool>(needles.num_rows(), stream, mr);
auto const haystack_iter = cudf::detail::make_counting_transform_iterator(
size_type{0}, [] __device__(auto idx) { return lhs_index_type{idx}; });
auto const needles_iter = cudf::detail::make_counting_transform_iterator(
size_type{0}, [] __device__(auto idx) { return rhs_index_type{idx}; });
auto const helper_func =
[&](auto const& d_self_equal, auto const& d_two_table_equal, auto const& probing_scheme) {
auto const d_equal = comparator_adapter{d_self_equal, d_two_table_equal};
auto set = cuco::experimental::static_set{
cuco::experimental::extent{compute_hash_table_size(haystack.num_rows())},
cuco::empty_key{lhs_index_type{-1}},
d_equal,
probing_scheme,
detail::hash_table_allocator_type{default_allocator<lhs_index_type>{}, stream},
stream.value()};
if (haystack_has_nulls && compare_nulls == null_equality::UNEQUAL) {
auto const bitmask_buffer_and_ptr = build_row_bitmask(haystack, stream);
auto const row_bitmask_ptr = bitmask_buffer_and_ptr.second;
// If the haystack table has nulls but they are compared unequal, don't insert them.
// Otherwise, inserting them is known to cause performance issues:
// - https://github.com/rapidsai/cudf/pull/6943
// - https://github.com/rapidsai/cudf/pull/8277
set.insert_if_async(haystack_iter,
haystack_iter + haystack.num_rows(),
thrust::counting_iterator<size_type>(0), // stencil
row_is_valid{row_bitmask_ptr},
stream.value());
} else {
set.insert_async(haystack_iter, haystack_iter + haystack.num_rows(), stream.value());
}
if (needles_has_nulls && compare_nulls == null_equality::UNEQUAL) {
auto const bitmask_buffer_and_ptr = build_row_bitmask(needles, stream);
auto const row_bitmask_ptr = bitmask_buffer_and_ptr.second;
set.contains_if_async(needles_iter,
needles_iter + needles.num_rows(),
thrust::counting_iterator<size_type>(0), // stencil
row_is_valid{row_bitmask_ptr},
contained.begin(),
stream.value());
} else {
set.contains_async(
needles_iter, needles_iter + needles.num_rows(), contained.begin(), stream.value());
}
};
if (cudf::detail::has_nested_columns(haystack)) {
dispatch_nan_comparator<true>(compare_nulls,
compare_nans,
haystack_has_nulls,
has_any_nulls,
self_equal,
two_table_equal,
d_hasher,
helper_func);
} else {
dispatch_nan_comparator<false>(compare_nulls,
compare_nans,
haystack_has_nulls,
has_any_nulls,
self_equal,
two_table_equal,
d_hasher,
helper_func);
}
return contained;
}
} // namespace cudf::detail
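// Illustrative usage sketch (not part of the original file; `haystack_tv`, `needles_tv`,
// `stream` and `mr` are assumed to be valid table_views, a cuda_stream_view and a
// device_memory_resource*):
//
//   auto const result = cudf::detail::contains(haystack_tv,
//                                              needles_tv,
//                                              null_equality::EQUAL,
//                                              nan_equality::ALL_EQUAL,
//                                              stream,
//                                              mr);
//   // result[i] == true iff row i of `needles_tv` appears in `haystack_tv`.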
rapidsai_public_repos/cudf/cpp/src/search/contains_scalar.cu
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/pair.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Get the underlying value of a scalar through a scalar device view.
*
* @tparam Element The scalar's value type
* @tparam ScalarDView Type of the input scalar device view
* @param d_scalar The input scalar device view
*/
template <typename Element, typename ScalarDView>
__device__ auto inline get_scalar_value(ScalarDView d_scalar)
{
if constexpr (cudf::is_fixed_point<Element>()) {
return d_scalar.rep();
} else {
return d_scalar.value();
}
}
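// Note: for fixed-point types the column elements are stored as the underlying integer
// representation (device_storage_type_t), so the scalar's rep() is used for comparison;
// all other types compare the scalar's value() directly.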
struct contains_scalar_dispatch {
// SFINAE with conditional return type because we need to support device lambda in this function.
// This is required due to a limitation of nvcc.
template <typename Element>
std::enable_if_t<!is_nested<Element>(), bool> operator()(column_view const& haystack,
scalar const& needle,
rmm::cuda_stream_view stream) const
{
CUDF_EXPECTS(haystack.type() == needle.type(), "Scalar and column types must match");
// Don't need to check for needle validity. If it is invalid, it should be handled by the caller
// before dispatching to this function.
using DType = device_storage_type_t<Element>;
auto const d_haystack = column_device_view::create(haystack, stream);
auto const d_needle = get_scalar_device_view(
static_cast<cudf::scalar_type_t<Element>&>(const_cast<scalar&>(needle)));
auto const begin =
d_haystack->optional_begin<DType>(cudf::nullate::DYNAMIC{haystack.has_nulls()});
auto const end = d_haystack->optional_end<DType>(cudf::nullate::DYNAMIC{haystack.has_nulls()});
return thrust::count_if(
rmm::exec_policy(stream), begin, end, [d_needle] __device__(auto const val_pair) {
auto needle = get_scalar_value<Element>(d_needle);
return val_pair.has_value() && (needle == *val_pair);
}) > 0;
}
template <typename Element>
std::enable_if_t<is_nested<Element>(), bool> operator()(column_view const& haystack,
scalar const& needle,
rmm::cuda_stream_view stream) const
{
CUDF_EXPECTS(haystack.type() == needle.type(), "Scalar and column types must match");
// Don't need to check for needle validity. If it is invalid, it should be handled by the caller
// before dispatching to this function.
// In addition, haystack and needle structure compatibility will be checked later on by
// constructor of the table comparator.
auto const haystack_tv = table_view{{haystack}};
auto const needle_as_col = make_column_from_scalar(needle, 1, stream);
auto const needle_tv = table_view{{needle_as_col->view()}};
auto const has_nulls = has_nested_nulls(haystack_tv) || has_nested_nulls(needle_tv);
auto const comparator =
cudf::experimental::row::equality::two_table_comparator(haystack_tv, needle_tv, stream);
auto const begin = cudf::experimental::row::lhs_iterator(0);
auto const end = begin + haystack.size();
using cudf::experimental::row::rhs_index_type;
auto const check_nulls = haystack.has_nulls();
auto const haystack_cdv_ptr = column_device_view::create(haystack, stream);
auto const d_comp = comparator.equal_to<true>(nullate::DYNAMIC{has_nulls});
// Using a temporary buffer for intermediate transform results from the lambda containing
// the comparator speeds up compile-time significantly without much degradation in
// runtime performance over using the comparator in a transform iterator with thrust::count_if.
auto d_results = rmm::device_uvector<bool>(haystack.size(), stream);
thrust::transform(
rmm::exec_policy(stream),
begin,
end,
d_results.begin(),
[d_comp, check_nulls, d_haystack = *haystack_cdv_ptr] __device__(auto const idx) {
if (check_nulls && d_haystack.is_null_nocheck(static_cast<size_type>(idx))) {
return false;
}
return d_comp(idx, rhs_index_type{0}); // compare haystack[idx] == needle[0].
});
return thrust::count(rmm::exec_policy(stream), d_results.begin(), d_results.end(), true) > 0;
}
};
template <>
bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& haystack,
scalar const& needle,
rmm::cuda_stream_view stream) const
{
auto const dict_col = cudf::dictionary_column_view(haystack);
// first, find the needle in the dictionary's key set
auto const index = cudf::dictionary::detail::get_index(
dict_col, needle, stream, rmm::mr::get_current_device_resource());
// if found, check the index is actually in the indices column
return index->is_valid(stream) && cudf::type_dispatcher(dict_col.indices().type(),
contains_scalar_dispatch{},
dict_col.indices(),
*index,
stream);
}
} // namespace
bool contains(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream)
{
if (haystack.is_empty()) { return false; }
if (not needle.is_valid(stream)) { return haystack.has_nulls(); }
return cudf::type_dispatcher(
haystack.type(), contains_scalar_dispatch{}, haystack, needle, stream);
}
} // namespace detail
bool contains(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
return detail::contains(haystack, needle, stream);
}
} // namespace cudf
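// Illustrative usage sketch (not part of the original file; `haystack_col` and
// `needle_scalar` are assumed to be an existing cudf::column_view and cudf::scalar):
//
//   bool const found =
//     cudf::contains(haystack_col, needle_scalar, cudf::get_default_stream());
//   // `found` is true iff the needle value occurs in the column; an invalid needle
//   // matches iff the column contains nulls.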
rapidsai_public_repos/cudf/cpp/src/rolling/grouped_rolling.cu
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "detail/optimized_unbounded_window.hpp"
#include "detail/range_comparator_utils.cuh"
#include "detail/range_window_bounds.hpp"
#include "detail/rolling.cuh"
#include "detail/rolling_jit.hpp"
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/rolling.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/rolling/range_window_bounds.hpp>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/partition.h>
namespace cudf {
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
window_bounds::get(preceding_window),
window_bounds::get(following_window),
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
window_bounds preceding_window,
window_bounds following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
empty_like(input)->view(),
preceding_window,
following_window,
min_periods,
aggr,
mr);
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
return grouped_rolling_window(group_keys,
input,
default_outputs,
window_bounds::get(preceding_window),
window_bounds::get(following_window),
min_periods,
aggr,
mr);
}
namespace detail {
/// Preceding window calculation functor.
template <bool preceding_less_than_1>
struct row_based_preceding_calc {
cudf::size_type const* _group_offsets_begin;
cudf::size_type const* _group_labels_begin;
cudf::size_type const _preceding_window;
row_based_preceding_calc(rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
cudf::size_type const& preceding_window)
: _group_offsets_begin(group_offsets.data()),
_group_labels_begin(group_labels.data()),
_preceding_window(preceding_window)
{
}
__device__ cudf::size_type operator()(cudf::size_type const& idx) const
{
auto group_label = _group_labels_begin[idx];
if constexpr (preceding_less_than_1) { // where 1 indicates only the current row.
auto group_end = _group_offsets_begin[group_label + 1];
return thrust::maximum{}(_preceding_window, -(group_end - 1 - idx));
} else {
auto group_start = _group_offsets_begin[group_label];
return thrust::minimum{}(_preceding_window,
idx - group_start + 1); // Preceding includes current row.
}
}
};
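// Illustrative example (not from the original source): for a group spanning rows [0, 4)
// and preceding_window == 3, row 0 gets min(3, 1) == 1, row 1 gets min(3, 2) == 2, and
// row 3 gets min(3, 4) == 3; the window never reaches past the start of the group.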
/// Helper to materialize preceding-window column, corrected to respect group boundaries.
/// E.g. if preceding window == 5, then:
/// 1. For the first row in the group, the preceding is set to 1,
/// 2. For the next row in the group, preceding is set to 2, etc.
std::unique_ptr<cudf::column> make_preceding_column(
rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
cudf::size_type const& preceding_window,
cudf::size_type const& num_rows,
rmm::cuda_stream_view stream)
{
if (preceding_window < 1) {
auto const calc = row_based_preceding_calc<true>(group_offsets, group_labels, preceding_window);
return cudf::detail::expand_to_column(calc, num_rows, stream);
} else {
auto const calc =
row_based_preceding_calc<false>(group_offsets, group_labels, preceding_window);
return cudf::detail::expand_to_column(calc, num_rows, stream);
}
}
/// Following window calculation functor.
template <bool following_less_than_0>
struct row_based_following_calc {
cudf::size_type const* _group_offsets_begin;
cudf::size_type const* _group_labels_begin;
cudf::size_type const _following_window;
row_based_following_calc(rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
cudf::size_type const& following_window)
: _group_offsets_begin(group_offsets.data()),
_group_labels_begin(group_labels.data()),
_following_window(following_window)
{
}
__device__ cudf::size_type operator()(cudf::size_type const& idx) const
{
auto group_label = _group_labels_begin[idx];
if constexpr (following_less_than_0) {
auto group_start = _group_offsets_begin[group_label];
return thrust::maximum{}(_following_window, -(idx - group_start) - 1);
} else {
auto group_end =
_group_offsets_begin[group_label + 1]; // Cannot fall off the end, since offsets
// is capped with `input.size()`.
return thrust::minimum{}(_following_window, (group_end - 1) - idx);
}
}
};
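// Illustrative example (not from the original source): for a group spanning rows [0, 4)
// and following_window == 2, row 0 gets min(2, 3) == 2, row 2 gets min(2, 1) == 1, and
// row 3 (the last row in the group) gets min(2, 0) == 0.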
/// Helper to materialize following-window column, corrected to respect group boundaries.
/// i.e. If following window == 5, then:
/// 1. For the last row in the group, the following is set to 0.
/// 2. For the second last row in the group, following is set to 1, etc.
std::unique_ptr<cudf::column> make_following_column(
rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
cudf::size_type const& following_window,
cudf::size_type const& num_rows,
rmm::cuda_stream_view stream)
{
if (following_window < 0) {
auto const calc = row_based_following_calc<true>(group_offsets, group_labels, following_window);
return cudf::detail::expand_to_column(calc, num_rows, stream);
} else {
auto const calc =
row_based_following_calc<false>(group_offsets, group_labels, following_window);
return cudf::detail::expand_to_column(calc, num_rows, stream);
}
}
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
window_bounds preceding_window_bounds,
window_bounds following_window_bounds,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) { return cudf::detail::empty_output_for_rolling_aggregation(input, aggr); }
CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
"Size mismatch between group_keys and input vector.");
CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
"Defaults column must be either empty or have as many rows as the input column.");
// Detect and bypass fully UNBOUNDED windows.
if (can_optimize_unbounded_window(preceding_window_bounds.is_unbounded(),
following_window_bounds.is_unbounded(),
min_periods,
aggr)) {
return optimized_unbounded_window(group_keys, input, aggr, stream, mr);
}
auto const preceding_window = preceding_window_bounds.value();
auto const following_window = following_window_bounds.value();
CUDF_EXPECTS(-(preceding_window - 1) <= following_window,
"Preceding window bounds must precede the following window bounds.");
if (group_keys.num_columns() == 0) {
// No Groupby columns specified. Treat as one big group.
return rolling_window(
input, default_outputs, preceding_window, following_window, min_periods, aggr, mr);
}
using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper;
sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES, {}};
auto const& group_offsets{helper.group_offsets(stream)};
auto const& group_labels{helper.group_labels(stream)};
// `group_offsets` are interpreted in adjacent pairs, each pair representing the offsets
// of the first, and one past the last elements in a group.
//
// If `group_offsets` is not empty, it must contain at least two offsets:
// a. 0, indicating the first element in `input`
// b. input.size(), indicating one past the last element in `input`.
//
// Thus, for an input of 1000 rows,
// 0. [] indicates a single group, spanning the entire column.
// 1. [10] is invalid.
// 2. [0, 1000] indicates a single group, spanning the entire column (thus, equivalent to no
// groups.)
// 3. [0, 500, 1000] indicates two equal-sized groups: [0,500), and [500,1000).
assert(group_offsets.size() >= 2 && group_offsets.element(0, stream) == 0 &&
group_offsets.element(group_offsets.size() - 1, stream) == input.size() &&
"Must have at least one group.");
if (aggr.kind == aggregation::CUDA || aggr.kind == aggregation::PTX) {
cudf::detail::preceding_window_wrapper grouped_preceding_window{
group_offsets.data(), group_labels.data(), preceding_window};
cudf::detail::following_window_wrapper grouped_following_window{
group_offsets.data(), group_labels.data(), following_window};
return cudf::detail::rolling_window_udf(input,
grouped_preceding_window,
"cudf::detail::preceding_window_wrapper",
grouped_following_window,
"cudf::detail::following_window_wrapper",
min_periods,
aggr,
stream,
mr);
} else {
auto const preceding_column =
make_preceding_column(group_offsets, group_labels, preceding_window, input.size(), stream);
auto const following_column =
make_following_column(group_offsets, group_labels, following_window, input.size(), stream);
return cudf::detail::rolling_window(input,
default_outputs,
preceding_column->view().begin<cudf::size_type>(),
following_column->view().begin<cudf::size_type>(),
min_periods,
aggr,
stream,
mr);
}
}
} // namespace detail
std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
window_bounds preceding_window_bounds,
window_bounds following_window_bounds,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
return detail::grouped_rolling_window(group_keys,
input,
default_outputs,
preceding_window_bounds,
following_window_bounds,
min_periods,
aggr,
cudf::get_default_stream(),
mr);
}
namespace {
/**
* @brief For a specified idx, find the lowest value of the (sorted) orderby column that
* participates in a range-window query.
*/
template <typename ElementT, typename ElementIter>
__device__ ElementT compute_lowest_in_window(ElementIter orderby_iter,
size_type idx,
[[maybe_unused]] ElementT delta)
{
if constexpr (std::is_same_v<ElementT, cudf::string_view>) {
return orderby_iter[idx];
} else {
return cudf::detail::subtract_safe(orderby_iter[idx], delta);
}
}
/**
* @brief For a specified idx, find the highest value of the (sorted) orderby column that
* participates in a range-window query.
*/
template <typename ElementT, typename ElementIter>
__device__ ElementT compute_highest_in_window(ElementIter orderby_iter,
size_type idx,
[[maybe_unused]] ElementT delta)
{
if constexpr (std::is_same_v<ElementT, cudf::string_view>) {
return orderby_iter[idx];
} else {
return cudf::detail::add_safe(orderby_iter[idx], delta);
}
}
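// Note: for a cudf::string_view order-by column the window bound is the row's own value
// (the delta is unused), because string order-by columns only support UNBOUNDED and
// CURRENT ROW range bounds (enforced further below). For the other supported types the
// bound is computed with cudf::detail::add_safe / subtract_safe.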
/**
* Accessor for values in an order-by column, on the device.
*/
template <typename T>
struct device_value_accessor {
column_device_view const col; ///< column view of column in device
/// Checks that the type used to access device values matches the rep-type
/// of the order-by column.
struct is_correct_range_rep {
template <typename U> /// Order-by type.
constexpr bool operator()() const
{
return std::is_same_v<T, cudf::detail::range_rep_type<U>>;
}
};
/**
* @brief constructor
*
* @param[in] col_ column device view of cudf column
*/
explicit __device__ device_value_accessor(column_device_view const& col_) : col{col_}
{
// For non-timestamp types, T must match the order-by column's type.
// For timestamp types, T must match the range rep type for the order-by column.
cudf_assert((type_id_matches_device_storage_type<T>(col.type().id()) or
cudf::type_dispatcher(col.type(), is_correct_range_rep{})) &&
"data type mismatch when accessing the order-by column");
}
/**
* @brief Returns the value of element at index `i`
* @param[in] i index of element
* @return value of element at index `i`
*/
__device__ T operator()(cudf::size_type i) const { return col.element<T>(i); }
};
template <typename T>
using const_device_iterator =
thrust::transform_iterator<device_value_accessor<T>, thrust::counting_iterator<size_type>>;
/// This is a stand-in for the `cudf::column_device_view::begin<T>()`, which is `__host__` only.
/// For range window functions, one might need to iterate over the order-by column, per row.
template <typename T, CUDF_ENABLE_IF(cudf::column_device_view::has_element_accessor<T>())>
[[nodiscard]] __device__ const_device_iterator<T> begin(cudf::column_device_view const& col)
{
return const_device_iterator<T>{thrust::make_counting_iterator<cudf::size_type>(0),
device_value_accessor<T>{col}};
}
/// Given a single, ungrouped order-by column, return the indices corresponding
/// to the first null element, and (one past) the last null element.
/// The input column is sorted, with all null values clustered either
/// at the beginning of the column or at the end.
/// If no null values are found, null_begin and null_end are 0.
std::tuple<size_type, size_type> get_null_bounds_for_orderby_column(
column_view const& orderby_column)
{
auto const num_rows = orderby_column.size();
auto const num_nulls = orderby_column.null_count();
if (num_nulls == num_rows || num_nulls == 0) {
// Short-circuit: All nulls, or no nulls.
return std::make_tuple(0, num_nulls);
}
auto const first_row_is_null = orderby_column.null_count(0, 1) == 1;
return first_row_is_null ? std::make_tuple(0, num_nulls)
: std::make_tuple(num_rows - num_nulls, num_rows);
}
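// Illustrative example (not from the original source): for a 10-row sorted order-by
// column with 3 nulls clustered at the front, the bounds are (0, 3); with the 3 nulls
// clustered at the end, (7, 10); with no nulls at all, (0, 0).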
/// Range window computation, with
/// 1. no grouping keys specified
/// 2. rows in ASCENDING order.
/// Treat as one single group.
template <typename T>
std::unique_ptr<column> range_window_ASC(column_view const& input,
column_view const& orderby_column,
T preceding_window,
bool preceding_window_is_unbounded,
T following_window,
bool following_window_is_unbounded,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [h_nulls_begin_idx, h_nulls_end_idx] = get_null_bounds_for_orderby_column(orderby_column);
auto const p_orderby_device_view = cudf::column_device_view::create(orderby_column, stream);
auto const preceding_calculator =
[nulls_begin_idx = h_nulls_begin_idx,
nulls_end_idx = h_nulls_end_idx,
orderby_device_view = *p_orderby_device_view,
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
if (preceding_window_is_unbounded) {
return idx + 1; // Technically `idx - 0 + 1`,
// where 0 == Group start,
// and 1 accounts for the current row
}
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Must consider beginning of null-group as window start.
return idx - nulls_begin_idx + 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search starts at nulls_end_idx.
// 2. NO NULLS: Binary search starts at 0 (also nulls_end_idx).
// Otherwise, NULLS LAST ordering. Start at 0.
auto const group_start = nulls_begin_idx == 0 ? nulls_end_idx : 0;
auto const lowest_in_window = compute_lowest_in_window(d_orderby, idx, preceding_window);
return ((d_orderby + idx) - thrust::lower_bound(thrust::seq,
d_orderby + group_start,
d_orderby + idx,
lowest_in_window,
cudf::detail::nan_aware_less{})) +
1; // Add 1, for `preceding` to account for current row.
};
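// Illustrative example (not from the original source): for an ascending order-by column
// {1, 2, 4, 7} with no nulls and preceding_window == 2, the row at idx == 2 (value 4)
// searches for lowest_in_window == 2; lower_bound lands on position 1, so preceding ==
// (2 - 1) + 1 == 2, i.e. the window covers the rows with values {2, 4}.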
auto const preceding_column =
cudf::detail::expand_to_column(preceding_calculator, input.size(), stream);
auto const following_calculator =
[nulls_begin_idx = h_nulls_begin_idx,
nulls_end_idx = h_nulls_end_idx,
num_rows = input.size(),
orderby_device_view = *p_orderby_device_view,
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
if (following_window_is_unbounded) { return num_rows - idx - 1; }
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Window ends at the end of the null group.
return nulls_end_idx - idx - 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search ends at num_rows.
// 2. NO NULLS: Binary search also ends at num_rows.
// Otherwise, NULLS LAST ordering. End at nulls_begin_idx.
auto const group_end = nulls_begin_idx == 0 ? num_rows : nulls_begin_idx;
auto const highest_in_window = compute_highest_in_window(d_orderby, idx, following_window);
return (thrust::upper_bound(thrust::seq,
d_orderby + idx,
d_orderby + group_end,
highest_in_window,
cudf::detail::nan_aware_less{}) -
(d_orderby + idx)) -
1;
};
auto const following_column =
cudf::detail::expand_to_column(following_calculator, input.size(), stream);
return cudf::detail::rolling_window(
input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr);
}
// Given an orderby column grouped as specified in group_offsets,
// return the following two vectors:
// 1. Vector with one entry per group, indicating the offset in the group
// where the null values begin.
// 2. Vector with one entry per group, indicating the offset in the group
// where the null values end. (i.e. 1 past the last null.)
// Each group in the input orderby column must be sorted,
// with null values clustered at either the start or the end of each group.
// If there are no nulls for any given group, (nulls_begin, nulls_end) == (0,0).
std::tuple<rmm::device_uvector<size_type>, rmm::device_uvector<size_type>>
get_null_bounds_for_orderby_column(column_view const& orderby_column,
cudf::device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream)
{
// For each group, the null values are clustered at the beginning or the end of the group.
// These nulls cannot participate, except in their own window.
auto const num_groups = group_offsets.size() - 1;
if (orderby_column.has_nulls()) {
auto null_start = rmm::device_uvector<size_type>(num_groups, stream);
auto null_end = rmm::device_uvector<size_type>(num_groups, stream);
auto p_orderby_device_view = column_device_view::create(orderby_column, stream);
// Null values exist. Find null bounds, per group.
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(static_cast<size_type>(0)),
thrust::make_counting_iterator(static_cast<size_type>(num_groups)),
[d_orderby = *p_orderby_device_view,
d_group_offsets = group_offsets.data(),
d_null_start = null_start.data(),
d_null_end = null_end.data()] __device__(auto group_label) {
auto group_start = d_group_offsets[group_label];
auto group_end = d_group_offsets[group_label + 1];
auto first_element_is_null = d_orderby.is_null_nocheck(group_start);
auto last_element_is_null = d_orderby.is_null_nocheck(group_end - 1);
if (!first_element_is_null && !last_element_is_null) {
// Short circuit: No nulls.
d_null_start[group_label] = group_start;
d_null_end[group_label] = group_start;
} else if (first_element_is_null && last_element_is_null) {
// Short circuit: All nulls.
d_null_start[group_label] = group_start;
d_null_end[group_label] = group_end;
} else if (first_element_is_null) {
// NULLS FIRST.
d_null_start[group_label] = group_start;
d_null_end[group_label] = *thrust::partition_point(
thrust::seq,
thrust::make_counting_iterator(group_start),
thrust::make_counting_iterator(group_end),
[&d_orderby] __device__(auto i) { return d_orderby.is_null_nocheck(i); });
} else {
// NULLS LAST.
d_null_end[group_label] = group_end;
d_null_start[group_label] = *thrust::partition_point(
thrust::seq,
thrust::make_counting_iterator(group_start),
thrust::make_counting_iterator(group_end),
[&d_orderby] __device__(auto i) { return d_orderby.is_valid_nocheck(i); });
}
});
return std::make_tuple(std::move(null_start), std::move(null_end));
} else {
// The returned vectors have num_groups items, but the input offsets have num_groups+1
// Drop the last element using a span
auto const group_offsets_span =
cudf::device_span<cudf::size_type const>(group_offsets.data(), num_groups);
// When there are no nulls, just copy the input group offsets to the output.
return std::make_tuple(cudf::detail::make_device_uvector_async(
group_offsets_span, stream, rmm::mr::get_current_device_resource()),
cudf::detail::make_device_uvector_async(
group_offsets_span, stream, rmm::mr::get_current_device_resource()));
}
}
// Range window computation, for orderby column in ASCENDING order.
template <typename T>
std::unique_ptr<column> range_window_ASC(column_view const& input,
column_view const& orderby_column,
rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
T preceding_window,
bool preceding_window_is_unbounded,
T following_window,
bool following_window_is_unbounded,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [null_start, null_end] =
get_null_bounds_for_orderby_column(orderby_column, group_offsets, stream);
auto const p_orderby_device_view = cudf::column_device_view::create(orderby_column, stream);
auto const preceding_calculator =
[d_group_offsets = group_offsets.data(),
d_group_labels = group_labels.data(),
orderby_device_view = *p_orderby_device_view,
d_nulls_begin = null_start.data(),
d_nulls_end = null_end.data(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
auto const group_label = d_group_labels[idx];
auto const group_start = d_group_offsets[group_label];
auto const nulls_begin = d_nulls_begin[group_label];
auto const nulls_end = d_nulls_end[group_label];
if (preceding_window_is_unbounded) { return idx - group_start + 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window starts at the start of the null group.
return idx - nulls_begin + 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search must begin at nulls_end.
// 2. NO NULLS: Search must begin at group_start (which also equals nulls_end.)
// Otherwise, NULLS LAST ordering. Search must start at group_start.
auto const search_start = nulls_begin == group_start ? nulls_end : group_start;
auto const lowest_in_window = compute_lowest_in_window(d_orderby, idx, preceding_window);
return ((d_orderby + idx) - thrust::lower_bound(thrust::seq,
d_orderby + search_start,
d_orderby + idx,
lowest_in_window,
cudf::detail::nan_aware_less{})) +
1; // Add 1, for `preceding` to account for current row.
};
auto const preceding_column =
cudf::detail::expand_to_column(preceding_calculator, input.size(), stream);
auto const following_calculator =
[d_group_offsets = group_offsets.data(),
d_group_labels = group_labels.data(),
orderby_device_view = *p_orderby_device_view,
d_nulls_begin = null_start.data(),
d_nulls_end = null_end.data(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
auto const group_label = d_group_labels[idx];
auto const group_start = d_group_offsets[group_label];
auto const group_end =
d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets
// is capped with `input.size()`.
auto const nulls_begin = d_nulls_begin[group_label];
auto const nulls_end = d_nulls_end[group_label];
if (following_window_is_unbounded) { return (group_end - idx) - 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window ends at the end of the null group.
return nulls_end - idx - 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search ends at group_end.
// 2. NO NULLS: Search ends at group_end.
// Otherwise, NULLS LAST ordering. Search ends at nulls_begin.
auto const search_end = nulls_begin == group_start ? group_end : nulls_begin;
auto const highest_in_window = compute_highest_in_window(d_orderby, idx, following_window);
return (thrust::upper_bound(thrust::seq,
d_orderby + idx,
d_orderby + search_end,
highest_in_window,
cudf::detail::nan_aware_less{}) -
(d_orderby + idx)) -
1;
};
auto const following_column =
cudf::detail::expand_to_column(following_calculator, input.size(), stream);
return cudf::detail::rolling_window(
input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr);
}
/// Range window computation, with
/// 1. no grouping keys specified
/// 2. rows in DESCENDING order.
/// Treat as one single group.
template <typename T>
std::unique_ptr<column> range_window_DESC(column_view const& input,
column_view const& orderby_column,
T preceding_window,
bool preceding_window_is_unbounded,
T following_window,
bool following_window_is_unbounded,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [h_nulls_begin_idx, h_nulls_end_idx] = get_null_bounds_for_orderby_column(orderby_column);
auto const p_orderby_device_view = cudf::column_device_view::create(orderby_column, stream);
auto const preceding_calculator =
[nulls_begin_idx = h_nulls_begin_idx,
nulls_end_idx = h_nulls_end_idx,
orderby_device_view = *p_orderby_device_view,
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
if (preceding_window_is_unbounded) {
return idx + 1; // Technically `idx - 0 + 1`,
// where 0 == Group start,
// and 1 accounts for the current row
}
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Must consider beginning of null-group as window start.
return idx - nulls_begin_idx + 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Binary search the group, excluding null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Binary search starts at nulls_end_idx.
// 2. NO NULLS: Binary search starts at 0 (also nulls_end_idx).
// Otherwise, NULLS LAST ordering. Start at 0.
auto const group_start = nulls_begin_idx == 0 ? nulls_end_idx : 0;
auto const highest_in_window = compute_highest_in_window(d_orderby, idx, preceding_window);
return ((d_orderby + idx) - thrust::lower_bound(thrust::seq,
d_orderby + group_start,
d_orderby + idx,
highest_in_window,
cudf::detail::nan_aware_greater{})) +
1; // Add 1, for `preceding` to account for current row.
};
auto const preceding_column =
cudf::detail::expand_to_column(preceding_calculator, input.size(), stream);
auto const following_calculator =
[nulls_begin_idx = h_nulls_begin_idx,
nulls_end_idx = h_nulls_end_idx,
num_rows = input.size(),
orderby_device_view = *p_orderby_device_view,
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
if (following_window_is_unbounded) { return (num_rows - idx) - 1; }
if (idx >= nulls_begin_idx && idx < nulls_end_idx) {
// Current row is in the null group.
// Window ends at the end of the null group.
return nulls_end_idx - idx - 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Search must exclude null group.
// If nulls_begin_idx == 0, either
// 1. NULLS FIRST ordering: Search ends at num_rows.
// 2. NO NULLS: Search also ends at num_rows.
// Otherwise, NULLS LAST ordering: End at nulls_begin_idx.
auto const group_end = nulls_begin_idx == 0 ? num_rows : nulls_begin_idx;
auto const lowest_in_window = compute_lowest_in_window(d_orderby, idx, following_window);
return (thrust::upper_bound(thrust::seq,
d_orderby + idx,
d_orderby + group_end,
lowest_in_window,
cudf::detail::nan_aware_greater{}) -
(d_orderby + idx)) -
1;
};
auto const following_column =
cudf::detail::expand_to_column(following_calculator, input.size(), stream);
return cudf::detail::rolling_window(
input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr);
}
// Range window computation, for rows in DESCENDING order.
template <typename T>
std::unique_ptr<column> range_window_DESC(column_view const& input,
column_view const& orderby_column,
rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
T preceding_window,
bool preceding_window_is_unbounded,
T following_window,
bool following_window_is_unbounded,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [null_start, null_end] =
get_null_bounds_for_orderby_column(orderby_column, group_offsets, stream);
auto const p_orderby_device_view = cudf::column_device_view::create(orderby_column, stream);
auto const preceding_calculator =
[d_group_offsets = group_offsets.data(),
d_group_labels = group_labels.data(),
orderby_device_view = *p_orderby_device_view,
d_nulls_begin = null_start.data(),
d_nulls_end = null_end.data(),
preceding_window,
preceding_window_is_unbounded] __device__(size_type idx) -> size_type {
auto const group_label = d_group_labels[idx];
auto const group_start = d_group_offsets[group_label];
auto const nulls_begin = d_nulls_begin[group_label];
auto const nulls_end = d_nulls_end[group_label];
if (preceding_window_is_unbounded) { return (idx - group_start) + 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window starts at the start of the null group.
return idx - nulls_begin + 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search must begin at nulls_end.
// 2. NO NULLS: Search must begin at group_start (which also equals nulls_end.)
// Otherwise, NULLS LAST ordering. Search must start at group_start.
auto const search_start = nulls_begin == group_start ? nulls_end : group_start;
auto const highest_in_window = compute_highest_in_window(d_orderby, idx, preceding_window);
return ((d_orderby + idx) - thrust::lower_bound(thrust::seq,
d_orderby + search_start,
d_orderby + idx,
highest_in_window,
cudf::detail::nan_aware_greater{})) +
1; // Add 1, for `preceding` to account for current row.
};
auto const preceding_column =
cudf::detail::expand_to_column(preceding_calculator, input.size(), stream);
auto const following_calculator =
[d_group_offsets = group_offsets.data(),
d_group_labels = group_labels.data(),
orderby_device_view = *p_orderby_device_view,
d_nulls_begin = null_start.data(),
d_nulls_end = null_end.data(),
following_window,
following_window_is_unbounded] __device__(size_type idx) -> size_type {
auto const group_label = d_group_labels[idx];
auto const group_start = d_group_offsets[group_label];
auto const group_end = d_group_offsets[group_label + 1];
auto const nulls_begin = d_nulls_begin[group_label];
auto const nulls_end = d_nulls_end[group_label];
if (following_window_is_unbounded) { return (group_end - idx) - 1; }
// If idx lies in the null-range, the window is the null range.
if (idx >= nulls_begin && idx < nulls_end) {
// Current row is in the null group.
// The window ends at the end of the null group.
return nulls_end - idx - 1;
}
auto const d_orderby = begin<T>(orderby_device_view);
// orderby[idx] not null. Search must exclude the null group.
// If nulls_begin == group_start, either of the following is true:
// 1. NULLS FIRST ordering: Search ends at group_end.
// 2. NO NULLS: Search ends at group_end.
// Otherwise, NULLS LAST ordering. Search ends at nulls_begin.
auto const search_end = nulls_begin == group_start ? group_end : nulls_begin;
auto const lowest_in_window = compute_lowest_in_window(d_orderby, idx, following_window);
return (thrust::upper_bound(thrust::seq,
d_orderby + idx,
d_orderby + search_end,
lowest_in_window,
cudf::detail::nan_aware_greater{}) -
(d_orderby + idx)) -
1;
};
auto const following_column =
cudf::detail::expand_to_column(following_calculator, input.size(), stream);
if (aggr.kind == aggregation::CUDA || aggr.kind == aggregation::PTX) {
CUDF_FAIL("Ranged rolling window does NOT (yet) support UDF.");
} else {
return cudf::detail::rolling_window(
input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr);
}
}
template <typename OrderByT>
std::unique_ptr<column> grouped_range_rolling_window_impl(
column_view const& input,
column_view const& orderby_column,
cudf::order const& order_of_orderby_column,
rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
range_window_bounds const& preceding_window,
range_window_bounds const& following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [preceding_value, following_value] = [&] {
if constexpr (std::is_same_v<OrderByT, cudf::string_view>) {
CUDF_EXPECTS(
preceding_window.is_unbounded() || preceding_window.is_current_row(),
"For STRING order-by column, preceding range has to be either UNBOUNDED or CURRENT ROW.");
CUDF_EXPECTS(
following_window.is_unbounded() || following_window.is_current_row(),
"For STRING order-by column, following range has to be either UNBOUNDED or CURRENT ROW.");
return std::pair{cudf::string_view{}, cudf::string_view{}};
} else {
return std::pair{
detail::range_comparable_value<OrderByT>(preceding_window, orderby_column.type(), stream),
detail::range_comparable_value<OrderByT>(following_window, orderby_column.type(), stream)};
}
}();
if (order_of_orderby_column == cudf::order::ASCENDING) {
return group_offsets.is_empty() ? range_window_ASC(input,
orderby_column,
preceding_value,
preceding_window.is_unbounded(),
following_value,
following_window.is_unbounded(),
min_periods,
aggr,
stream,
mr)
: range_window_ASC(input,
orderby_column,
group_offsets,
group_labels,
preceding_value,
preceding_window.is_unbounded(),
following_value,
following_window.is_unbounded(),
min_periods,
aggr,
stream,
mr);
} else {
return group_offsets.is_empty() ? range_window_DESC(input,
orderby_column,
preceding_value,
preceding_window.is_unbounded(),
following_value,
following_window.is_unbounded(),
min_periods,
aggr,
stream,
mr)
: range_window_DESC(input,
orderby_column,
group_offsets,
group_labels,
preceding_value,
preceding_window.is_unbounded(),
following_value,
following_window.is_unbounded(),
min_periods,
aggr,
stream,
mr);
}
}
struct dispatch_grouped_range_rolling_window {
template <typename OrderByColumnType, typename... Args>
std::enable_if_t<!detail::is_supported_order_by_column_type<OrderByColumnType>(),
std::unique_ptr<column>>
operator()(Args&&...) const
{
CUDF_FAIL("Unsupported OrderBy column type.");
}
template <typename OrderByColumnType>
std::enable_if_t<detail::is_supported_order_by_column_type<OrderByColumnType>(),
std::unique_ptr<column>>
operator()(column_view const& input,
column_view const& orderby_column,
cudf::order const& order_of_orderby_column,
rmm::device_uvector<cudf::size_type> const& group_offsets,
rmm::device_uvector<cudf::size_type> const& group_labels,
range_window_bounds const& preceding_window,
range_window_bounds const& following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
return grouped_range_rolling_window_impl<OrderByColumnType>(input,
orderby_column,
order_of_orderby_column,
group_offsets,
group_labels,
preceding_window,
following_window,
min_periods,
aggr,
stream,
mr);
}
};
/**
* @brief Functor to convert from size_type (number of days) to appropriate duration type.
*/
struct to_duration_bounds {
template <typename OrderBy, std::enable_if_t<cudf::is_timestamp<OrderBy>(), void>* = nullptr>
range_window_bounds operator()(size_type num_days) const
{
using DurationT = typename OrderBy::duration;
return range_window_bounds::get(duration_scalar<DurationT>{duration_D{num_days}, true});
}
template <typename OrderBy, std::enable_if_t<!cudf::is_timestamp<OrderBy>(), void>* = nullptr>
range_window_bounds operator()(size_type) const
{
CUDF_FAIL("Expected timestamp orderby column.");
}
};
/**
* @brief Get duration type corresponding to specified timestamp type.
*/
data_type get_duration_type_for(cudf::data_type timestamp_type)
{
switch (timestamp_type.id()) {
case type_id::TIMESTAMP_DAYS: return data_type{type_id::DURATION_DAYS};
case type_id::TIMESTAMP_SECONDS: return data_type{type_id::DURATION_SECONDS};
case type_id::TIMESTAMP_MILLISECONDS: return data_type{type_id::DURATION_MILLISECONDS};
case type_id::TIMESTAMP_MICROSECONDS: return data_type{type_id::DURATION_MICROSECONDS};
case type_id::TIMESTAMP_NANOSECONDS: return data_type{type_id::DURATION_NANOSECONDS};
default: CUDF_FAIL("Expected timestamp orderby column.");
}
}
/**
* @brief Bridge function to convert from size_type (number of days) to appropriate duration type.
*
* This helps adapt the old `grouped_time_range_rolling_window()` functions that took a "number of
* days" to the new `range_window_bounds` interface.
*
* @param num_days Window bounds specified in number of days in `size_type`
* @param timestamp_type Data-type of the orderby column to which the `num_days` is to be adapted.
* @return range_window_bounds A `range_window_bounds` to be used with the new API.
*/
range_window_bounds to_range_bounds(cudf::size_type num_days, cudf::data_type timestamp_type)
{
return cudf::type_dispatcher(timestamp_type, to_duration_bounds{}, num_days);
}
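// Illustrative example (not from the original source): to_range_bounds(3,
// data_type{type_id::TIMESTAMP_SECONDS}) yields a range_window_bounds holding a
// duration_s scalar of 259200 seconds (3 days), matching the order-by column's
// resolution.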
/**
* @brief Bridge function to convert from `window_bounds` (in days) to appropriate duration type.
*
* This helps adapt the old `grouped_time_range_rolling_window()` functions that took a
* `window_bounds` to the new `range_window_bounds` interface.
*
* @param days_bounds The static window-width `window_bounds` object
* @param timestamp_type Data-type of the orderby column to which the `days_bounds` is to be adapted.
* @return range_window_bounds A `range_window_bounds` to be used with the new API.
*/
range_window_bounds to_range_bounds(cudf::window_bounds const& days_bounds,
cudf::data_type timestamp_type)
{
return days_bounds.is_unbounded()
? range_window_bounds::unbounded(get_duration_type_for(timestamp_type))
: cudf::type_dispatcher(timestamp_type, to_duration_bounds{}, days_bounds.value());
}
} // namespace
namespace detail {
/**
* @copydoc std::unique_ptr<column> grouped_range_rolling_window(
* table_view const& group_keys,
* column_view const& orderby_column,
* cudf::order const& order,
* column_view const& input,
* range_window_bounds const& preceding,
* range_window_bounds const& following,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr );
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> grouped_range_rolling_window(table_view const& group_keys,
column_view const& order_by_column,
cudf::order const& order,
column_view const& input,
range_window_bounds const& preceding,
range_window_bounds const& following,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) { return cudf::detail::empty_output_for_rolling_aggregation(input, aggr); }
CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
"Size mismatch between group_keys and input vector.");
CUDF_EXPECTS((min_periods > 0), "min_periods must be positive");
// Detect and bypass fully UNBOUNDED windows.
if (can_optimize_unbounded_window(
preceding.is_unbounded(), following.is_unbounded(), min_periods, aggr)) {
return optimized_unbounded_window(group_keys, input, aggr, stream, mr);
}
using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper;
using index_vector = sort_groupby_helper::index_vector;
index_vector group_offsets(0, stream), group_labels(0, stream);
if (group_keys.num_columns() > 0) {
sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES, {}};
group_offsets = index_vector(helper.group_offsets(stream), stream);
group_labels = index_vector(helper.group_labels(stream), stream);
}
return cudf::type_dispatcher(order_by_column.type(),
dispatch_grouped_range_rolling_window{},
input,
order_by_column,
order,
group_offsets,
group_labels,
preceding,
following,
min_periods,
aggr,
stream,
mr);
}
} // namespace detail
/**
* @copydoc std::unique_ptr<column> grouped_time_range_rolling_window(
* table_view const& group_keys,
* column_view const& timestamp_column,
* cudf::order const& timestamp_order,
* column_view const& input,
* size_type preceding_window_in_days,
* size_type following_window_in_days,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr);
*/
std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
size_type preceding_window_in_days,
size_type following_window_in_days,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
auto preceding = to_range_bounds(preceding_window_in_days, timestamp_column.type());
auto following = to_range_bounds(following_window_in_days, timestamp_column.type());
return detail::grouped_range_rolling_window(group_keys,
timestamp_column,
timestamp_order,
input,
preceding,
following,
min_periods,
aggr,
cudf::get_default_stream(),
mr);
}
/**
* @copydoc grouped_time_range_rolling_window(
* table_view const& group_keys,
* column_view const& timestamp_column,
* cudf::order const& timestamp_order,
* column_view const& input,
* window_bounds preceding_window_in_days,
* window_bounds following_window_in_days,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr);
*/
std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
window_bounds preceding_window_in_days,
window_bounds following_window_in_days,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
range_window_bounds preceding =
to_range_bounds(preceding_window_in_days, timestamp_column.type());
range_window_bounds following =
to_range_bounds(following_window_in_days, timestamp_column.type());
return detail::grouped_range_rolling_window(group_keys,
timestamp_column,
timestamp_order,
input,
preceding,
following,
min_periods,
aggr,
cudf::get_default_stream(),
mr);
}
/**
* @copydoc grouped_range_rolling_window(
* table_view const& group_keys,
* column_view const& orderby_column,
* cudf::order const& order,
* column_view const& input,
* range_window_bounds const& preceding,
* range_window_bounds const& following,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr );
*/
std::unique_ptr<column> grouped_range_rolling_window(table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
range_window_bounds const& preceding,
range_window_bounds const& following,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::grouped_range_rolling_window(group_keys,
timestamp_column,
timestamp_order,
input,
preceding,
following,
min_periods,
aggr,
cudf::get_default_stream(),
mr);
}
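// Example usage (hedged sketch; `keys`, `timestamps`, and `values` are
// caller-provided columns, pre-sorted by group key and timestamp, and the
// default memory resource is used):
//
//   auto const preceding = cudf::range_window_bounds::get(
//     cudf::duration_scalar<cudf::duration_D>{cudf::duration_D{1}, true});
//   auto const following = cudf::range_window_bounds::get(
//     cudf::duration_scalar<cudf::duration_D>{cudf::duration_D{0}, true});
//   auto result = cudf::grouped_range_rolling_window(
//     cudf::table_view{{keys}}, timestamps, cudf::order::ASCENDING, values,
//     preceding, following, /*min_periods=*/1,
//     *cudf::make_count_aggregation<cudf::rolling_aggregation>());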
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/rolling/range_window_bounds.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "detail/range_window_bounds.hpp"
#include <cudf/rolling/range_window_bounds.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/types.hpp>
#include <cudf/wrappers/durations.hpp>
namespace cudf {
namespace {
/**
* @brief Factory to (copy) construct scalars.
*
* Derived types of scalars are cloned, to be adopted by `range_window_bounds`.
* This makes it possible to copy construct and copy assign `range_window_bounds` objects.
*/
struct range_scalar_constructor {
template <typename T, CUDF_ENABLE_IF(not detail::is_supported_range_type<T>())>
std::unique_ptr<scalar> operator()(scalar const& range_scalar_) const
{
CUDF_FAIL(
"Unsupported range type. "
"Only durations, fixed-point, and non-boolean numeric range types are allowed.");
}
template <typename T, CUDF_ENABLE_IF(cudf::is_duration<T>())>
std::unique_ptr<scalar> operator()(scalar const& range_scalar_) const
{
return std::make_unique<duration_scalar<T>>(
static_cast<duration_scalar<T> const&>(range_scalar_));
}
template <typename T, CUDF_ENABLE_IF(cudf::is_numeric<T>() && not cudf::is_boolean<T>())>
std::unique_ptr<scalar> operator()(scalar const& range_scalar_) const
{
return std::make_unique<numeric_scalar<T>>(
static_cast<numeric_scalar<T> const&>(range_scalar_));
}
template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_point<T>())>
std::unique_ptr<scalar> operator()(scalar const& range_scalar_) const
{
return std::make_unique<fixed_point_scalar<T>>(
static_cast<fixed_point_scalar<T> const&>(range_scalar_));
}
};
} // namespace
range_window_bounds::range_window_bounds(extent_type extent_, std::unique_ptr<scalar> range_scalar_)
: _extent{extent_}, _range_scalar{std::move(range_scalar_)}
{
CUDF_EXPECTS(_range_scalar.get(), "Range window scalar cannot be null.");
CUDF_EXPECTS(_extent == extent_type::UNBOUNDED || _extent == extent_type::CURRENT_ROW ||
_range_scalar->is_valid(),
"Bounded Range window scalar must be valid.");
}
range_window_bounds range_window_bounds::unbounded(data_type type)
{
return {extent_type::UNBOUNDED, make_default_constructed_scalar(type)};
}
range_window_bounds range_window_bounds::current_row(data_type type)
{
return {extent_type::CURRENT_ROW, make_default_constructed_scalar(type)};
}
range_window_bounds range_window_bounds::get(scalar const& boundary)
{
return {extent_type::BOUNDED,
cudf::type_dispatcher(boundary.type(), range_scalar_constructor{}, boundary)};
}
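// Example (illustrative): a bounded numeric range of 10 for an integral orderby
// column, and an unbounded range for a TIMESTAMP_SECONDS orderby column:
//
//   auto bounded   = cudf::range_window_bounds::get(cudf::numeric_scalar<int32_t>{10});
//   auto unbounded = cudf::range_window_bounds::unbounded(
//     cudf::data_type{cudf::type_id::DURATION_SECONDS});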
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/rolling/rolling.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "detail/rolling.cuh"
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/utilities/default_stream.hpp>
namespace cudf {
// Applies a fixed-size rolling window function to the values in a column, with default output
// specified
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rolling_window(input,
default_outputs,
preceding_window,
following_window,
min_periods,
agg,
cudf::get_default_stream(),
mr);
}
// Applies a fixed-size rolling window function to the values in a column, without default specified
std::unique_ptr<column> rolling_window(column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
auto defaults =
cudf::is_dictionary(input.type()) ? dictionary_column_view(input).indices() : input;
return detail::rolling_window(input,
empty_like(defaults)->view(),
preceding_window,
following_window,
min_periods,
agg,
cudf::get_default_stream(),
mr);
}
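// Example (hedged sketch): a 3-row centered rolling SUM over a caller-provided
// column `col`, requiring at least one observation per window. Note that the
// preceding window includes the current row:
//
//   auto result = cudf::rolling_window(
//     col, /*preceding_window=*/2, /*following_window=*/1, /*min_periods=*/1,
//     *cudf::make_sum_aggregation<cudf::rolling_aggregation>());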
// Applies a variable-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rolling_window(
input, preceding_window, following_window, min_periods, agg, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling.cuh
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "lead_lag_nested.cuh"
#include "nth_element.cuh"
#include "rolling.hpp"
#include "rolling_collect_list.cuh"
#include "rolling_jit.hpp"
#include <reductions/nested_type_minmax_util.cuh>
#include <cudf/aggregation.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby/sort_helper.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/lists/detail/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <jit/cache.hpp>
#include <jit/parser.hpp>
#include <jit/util.hpp>
#include <jit_preprocessed_files/rolling/jit/kernel.cu.jit.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/reduce.h>
#include <cuda/std/climits>
#include <cuda/std/limits>
#include <memory>
namespace cudf {
namespace detail {
/// Helper function to materialize preceding/following offsets.
template <typename Calculator>
std::unique_ptr<column> expand_to_column(Calculator const& calc,
size_type const& num_rows,
rmm::cuda_stream_view stream)
{
auto window_column = cudf::make_numeric_column(
cudf::data_type{type_to_id<size_type>()}, num_rows, cudf::mask_state::UNALLOCATED, stream);
auto begin = cudf::detail::make_counting_transform_iterator(0, calc);
thrust::copy_n(
rmm::exec_policy(stream), begin, num_rows, window_column->mutable_view().data<size_type>());
return window_column;
}
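// Example (illustrative only; requires extended device lambdas): materializing a
// constant preceding window of 2 rows for every row of a column of size `n`:
//
//   auto preceding_col =
//     expand_to_column([] __device__(size_type) { return size_type{2}; }, n, stream);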
/**
* @brief Operator for applying a generic (non-specialized) rolling aggregation on a single window.
*/
template <typename InputType, aggregation::Kind op>
struct DeviceRolling {
size_type min_periods;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = op>
static constexpr bool is_supported()
{
return cudf::detail::is_valid_aggregation<T, O>() && has_corresponding_operator<O>() &&
// MIN/MAX only supports fixed width types
(((O == aggregation::MIN || O == aggregation::MAX) && cudf::is_fixed_width<T>()) ||
(O == aggregation::SUM) || (O == aggregation::MEAN));
}
// operations we do support
template <typename T = InputType, aggregation::Kind O = op>
explicit DeviceRolling(size_type _min_periods, std::enable_if_t<is_supported<T, O>()>* = nullptr)
: min_periods(_min_periods)
{
}
// operations we don't support
template <typename T = InputType, aggregation::Kind O = op>
explicit DeviceRolling(size_type _min_periods, std::enable_if_t<!is_supported<T, O>()>* = nullptr)
: min_periods(_min_periods)
{
CUDF_FAIL("Invalid aggregation/type pair");
}
// perform the windowing operation
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index) const
{
using AggOp = typename corresponding_operator<op>::type;
AggOp agg_op;
cudf::size_type count = 0;
OutputType val = AggOp::template identity<OutputType>();
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
OutputType element = input.element<device_storage_type_t<InputType>>(j);
val = agg_op(element, val);
count++;
}
}
bool output_is_valid = (count >= min_periods);
// store the output value, one per thread
cudf::detail::rolling_store_output_functor<OutputType, op == aggregation::MEAN>{}(
output.element<OutputType>(current_index), val, count);
return output_is_valid;
}
};
/**
* @brief The base struct used for checking if the combination of input type and aggregation op is
* supported.
*/
template <typename InputType, aggregation::Kind op>
struct DeviceRollingArgMinMaxBase {
size_type min_periods;
explicit DeviceRollingArgMinMaxBase(size_type _min_periods) : min_periods(_min_periods) {}
static constexpr bool is_supported()
{
    // Right now, only ARGMIN/ARGMAX of strings and structs are supported.
auto const type_supported =
std::is_same_v<InputType, cudf::string_view> || std::is_same_v<InputType, cudf::struct_view>;
auto const op_supported = op == aggregation::Kind::ARGMIN || op == aggregation::Kind::ARGMAX;
return type_supported && op_supported;
}
};
/**
 * @brief Operator for applying an ARGMAX/ARGMIN rolling aggregation on a single window for strings.
*/
template <aggregation::Kind op>
struct DeviceRollingArgMinMaxString : DeviceRollingArgMinMaxBase<cudf::string_view, op> {
explicit DeviceRollingArgMinMaxString(size_type _min_periods)
: DeviceRollingArgMinMaxBase<cudf::string_view, op>(_min_periods)
{
}
using DeviceRollingArgMinMaxBase<cudf::string_view, op>::min_periods;
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index)
{
auto constexpr default_output = (op == aggregation::ARGMIN) ? ARGMIN_SENTINEL : ARGMAX_SENTINEL;
using InputType = cudf::string_view;
using AggOp = typename corresponding_operator<op>::type;
AggOp agg_op;
cudf::size_type count = 0;
InputType val = AggOp::template identity<InputType>();
OutputType val_index = default_output;
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
InputType element = input.element<InputType>(j);
val = agg_op(element, val);
if (val == element) { val_index = j; }
count++;
}
}
bool output_is_valid = (count >= min_periods);
    // Using the sentinel value (i.e., -1) for the output helps identify null elements while
    // gathering for Min and Max.
output.element<OutputType>(current_index) = output_is_valid ? val_index : default_output;
    // The gather mask shouldn't contain null values, so always return true.
return true;
}
};
/**
 * @brief Operator for applying an ARGMAX/ARGMIN rolling aggregation on a single window for structs.
*/
template <aggregation::Kind op, typename Comparator>
struct DeviceRollingArgMinMaxStruct : DeviceRollingArgMinMaxBase<cudf::struct_view, op> {
DeviceRollingArgMinMaxStruct(size_type _min_periods, Comparator const& _comp)
: DeviceRollingArgMinMaxBase<cudf::struct_view, op>(_min_periods), comp(_comp)
{
}
using DeviceRollingArgMinMaxBase<cudf::struct_view, op>::min_periods;
Comparator comp;
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index)
{
auto constexpr default_output = (op == aggregation::ARGMIN) ? ARGMIN_SENTINEL : ARGMAX_SENTINEL;
auto const valid_count =
has_nulls ? thrust::count_if(thrust::seq,
thrust::make_counting_iterator(start_index),
thrust::make_counting_iterator(end_index),
[&input](size_type idx) { return input.is_valid_nocheck(idx); })
: end_index - start_index;
    // Using the sentinel value (i.e., -1) for the output helps identify null elements while
    // gathering for Min and Max.
output.element<OutputType>(current_index) =
(valid_count >= min_periods) ? thrust::reduce(thrust::seq,
thrust::make_counting_iterator(start_index),
thrust::make_counting_iterator(end_index),
size_type{start_index},
comp)
: default_output;
// The gather mask shouldn't contain null values, so always return true.
return true;
}
};
/**
* @brief Operator for applying a COUNT_VALID rolling aggregation on a single window.
*/
template <typename InputType>
struct DeviceRollingCountValid {
size_type min_periods;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = aggregation::COUNT_VALID>
static constexpr bool is_supported()
{
return true;
}
DeviceRollingCountValid(size_type _min_periods) : min_periods(_min_periods) {}
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index)
{
bool output_is_valid = ((end_index - start_index) >= min_periods);
if (output_is_valid) {
cudf::size_type count = 0;
if (!has_nulls) {
count = end_index - start_index;
} else {
count = thrust::count_if(thrust::seq,
thrust::make_counting_iterator(start_index),
thrust::make_counting_iterator(end_index),
[&input](auto i) { return input.is_valid_nocheck(i); });
}
output.element<OutputType>(current_index) = count;
}
return output_is_valid;
}
};
/**
* @brief Operator for applying a COUNT_ALL rolling aggregation on a single window.
*/
template <typename InputType>
struct DeviceRollingCountAll {
size_type min_periods;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = aggregation::COUNT_ALL>
static constexpr bool is_supported()
{
return true;
}
DeviceRollingCountAll(size_type _min_periods) : min_periods(_min_periods) {}
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const&,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index)
{
cudf::size_type count = end_index - start_index;
bool output_is_valid = count >= min_periods;
output.element<OutputType>(current_index) = count;
return output_is_valid;
}
};
/**
* @brief Operator for applying a VAR rolling aggregation on a single window.
*/
template <typename InputType>
struct DeviceRollingVariance {
size_type const min_periods;
size_type const ddof;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = aggregation::VARIANCE>
static constexpr bool is_supported()
{
return is_fixed_width<InputType>() and not is_chrono<InputType>();
}
DeviceRollingVariance(size_type _min_periods, size_type _ddof)
: min_periods(_min_periods), ddof{_ddof}
{
}
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index) const
{
using DeviceInputType = device_storage_type_t<InputType>;
// valid counts in the window
cudf::size_type const count =
has_nulls ? thrust::count_if(thrust::seq,
thrust::make_counting_iterator(start_index),
thrust::make_counting_iterator(end_index),
[&input](auto i) { return input.is_valid_nocheck(i); })
: end_index - start_index;
// Result will be null if any of the following conditions are met:
// - All inputs are null
// - Number of valid inputs is less than `min_periods`
bool output_is_valid = count > 0 and (count >= min_periods);
if (output_is_valid) {
if (count >= ddof) {
// Welford algorithm
// See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
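        // Each valid sample x updates the running mean m and the running sum of
        // squared deviations m2 as:
        //   m  += (x - m_old) / n;            // n = valid samples seen so far
        //   m2 += (x - m_old) * (x - m_new);
        // The variance is then m2 / (count - ddof).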
OutputType m{0}, m2{0};
size_type running_count{0};
for (size_type i = start_index; i < end_index; i++) {
if (has_nulls and input.is_null_nocheck(i)) { continue; }
OutputType const x = static_cast<OutputType>(input.element<DeviceInputType>(i));
running_count++;
OutputType const tmp1 = x - m;
m += tmp1 / running_count;
OutputType const tmp2 = x - m;
m2 += tmp1 * tmp2;
}
if constexpr (is_fixed_point<InputType>()) {
        // For fixed_point types, the value computed above uses the unscaled rep-value,
        // so the final result must be multiplied by the square of the decimal `scale`.
OutputType scaleby = exp10(static_cast<double>(input.type().scale()));
scaleby *= scaleby;
output.element<OutputType>(current_index) = m2 / (count - ddof) * scaleby;
} else {
output.element<OutputType>(current_index) = m2 / (count - ddof);
}
} else {
output.element<OutputType>(current_index) =
cuda::std::numeric_limits<OutputType>::signaling_NaN();
}
}
return output_is_valid;
}
};
/**
* @brief Operator for applying a ROW_NUMBER rolling aggregation on a single window.
*/
template <typename InputType>
struct DeviceRollingRowNumber {
size_type min_periods;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = aggregation::ROW_NUMBER>
static constexpr bool is_supported()
{
return true;
}
DeviceRollingRowNumber(size_type _min_periods) : min_periods(_min_periods) {}
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const&,
column_device_view const&,
mutable_column_device_view& output,
size_type start_index,
size_type end_index,
size_type current_index)
{
bool output_is_valid = end_index - start_index >= min_periods;
output.element<OutputType>(current_index) = current_index - start_index + 1;
return output_is_valid;
}
};
struct agg_specific_empty_output {
template <typename InputType, aggregation::Kind op>
std::unique_ptr<column> operator()(column_view const& input, rolling_aggregation const&) const
{
using target_type = cudf::detail::target_type_t<InputType, op>;
if constexpr (std::is_same_v<cudf::detail::target_type_t<InputType, op>, void>) {
CUDF_FAIL("Unsupported combination of column-type and aggregation.");
}
if constexpr (cudf::is_fixed_width<target_type>()) {
return cudf::make_empty_column(type_to_id<target_type>());
}
if constexpr (op == aggregation::COLLECT_LIST) {
return cudf::make_lists_column(
0, make_empty_column(type_to_id<size_type>()), empty_like(input), 0, {});
}
return empty_like(input);
}
};
static std::unique_ptr<column> empty_output_for_rolling_aggregation(column_view const& input,
rolling_aggregation const& agg)
{
// TODO:
// Ideally, for UDF aggregations, the returned column would match
// the agg's return type. It currently returns empty_like(input), because:
// 1. This preserves prior behavior for empty input columns.
// 2. There is insufficient information to construct nested return columns.
// `cudf::make_udf_aggregation()` expresses the return type as a `data_type`
// which cannot express recursively nested types (e.g. `STRUCT<LIST<INT32>>`.)
// 3. In any case, UDFs that return nested types are not currently supported.
// Constructing a more accurate return type for UDFs will be taken up
// at a later date.
return agg.kind == aggregation::CUDA || agg.kind == aggregation::PTX
? empty_like(input)
: cudf::detail::dispatch_type_and_aggregation(
input.type(), agg.kind, agg_specific_empty_output{}, input, agg);
}
/**
* @brief Operator for applying a LEAD rolling aggregation on a single window.
*/
template <typename InputType>
struct DeviceRollingLead {
size_type row_offset;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = aggregation::LEAD>
static constexpr bool is_supported()
{
return cudf::is_fixed_width<T>();
}
template <typename T = InputType, std::enable_if_t<is_supported<T>()>* = nullptr>
DeviceRollingLead(size_type _row_offset) : row_offset(_row_offset)
{
}
template <typename T = InputType, std::enable_if_t<!is_supported<T>()>* = nullptr>
DeviceRollingLead(size_type _row_offset) : row_offset(_row_offset)
{
CUDF_FAIL("Invalid aggregation/type pair");
}
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const& default_outputs,
mutable_column_device_view& output,
size_type,
size_type end_index,
size_type current_index)
{
// Offsets have already been normalized.
// Check if row is invalid.
if (row_offset > (end_index - current_index - 1)) {
// Invalid row marked. Use default value, if available.
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; }
output.element<OutputType>(current_index) =
default_outputs.element<OutputType>(current_index);
return true;
}
// Not an invalid row.
auto index = current_index + row_offset;
auto is_null = input.is_null(index);
if (!is_null) {
output.element<OutputType>(current_index) =
input.element<device_storage_type_t<InputType>>(index);
}
return !is_null;
}
};
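// For example, with row_offset = 2 (i.e. LEAD(2)), current_index = 7, and
// end_index = 10, the value at index 9 is returned; had end_index been 9, the
// offset row would fall outside the window and the per-row default (when
// provided) would be used instead.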
/**
* @brief Operator for applying a LAG rolling aggregation on a single window.
*/
template <typename InputType>
struct DeviceRollingLag {
size_type row_offset;
// what operations do we support
template <typename T = InputType, aggregation::Kind O = aggregation::LAG>
static constexpr bool is_supported()
{
return cudf::is_fixed_width<T>();
}
template <typename T = InputType, std::enable_if_t<is_supported<T>()>* = nullptr>
DeviceRollingLag(size_type _row_offset) : row_offset(_row_offset)
{
}
template <typename T = InputType, std::enable_if_t<!is_supported<T>()>* = nullptr>
DeviceRollingLag(size_type _row_offset) : row_offset(_row_offset)
{
CUDF_FAIL("Invalid aggregation/type pair");
}
template <typename OutputType, bool has_nulls>
bool __device__ operator()(column_device_view const& input,
column_device_view const& default_outputs,
mutable_column_device_view& output,
size_type start_index,
size_type,
size_type current_index)
{
// Offsets have already been normalized.
// Check if row is invalid.
if (row_offset > (current_index - start_index)) {
// Invalid row marked. Use default value, if available.
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; }
output.element<OutputType>(current_index) =
default_outputs.element<OutputType>(current_index);
return true;
}
// Not an invalid row.
auto index = current_index - row_offset;
auto is_null = input.is_null(index);
if (!is_null) {
output.element<OutputType>(current_index) =
input.element<device_storage_type_t<InputType>>(index);
}
return !is_null;
}
};
/**
 * @brief Maps an `InputType` and `aggregation::Kind` value to its corresponding
* rolling window operator.
*
* @tparam InputType The input type to map to its corresponding operator
* @tparam k The `aggregation::Kind` value to map to its corresponding operator
*/
template <typename InputType, aggregation::Kind k>
struct corresponding_rolling_operator {
using type = DeviceRolling<InputType, k>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::ARGMIN> {
using type = DeviceRollingArgMinMaxBase<InputType, aggregation::ARGMIN>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::ARGMAX> {
using type = DeviceRollingArgMinMaxBase<InputType, aggregation::ARGMAX>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::COUNT_VALID> {
using type = DeviceRollingCountValid<InputType>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::COUNT_ALL> {
using type = DeviceRollingCountAll<InputType>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::ROW_NUMBER> {
using type = DeviceRollingRowNumber<InputType>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::Kind::VARIANCE> {
using type = DeviceRollingVariance<InputType>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::Kind::LEAD> {
using type = DeviceRollingLead<InputType>;
};
template <typename InputType>
struct corresponding_rolling_operator<InputType, aggregation::Kind::LAG> {
using type = DeviceRollingLag<InputType>;
};
/**
* @brief Functor for creating a device rolling operator based on input type and aggregation type.
*/
template <typename InputType, aggregation::Kind k, typename = void>
struct create_rolling_operator {
auto operator()(size_type min_periods, rolling_aggregation const&)
{
return typename corresponding_rolling_operator<InputType, k>::type(min_periods);
}
};
template <typename InputType>
struct create_rolling_operator<InputType, aggregation::Kind::VARIANCE> {
auto operator()(size_type min_periods, rolling_aggregation const& agg)
{
return DeviceRollingVariance<InputType>{
min_periods, dynamic_cast<cudf::detail::var_aggregation const&>(agg)._ddof};
}
};
template <typename InputType>
struct create_rolling_operator<InputType, aggregation::Kind::LEAD> {
auto operator()(size_type, rolling_aggregation const& agg)
{
return DeviceRollingLead<InputType>{
dynamic_cast<cudf::detail::lead_lag_aggregation const&>(agg).row_offset};
}
};
template <typename InputType>
struct create_rolling_operator<InputType, aggregation::Kind::LAG> {
auto operator()(size_type, rolling_aggregation const& agg)
{
return DeviceRollingLag<InputType>{
dynamic_cast<cudf::detail::lead_lag_aggregation const&>(agg).row_offset};
}
};
template <typename InputType, aggregation::Kind k>
struct create_rolling_operator<
InputType,
k,
typename std::enable_if_t<std::is_same_v<InputType, cudf::string_view> &&
(k == aggregation::Kind::ARGMIN || k == aggregation::Kind::ARGMAX)>> {
auto operator()(size_type min_periods, rolling_aggregation const&)
{
return DeviceRollingArgMinMaxString<k>{min_periods};
}
};
template <typename InputType, aggregation::Kind k>
struct create_rolling_operator<
InputType,
k,
typename std::enable_if_t<std::is_same_v<InputType, cudf::struct_view> &&
(k == aggregation::Kind::ARGMIN || k == aggregation::Kind::ARGMAX)>> {
template <typename Comparator>
auto operator()(size_type min_periods, Comparator const& comp)
{
return DeviceRollingArgMinMaxStruct<k, Comparator>{min_periods, comp};
}
};
/**
* @brief Rolling window specific implementation of simple_aggregations_collector.
*
* The purpose of this class is to preprocess incoming aggregation/type pairs and
* potentially transform them into other aggregation/type pairs. Typically when this
* happens, the equivalent aggregation/type implementation of finalize() will perform
* some postprocessing step.
*
* An example of this would be applying a MIN aggregation to strings. This cannot be done
* directly in the rolling operation, so instead the following happens:
*
* - the rolling_aggregation_preprocessor transforms the incoming MIN/string pair to
* an ARGMIN/int pair.
* - The ARGMIN/int has the rolling operation applied to it, generating a list of indices
* that can then be used as a gather map.
* - The rolling_aggregation_postprocessor then takes this gather map and performs a final
* gather() on the input string data to generate the final output.
*
* Another example is COLLECT_LIST. COLLECT_LIST is odd in that it doesn't go through the
* normal gpu rolling kernel at all. It has a completely custom implementation. So the
* following happens:
*
* - the rolling_aggregation_preprocessor transforms the COLLECT_LIST aggregation into nothing,
* since no actual rolling window operation will be performed.
* - the rolling_aggregation_postprocessor calls the specialized rolling_collect_list()
* function to generate the final output.
*
*/
class rolling_aggregation_preprocessor final : public cudf::detail::simple_aggregations_collector {
public:
using cudf::detail::simple_aggregations_collector::visit;
// NOTE : all other aggregations are passed through unchanged via the default
// visit() function in the simple_aggregations_collector.
  // MIN aggregations on strings and structs are processed in 2 passes. The first pass performs
  // the rolling operation on an ARGMIN aggregation to generate indices instead of values.
  // Then a second pass uses those indices to gather the final values. This step
  // translates the MIN -> ARGMIN aggregation.
std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
cudf::detail::min_aggregation const&) override
{
std::vector<std::unique_ptr<aggregation>> aggs;
aggs.push_back(col_type.id() == type_id::STRING || col_type.id() == type_id::STRUCT
? make_argmin_aggregation()
: make_min_aggregation());
return aggs;
}
  // MAX aggregations on strings and structs are processed in 2 passes. The first pass performs
  // the rolling operation on an ARGMAX aggregation to generate indices instead of values.
  // Then a second pass uses those indices to gather the final values. This step
  // translates the MAX -> ARGMAX aggregation.
std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
cudf::detail::max_aggregation const&) override
{
std::vector<std::unique_ptr<aggregation>> aggs;
aggs.push_back(col_type.id() == type_id::STRING || col_type.id() == type_id::STRUCT
? make_argmax_aggregation()
: make_max_aggregation());
return aggs;
}
// COLLECT_LIST aggregations do not perform a rolling operation at all. They get processed
// entirely in the finalize() step.
std::vector<std::unique_ptr<aggregation>> visit(
data_type, cudf::detail::collect_list_aggregation const&) override
{
return {};
}
// COLLECT_SET aggregations do not perform a rolling operation at all. They get processed
// entirely in the finalize() step.
std::vector<std::unique_ptr<aggregation>> visit(
data_type, cudf::detail::collect_set_aggregation const&) override
{
return {};
}
  // STD aggregations depend on the VARIANCE aggregation. The square root is applied
  // to each element in the finalize() step.
std::vector<std::unique_ptr<aggregation>> visit(data_type,
cudf::detail::std_aggregation const& agg) override
{
std::vector<std::unique_ptr<aggregation>> aggs;
aggs.push_back(make_variance_aggregation(agg._ddof));
return aggs;
}
// LEAD and LAG have custom behaviors for non fixed-width types.
std::vector<std::unique_ptr<aggregation>> visit(
data_type col_type, cudf::detail::lead_lag_aggregation const& agg) override
{
// no rolling operation for non-fixed width. just a postprocess step at the end
if (!cudf::is_fixed_width(col_type)) { return {}; }
// otherwise, pass through
std::vector<std::unique_ptr<aggregation>> aggs;
aggs.push_back(agg.clone());
return aggs;
}
// NTH_ELEMENT aggregations are computed in finalize(). Skip preprocessing.
std::vector<std::unique_ptr<aggregation>> visit(
data_type, cudf::detail::nth_element_aggregation const&) override
{
return {};
}
};
/**
* @brief Rolling window specific implementation of aggregation_finalizer.
*
* The purpose of this class is to postprocess rolling window data depending on the
* aggregation/type pair. See the description of rolling_aggregation_preprocessor for
* a detailed description.
*
*/
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
class rolling_aggregation_postprocessor final : public cudf::detail::aggregation_finalizer {
public:
using cudf::detail::aggregation_finalizer::visit;
rolling_aggregation_postprocessor(column_view const& _input,
column_view const& _default_outputs,
data_type _result_type,
PrecedingWindowIterator _preceding_window_begin,
FollowingWindowIterator _following_window_begin,
int _min_periods,
std::unique_ptr<column>&& _intermediate,
rmm::cuda_stream_view _stream,
rmm::mr::device_memory_resource* _mr)
    : input(_input),
default_outputs(_default_outputs),
result_type(_result_type),
preceding_window_begin(_preceding_window_begin),
following_window_begin(_following_window_begin),
min_periods(_min_periods),
intermediate(std::move(_intermediate)),
result(nullptr),
stream(_stream),
mr(_mr)
{
}
// all non-specialized aggregation types simply pass the intermediate result through.
void visit(aggregation const&) override { result = std::move(intermediate); }
// perform a final gather on the generated ARGMIN data
void visit(cudf::detail::min_aggregation const&) override
{
if (result_type.id() == type_id::STRING || result_type.id() == type_id::STRUCT) {
// The rows that represent null elements will have negative values in gather map,
// and that's why nullify_out_of_bounds/ignore_out_of_bounds is true.
auto output_table = detail::gather(table_view{{input}},
intermediate->view(),
cudf::out_of_bounds_policy::NULLIFY,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
result = std::make_unique<cudf::column>(std::move(output_table->get_column(0)));
} else {
result = std::move(intermediate);
}
}
// perform a final gather on the generated ARGMAX data
void visit(cudf::detail::max_aggregation const&) override
{
if (result_type.id() == type_id::STRING || result_type.id() == type_id::STRUCT) {
// The rows that represent null elements will have negative values in gather map,
// and that's why nullify_out_of_bounds/ignore_out_of_bounds is true.
auto output_table = detail::gather(table_view{{input}},
intermediate->view(),
cudf::out_of_bounds_policy::NULLIFY,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
result = std::make_unique<cudf::column>(std::move(output_table->get_column(0)));
} else {
result = std::move(intermediate);
}
}
// perform the actual COLLECT_LIST operation entirely.
void visit(cudf::detail::collect_list_aggregation const& agg) override
{
result = rolling_collect_list(input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg._null_handling,
stream,
mr);
}
// perform the actual COLLECT_SET operation entirely.
void visit(cudf::detail::collect_set_aggregation const& agg) override
{
auto const collected_list = rolling_collect_list(input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg._null_handling,
stream,
rmm::mr::get_current_device_resource());
result = lists::detail::distinct(
lists_column_view{collected_list->view()}, agg._nulls_equal, agg._nans_equal, stream, mr);
}
// perform the element-wise square root operation on result of VARIANCE
void visit(cudf::detail::std_aggregation const&) override
{
result = detail::unary_operation(intermediate->view(), unary_operator::SQRT, stream, mr);
}
std::unique_ptr<column> get_result()
{
CUDF_EXPECTS(result != nullptr,
"Calling result on rolling aggregation postprocessor that has not been visited in "
"rolling_window");
return std::move(result);
}
// LEAD and LAG have custom behaviors for non fixed-width types.
void visit(cudf::detail::lead_lag_aggregation const& agg) override
{
// if this is non-fixed width, run the custom lead-lag code
if (!cudf::is_fixed_width(result_type)) {
result =
cudf::detail::compute_lead_lag_for_nested<PrecedingWindowIterator, FollowingWindowIterator>(
agg.kind,
input,
default_outputs,
preceding_window_begin,
following_window_begin,
agg.row_offset,
stream,
mr);
}
// otherwise just pass through the intermediate
else {
result = std::move(intermediate);
}
}
  // NTH_ELEMENT aggregation.
void visit(cudf::detail::nth_element_aggregation const& agg) override
{
result =
agg._null_handling == null_policy::EXCLUDE
? rolling::nth_element<null_policy::EXCLUDE>(
agg._n, input, preceding_window_begin, following_window_begin, min_periods, stream, mr)
: rolling::nth_element<null_policy::INCLUDE>(
agg._n, input, preceding_window_begin, following_window_begin, min_periods, stream, mr);
}
private:
column_view input;
column_view default_outputs;
data_type result_type;
PrecedingWindowIterator preceding_window_begin;
FollowingWindowIterator following_window_begin;
int min_periods;
std::unique_ptr<column> intermediate;
std::unique_ptr<column> result;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
};
/**
* @brief Computes the rolling window function
*
* @tparam OutputType Datatype of `output`
* @tparam block_size CUDA block size for the kernel
* @tparam has_nulls true if the input column has nulls
* @tparam DeviceRollingOperator An operator that performs a single windowing operation
* @tparam PrecedingWindowIterator iterator type (inferred)
* @tparam FollowingWindowIterator iterator type (inferred)
* @param[in] input Input column device view
* @param[in] default_outputs A column of per-row default values to be returned instead
* of nulls for certain aggregation types.
* @param[out] output Output column device view
* @param[out] output_valid_count Output count of valid values
* @param[in] device_operator The operator used to perform a single window operation
 * @param[in] preceding_window_begin Rolling window size iterator; the preceding window
 * includes the current row, i.e. it accumulates from in_col[i-preceding_window+1] to
 * in_col[i] inclusive
* @param[in] following_window_begin Rolling window size iterator in the forward
* direction, accumulates from in_col[i] to in_col[i+following_window] inclusive
*/
template <typename OutputType,
int block_size,
bool has_nulls,
typename DeviceRollingOperator,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
__launch_bounds__(block_size) __global__
void gpu_rolling(column_device_view input,
column_device_view default_outputs,
mutable_column_device_view output,
size_type* __restrict__ output_valid_count,
DeviceRollingOperator device_operator,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin)
{
thread_index_type i = blockIdx.x * block_size + threadIdx.x;
thread_index_type const stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffff'ffffu, i < input.size());
while (i < input.size()) {
// to prevent overflow issues when computing bounds use int64_t
int64_t const preceding_window = preceding_window_begin[i];
int64_t const following_window = following_window_begin[i];
// compute bounds
auto const start = static_cast<size_type>(
min(static_cast<int64_t>(input.size()), max(int64_t{0}, i - preceding_window + 1)));
auto const end = static_cast<size_type>(
min(static_cast<int64_t>(input.size()), max(int64_t{0}, i + following_window + 1)));
auto const start_index = min(start, end);
auto const end_index = max(start, end);
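    // e.g. with preceding_window = 2 and following_window = 1 at row i = 5,
    // start_index = 4 and end_index = 7, so rows {4, 5, 6} form the window.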
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
volatile bool output_is_valid = false;
output_is_valid = device_operator.template operator()<OutputType, has_nulls>(
input, default_outputs, output, start_index, end_index, i);
// set the mask
cudf::bitmask_type const result_mask{__ballot_sync(active_threads, output_is_valid)};
    // only one thread per warp writes the mask word
if (0 == threadIdx.x % cudf::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
/**
* @brief Type/aggregation dispatched functor for launching the gpu rolling window
* kernel.
*/
template <typename InputType>
struct rolling_window_launcher {
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<corresponding_rolling_operator<InputType, op>::type::is_supported(),
std::unique_ptr<column>>
operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
int min_periods,
[[maybe_unused]] rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const do_rolling = [&](auto const& device_op) {
auto output = make_fixed_width_column(
target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr);
auto const d_inp_ptr = column_device_view::create(input, stream);
auto const d_default_out_ptr = column_device_view::create(default_outputs, stream);
auto const d_out_ptr = mutable_column_device_view::create(output->mutable_view(), stream);
auto d_valid_count = rmm::device_scalar<size_type>{0, stream};
auto constexpr block_size = 256;
auto const grid = cudf::detail::grid_1d(input.size(), block_size);
using OutType = device_storage_type_t<target_type_t<InputType, op>>;
if (input.has_nulls()) {
gpu_rolling<OutType, block_size, true>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(*d_inp_ptr,
*d_default_out_ptr,
*d_out_ptr,
d_valid_count.data(),
device_op,
preceding_window_begin,
following_window_begin);
} else {
gpu_rolling<OutType, block_size, false>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(*d_inp_ptr,
*d_default_out_ptr,
*d_out_ptr,
d_valid_count.data(),
device_op,
preceding_window_begin,
following_window_begin);
}
auto const valid_count = d_valid_count.value(stream);
output->set_null_count(output->size() - valid_count);
return output;
}; // end do_rolling
auto constexpr is_arg_minmax =
op == aggregation::Kind::ARGMIN || op == aggregation::Kind::ARGMAX;
if constexpr (is_arg_minmax && std::is_same_v<InputType, cudf::struct_view>) {
// Using comp_generator to create a LESS operator for finding ARGMIN/ARGMAX of structs.
auto const comp_generator =
cudf::reduction::detail::comparison_binop_generator::create<op>(input, stream);
auto const device_op =
create_rolling_operator<InputType, op>{}(min_periods, comp_generator.binop());
return do_rolling(device_op);
} else { // all the remaining rolling operations
auto const device_op = create_rolling_operator<InputType, op>{}(min_periods, agg);
return do_rolling(device_op);
}
}
template <aggregation::Kind op,
typename PrecedingWindowIterator,
typename FollowingWindowIterator>
std::enable_if_t<!corresponding_rolling_operator<InputType, op>::type::is_supported(),
std::unique_ptr<column>>
operator()(column_view const&,
column_view const&,
PrecedingWindowIterator,
FollowingWindowIterator,
int,
rolling_aggregation const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Invalid aggregation type/pair");
}
};
/**
* @brief Functor for performing the high level rolling logic.
*
* This does 3 basic things:
*
* - It calls the preprocess step on incoming aggregation/type pairs
* - It calls the aggregation-dispatched gpu-rolling operation
* - It calls the final postprocess step
*/
struct dispatch_rolling {
template <typename InputType, typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> operator()(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// do any preprocessing of aggregations (eg, MIN -> ARGMIN, COLLECT_LIST -> nothing)
rolling_aggregation_preprocessor preprocessor;
auto preprocessed_aggs = agg.get_simple_aggregations(input.type(), preprocessor);
CUDF_EXPECTS(preprocessed_aggs.size() <= 1,
"Encountered a non-trivial rolling aggregation result");
// perform the rolling window if we produced an aggregation to use
auto intermediate = preprocessed_aggs.size() > 0
? aggregation_dispatcher(
dynamic_cast<rolling_aggregation const&>(*preprocessed_aggs[0]).kind,
rolling_window_launcher<InputType>{},
input,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
dynamic_cast<rolling_aggregation const&>(*preprocessed_aggs[0]),
stream,
mr)
: nullptr;
// finalize.
auto const result_type = target_type(input.type(), agg.kind);
rolling_aggregation_postprocessor postprocessor(input,
default_outputs,
result_type,
preceding_window_begin,
following_window_begin,
min_periods,
std::move(intermediate),
stream,
mr);
agg.finalize(postprocessor);
return postprocessor.get_result();
}
};
// Applies a user-defined rolling window function to the values in a column.
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> rolling_window_udf(column_view const& input,
PrecedingWindowIterator preceding_window,
std::string const& preceding_window_str,
FollowingWindowIterator following_window,
std::string const& following_window_str,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
if (input.has_nulls()) {
CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls.");
}
min_periods = std::max(min_periods, 0);
auto& udf_agg = dynamic_cast<udf_aggregation const&>(agg);
std::string hash = "prog_rolling." + std::to_string(std::hash<std::string>{}(udf_agg._source));
std::string cuda_source;
switch (udf_agg.kind) {
case aggregation::Kind::PTX:
cuda_source += cudf::jit::parse_single_function_ptx(udf_agg._source,
udf_agg._function_name,
cudf::type_to_name(udf_agg._output_type),
{0, 5}); // args 0 and 5 are pointers.
break;
case aggregation::Kind::CUDA:
cuda_source += cudf::jit::parse_single_function_cuda(udf_agg._source, udf_agg._function_name);
break;
default: CUDF_FAIL("Unsupported UDF type.");
}
std::unique_ptr<column> output = make_numeric_column(
udf_agg._output_type, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
auto output_view = output->mutable_view();
rmm::device_scalar<size_type> device_valid_count{0, stream};
std::string kernel_name =
jitify2::reflection::Template("cudf::rolling::jit::gpu_rolling_new") //
.instantiate(cudf::type_to_name(input.type()), // list of template arguments
cudf::type_to_name(output->type()),
udf_agg._operator_name,
preceding_window_str.c_str(),
following_window_str.c_str());
cudf::jit::get_program_cache(*rolling_jit_kernel_cu_jit)
.get_kernel(
kernel_name, {}, {{"rolling/jit/operation-udf.hpp", cuda_source}}, {"-arch=sm_."}) //
->configure_1d_max_occupancy(0, 0, 0, stream.value()) //
->launch(input.size(),
cudf::jit::get_data_ptr(input),
input.null_mask(),
cudf::jit::get_data_ptr(output_view),
output_view.null_mask(),
device_valid_count.data(),
preceding_window,
following_window,
min_periods);
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CUDF_CHECK_CUDA(stream.value());
return output;
}
/**
* @copydoc cudf::rolling_window(column_view const& input,
* PrecedingWindowIterator preceding_window_begin,
* FollowingWindowIterator following_window_begin,
* size_type min_periods,
* rolling_aggregation const& agg,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename PrecedingWindowIterator, typename FollowingWindowIterator>
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
PrecedingWindowIterator preceding_window_begin,
FollowingWindowIterator following_window_begin,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
if (input.is_empty()) { return cudf::detail::empty_output_for_rolling_aggregation(input, agg); }
if (cudf::is_dictionary(input.type())) {
CUDF_EXPECTS(agg.kind == aggregation::COUNT_ALL || agg.kind == aggregation::COUNT_VALID ||
agg.kind == aggregation::ROW_NUMBER || agg.kind == aggregation::MIN ||
agg.kind == aggregation::MAX || agg.kind == aggregation::LEAD ||
agg.kind == aggregation::LAG,
"Invalid aggregation for dictionary column");
}
if (agg.kind != aggregation::LEAD && agg.kind != aggregation::LAG) {
CUDF_EXPECTS(default_outputs.is_empty(),
"Only LEAD/LAG window functions support default values.");
}
min_periods = std::max(min_periods, 0);
auto input_col = cudf::is_dictionary(input.type())
? dictionary_column_view(input).get_indices_annotated()
: input;
auto output = cudf::type_dispatcher(input_col.type(),
dispatch_rolling{},
input_col,
default_outputs,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
if (!cudf::is_dictionary(input.type())) return output;
// dictionary column post processing
if (agg.kind == aggregation::COUNT_ALL || agg.kind == aggregation::COUNT_VALID ||
agg.kind == aggregation::ROW_NUMBER) {
return output;
}
// output is new dictionary indices (including nulls)
auto keys = std::make_unique<column>(dictionary_column_view(input).keys(), stream, mr);
auto const indices_type = output->type(); // capture these
auto const output_size = output->size(); // before calling
auto const null_count = output->null_count(); // release()
auto contents = output->release();
// create indices column from output column data
auto indices = std::make_unique<column>(indices_type,
output_size,
std::move(*(contents.data.release())),
rmm::device_buffer{0, stream, mr},
0);
// create dictionary from keys and indices
return make_dictionary_column(
std::move(keys), std::move(indices), std::move(*(contents.null_mask.release())), null_count);
}
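// One way to call this overload directly (illustrative only; assumes
// <thrust/iterator/constant_iterator.h> is available) is with constant window
// iterators, e.g. a fixed window of 2 preceding / 1 following rows:
//
//   auto result = cudf::detail::rolling_window(
//     input, cudf::empty_like(input)->view(),
//     thrust::make_constant_iterator<cudf::size_type>(2),
//     thrust::make_constant_iterator<cudf::size_type>(1),
//     /*min_periods=*/1, agg, stream, mr);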
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/lead_lag_nested.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/scatter.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <vector>
namespace cudf::detail {
namespace {
/**
* @brief Predicate to find indices at which LEAD/LAG evaluated to null.
*/
template <typename GatherMapIter>
class is_null_index_predicate_impl {
public:
is_null_index_predicate_impl(size_type input_size, GatherMapIter gather_)
: _null_index{input_size}, _gather{gather_}
{
}
bool __device__ operator()(size_type i) const { return _gather[i] == _null_index; }
private:
size_type const _null_index; // Index value to use to output NULL for LEAD/LAG calculation.
GatherMapIter _gather; // Iterator for gather-map entries.
};
/**
* @brief Helper to construct is_null_index_predicate_impl
*/
template <typename GatherMapIter>
is_null_index_predicate_impl<GatherMapIter> is_null_index_predicate(size_type input_size,
GatherMapIter gather)
{
return is_null_index_predicate_impl<GatherMapIter>{input_size, gather};
}
} // namespace
/**
* @brief Helper function to calculate LEAD/LAG for nested-type input columns.
*
* @tparam PrecedingIterator Iterator-type that returns the preceding bounds
* @tparam FollowingIterator Iterator-type that returns the following bounds
* @param[in] op Aggregation kind.
* @param[in] input Nested-type input column for LEAD/LAG calculation
* @param[in] default_outputs Default values to use as outputs, if LEAD/LAG
* offset crosses column/group boundaries
* @param[in] preceding Iterator to retrieve preceding window bounds
* @param[in] following Iterator to retrieve following window bounds
* @param[in] row_offset Lead/Lag offset, indicating which row after/before
* the current row is to be returned
* @param[in] stream CUDA stream for device memory operations/allocations
* @param[in] mr device_memory_resource for device memory allocations
*/
template <typename PrecedingIter, typename FollowingIter>
std::unique_ptr<column> compute_lead_lag_for_nested(aggregation::Kind op,
column_view const& input,
column_view const& default_outputs,
PrecedingIter preceding,
FollowingIter following,
size_type row_offset,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(op == aggregation::LEAD || op == aggregation::LAG,
"Unexpected aggregation type in compute_lead_lag_for_nested");
CUDF_EXPECTS(default_outputs.type().id() == input.type().id(),
"Defaults column type must match input column."); // Because LEAD/LAG.
CUDF_EXPECTS(default_outputs.is_empty() || (input.size() == default_outputs.size()),
"Number of defaults must match input column.");
// For LEAD(0)/LAG(0), no computation need be performed.
// Return copy of input.
if (row_offset == 0) { return std::make_unique<column>(input, stream, mr); }
// Algorithm:
//
// 1. Construct gather_map with the LEAD/LAG offset applied to the indices.
// E.g. A gather_map of:
// {0, 1, 2, 3, ..., N-3, N-2, N-1}
// would select the input column, unchanged.
//
// For LEAD(2), the following gather_map is used:
  //      {2, 3, 4, 5, ..., N-1, NULL_INDEX, NULL_INDEX}
// where `NULL_INDEX` selects `NULL` for the gather.
//
// Similarly, LAG(2) is implemented using the following gather_map:
// {NULL_INDEX, NULL_INDEX, 0, 1, 2...}
//
// 2. Gather input column based on the gather_map.
// 3. If default outputs are available, scatter contents of `default_outputs`
  //    to all positions where nulls were gathered in step 2.
//
// Note: Step 3 can be switched to use `copy_if_else()`, once it supports
// nested types.
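  // Illustrative sketch (values assumed for this comment only, with the following-window
  // bound covering at least one row, and NULL_INDEX marking a nullified gather):
  //   input (LIST column)   == [ [0,0], [1,1], [2,2], [3,3] ]
  //   default_outputs       == [ [9],   [9],   [9],   [9]   ]
  //   LEAD(1) gather_map    == [ 1, 2, 3, NULL_INDEX ]
  //   gathered (with nulls) == [ [1,1], [2,2], [3,3], NULL ]
  //   after scattering the defaults into the null position:
  //                            [ [1,1], [2,2], [3,3], [9] ]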
auto static constexpr size_data_type = data_type{type_to_id<size_type>()};
auto gather_map_column =
make_numeric_column(size_data_type, input.size(), mask_state::UNALLOCATED, stream);
auto gather_map = gather_map_column->mutable_view();
auto const input_size = input.size();
auto const null_index = input.size();
if (op == aggregation::LEAD) {
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(size_type{input.size()}),
gather_map.begin<size_type>(),
[following, input_size, null_index, row_offset] __device__(size_type i) {
// Note: grouped_*rolling_window() trims preceding/following to
// the beginning/end of the group. `rolling_window()` does not.
// Must trim _following[i] so as not to go past the column end.
auto _following = min(following[i], input_size - i - 1);
return (row_offset > _following) ? null_index : (i + row_offset);
});
} else {
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(size_type{input.size()}),
gather_map.begin<size_type>(),
[preceding, input_size, null_index, row_offset] __device__(size_type i) {
// Note: grouped_*rolling_window() trims preceding/following to
// the beginning/end of the group. `rolling_window()` does not.
// Must trim _preceding[i] so as not to go past the column start.
auto _preceding = min(preceding[i], i + 1);
return (row_offset > (_preceding - 1)) ? null_index : (i - row_offset);
});
}
auto output_with_nulls = cudf::detail::gather(table_view{std::vector<column_view>{input}},
gather_map_column->view(),
out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
if (default_outputs.is_empty()) { return std::move(output_with_nulls->release()[0]); }
// Must scatter defaults.
auto scatter_map = rmm::device_uvector<size_type>(input.size(), stream);
// Find all indices at which LEAD/LAG computed nulls previously.
auto scatter_map_end =
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(size_type{input.size()}),
scatter_map.begin(),
is_null_index_predicate(input.size(), gather_map.begin<size_type>()));
scatter_map.resize(thrust::distance(scatter_map.begin(), scatter_map_end), stream);
// Bail early, if all LEAD/LAG computations succeeded. No defaults need be substituted.
if (scatter_map.is_empty()) { return std::move(output_with_nulls->release()[0]); }
// Gather only those default values that are to be substituted.
auto gathered_defaults =
cudf::detail::gather(table_view{std::vector<column_view>{default_outputs}},
scatter_map,
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
rmm::mr::get_current_device_resource());
// Scatter defaults into locations where LEAD/LAG computed nulls.
auto scattered_results = cudf::detail::scatter(
table_view{std::vector<column_view>{gathered_defaults->release()[0]->view()}},
scatter_map,
table_view{std::vector<column_view>{output_with_nulls->release()[0]->view()}},
stream,
mr);
return std::move(scattered_results->release()[0]);
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/range_window_bounds.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/rolling/range_window_bounds.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/wrappers/durations.hpp>
namespace cudf {
namespace detail {
/// Checks if the specified type is supported in a range_window_bounds.
template <typename RangeType>
constexpr bool is_supported_range_type()
{
return cudf::is_duration<RangeType>() || cudf::is_fixed_point<RangeType>() ||
(cudf::is_numeric<RangeType>() && !cudf::is_boolean<RangeType>());
}
/// Checks if the specified type is a supported target type,
/// as an order-by column, for comparisons with a range_window_bounds scalar.
template <typename ColumnType>
constexpr bool is_supported_order_by_column_type()
{
return cudf::is_timestamp<ColumnType>() || cudf::is_fixed_point<ColumnType>() ||
(cudf::is_numeric<ColumnType>() && !cudf::is_boolean<ColumnType>()) ||
std::is_same_v<ColumnType, cudf::string_view>;
}
/// Range-comparable representation type for an orderby column type.
/// This is the datatype used for range comparisons.
/// 1. For integral orderby column types `T`, comparisons are done as `T`.
/// E.g. `range_type_for<int32_t>` == `int32_t`.
/// 2. For timestamp orderby columns:
/// a. For `TIMESTAMP_DAYS`, the range-type is `DURATION_DAYS`.
/// Comparisons are done in `int32_t`.
/// b. For all other timestamp types, comparisons are done in `int64_t`.
/// 3. For decimal types, all comparisons are done with the rep type,
/// after scaling the rep value to the same scale as the order by column:
/// a. For decimal32, the range-type is `int32_t`.
/// b. For decimal64, the range-type is `int64_t`.
/// c. For decimal128, the range-type is `__int128_t`.
template <typename ColumnType, typename = void>
struct range_type_impl {
using type = void;
using rep_type = void;
};
template <typename ColumnType>
struct range_type_impl<
ColumnType,
std::enable_if_t<cudf::is_numeric<ColumnType>() && !cudf::is_boolean<ColumnType>(), void>> {
using type = ColumnType;
using rep_type = ColumnType;
};
template <typename TimestampType>
struct range_type_impl<TimestampType, std::enable_if_t<cudf::is_timestamp<TimestampType>(), void>> {
using type = typename TimestampType::duration;
using rep_type = typename type::rep;
};
template <typename FixedPointType>
struct range_type_impl<FixedPointType,
std::enable_if_t<cudf::is_fixed_point<FixedPointType>(), void>> {
using type = FixedPointType;
using rep_type = typename type::rep;
};
template <typename ColumnType>
using range_type = typename range_type_impl<ColumnType>::type;
template <typename ColumnType>
using range_rep_type = typename range_type_impl<ColumnType>::rep_type;
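// A couple of compile-time examples of the mapping above. The integral case can be checked
// directly; the timestamp/decimal cases follow the table in the comment block preceding
// range_type_impl (illustrative, not an exhaustive check).
static_assert(std::is_same_v<range_type<int32_t>, int32_t>,
              "integral orderby columns are compared directly as their own type");
static_assert(std::is_same_v<range_rep_type<int32_t>, int32_t>,
              "integral orderby columns use their own type as the rep type");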
template <typename T>
void assert_non_negative([[maybe_unused]] T const& value)
{
if constexpr (std::numeric_limits<T>::is_signed) {
CUDF_EXPECTS(value >= T{0}, "Range scalar must be >= 0.");
}
}
template <typename RangeT,
typename RepT,
CUDF_ENABLE_IF(cudf::is_numeric<RangeT>() && !cudf::is_boolean<RangeT>())>
RepT range_comparable_value_impl(scalar const& range_scalar,
bool,
data_type const&,
rmm::cuda_stream_view stream)
{
auto val = static_cast<numeric_scalar<RangeT> const&>(range_scalar).value(stream);
assert_non_negative(val);
return val;
}
template <typename RangeT, typename RepT, CUDF_ENABLE_IF(cudf::is_duration<RangeT>())>
RepT range_comparable_value_impl(scalar const& range_scalar,
bool,
data_type const&,
rmm::cuda_stream_view stream)
{
auto val = static_cast<duration_scalar<RangeT> const&>(range_scalar).value(stream).count();
assert_non_negative(val);
return val;
}
template <typename RangeT, typename RepT, CUDF_ENABLE_IF(cudf::is_fixed_point<RangeT>())>
RepT range_comparable_value_impl(scalar const& range_scalar,
bool is_unbounded,
data_type const& order_by_data_type,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(is_unbounded || range_scalar.type().scale() >= order_by_data_type.scale(),
"Range bounds scalar must match/exceed the scale of the orderby column.");
auto const fixed_point_value =
static_cast<fixed_point_scalar<RangeT> const&>(range_scalar).fixed_point_value(stream);
auto const value =
fixed_point_value.rescaled(numeric::scale_type{order_by_data_type.scale()}).value();
assert_non_negative(value);
return value;
}
/**
* @brief Fetch the value of the range_window_bounds scalar, for comparisons
* with an orderby column's rows.
*
* @tparam OrderByType The type of the orderby column with which the range value will be compared
* @param range_bounds The range_window_bounds whose value is to be read
* @param order_by_data_type The data type for the order-by column
* @param stream The CUDA stream for device memory operations
* @return RepType Value of the range scalar
*/
template <typename OrderByType>
range_rep_type<OrderByType> range_comparable_value(range_window_bounds const& range_bounds,
data_type const& order_by_data_type,
rmm::cuda_stream_view stream)
{
auto const& range_scalar = range_bounds.range_scalar();
using range_type = cudf::detail::range_type<OrderByType>;
CUDF_EXPECTS(range_scalar.type().id() == cudf::type_to_id<range_type>(),
"Range bounds scalar must match the type of the orderby column.");
using rep_type = cudf::detail::range_rep_type<OrderByType>;
return range_comparable_value_impl<range_type, rep_type>(
range_scalar, range_bounds.is_unbounded(), order_by_data_type, stream);
}
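// Usage sketch (values assumed for illustration only): for OrderByType == cudf::timestamp_s,
// the range_window_bounds must wrap a duration_s scalar; a bound of 5 seconds is returned
// here as int64_t{5}, ready for comparison against the orderby column's underlying rep.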
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling_variable_window.cu
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rolling.cuh"
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
namespace cudf::detail {
// Applies a variable-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (preceding_window.is_empty() || following_window.is_empty() || input.is_empty()) {
return cudf::detail::empty_output_for_rolling_aggregation(input, agg);
}
CUDF_EXPECTS(preceding_window.type().id() == type_id::INT32 &&
following_window.type().id() == type_id::INT32,
"preceding_window/following_window must have type_id::INT32 type");
CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(),
"preceding_window/following_window size must match input size");
if (agg.kind == aggregation::CUDA || agg.kind == aggregation::PTX) {
// TODO: In future, might need to clamp preceding/following to column boundaries.
return cudf::detail::rolling_window_udf(input,
preceding_window.begin<size_type>(),
"cudf::size_type*",
following_window.begin<size_type>(),
"cudf::size_type*",
min_periods,
agg,
stream,
mr);
} else {
auto defaults_col =
cudf::is_dictionary(input.type()) ? dictionary_column_view(input).indices() : input;
// Clamp preceding/following to column boundaries.
// E.g. If preceding_window == [2, 2, 2, 2, 2] for a column of 5 elements, the new
// preceding_window will be: [1, 2, 2, 2, 1]
auto const preceding_window_begin = cudf::detail::make_counting_transform_iterator(
0, [preceding = preceding_window.begin<size_type>()] __device__(size_type i) {
return thrust::min(i + 1, preceding[i]);
});
auto const following_window_begin = cudf::detail::make_counting_transform_iterator(
0,
[col_size = input.size(), following = following_window.begin<size_type>()] __device__(
size_type i) { return thrust::min(col_size - i - 1, following[i]); });
return cudf::detail::rolling_window(input,
empty_like(defaults_col)->view(),
preceding_window_begin,
following_window_begin,
min_periods,
agg,
stream,
mr);
}
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling_collect_list.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/strings/detail/strings_children.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/extrema.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
/**
* @brief Creates the offsets child of the result of the `COLLECT_LIST` window aggregation
*
* Given the input column, the preceding/following window bounds, and `min_periods`,
* the sizes of each list row may be computed. These values can then be used to
* calculate the offsets for the result of `COLLECT_LIST`.
*
* Note: If `min_periods` exceeds the number of observations for a window, the size
* is set to `0` (since the result is `null`).
*/
template <typename PrecedingIter, typename FollowingIter>
std::unique_ptr<column> create_collect_offsets(size_type input_size,
PrecedingIter preceding_begin,
FollowingIter following_begin,
size_type min_periods,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Materialize offsets column.
auto static constexpr size_data_type = data_type{type_to_id<size_type>()};
auto sizes = make_fixed_width_column(size_data_type, input_size, mask_state::UNALLOCATED, stream);
auto mutable_sizes = sizes->mutable_view();
// Consider the following preceding/following values:
// preceding = [1,2,2,2,2]
// following = [1,1,1,1,0]
// The sum of the vectors should yield the window sizes:
// prec + foll = [2,3,3,3,2]
//
// If min_periods=2, all rows have at least `min_periods` observations.
// But if min_periods=3, rows at indices 0 and 4 have too few observations, and must return
// null. The sizes at these positions must be 0, i.e.
// prec + foll = [0,3,3,3,0]
thrust::transform(rmm::exec_policy(stream),
preceding_begin,
preceding_begin + input_size,
following_begin,
mutable_sizes.begin<size_type>(),
[min_periods] __device__(auto const preceding, auto const following) {
return (preceding + following) < min_periods ? 0 : (preceding + following);
});
// Convert `sizes` to an offsets column, via inclusive_scan():
auto offsets_column = std::get<0>(cudf::detail::make_offsets_child_column(
sizes->view().begin<size_type>(), sizes->view().end<size_type>(), stream, mr));
return offsets_column;
}
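// For the example in the comments above (a sketch, not exercised in this header):
//   sizes (min_periods == 2) == [2, 3, 3, 3, 2] -> offsets == [0, 2, 5, 8, 11, 13]
//   sizes (min_periods == 3) == [0, 3, 3, 3, 0] -> offsets == [0, 0, 3, 6, 9, 9]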
/**
* @brief Generate mapping of each row in the COLLECT_LIST result's child column
* to the index of the row it belongs to.
*
* If
* input col == [A,B,C,D,E]
* and preceding == [1,2,2,2,2],
* and following == [1,1,1,1,0],
* then,
* collect result == [ [A,B], [A,B,C], [B,C,D], [C,D,E], [D,E] ]
* i.e. result offset column == [0,2,5,8,11,13],
* and result child column == [A,B,A,B,C,B,C,D,C,D,E,D,E].
* Mapping back to `input` == [0,1,0,1,2,1,2,3,2,3,4,3,4]
*/
std::unique_ptr<column> get_list_child_to_list_row_mapping(cudf::column_view const& offsets,
rmm::cuda_stream_view stream);
/**
* @brief Create gather map to generate the child column of the result of
* the `COLLECT_LIST` window aggregation.
*/
template <typename PrecedingIter>
std::unique_ptr<column> create_collect_gather_map(column_view const& child_offsets,
column_view const& per_row_mapping,
PrecedingIter preceding_iter,
rmm::cuda_stream_view stream)
{
auto gather_map = make_fixed_width_column(
data_type{type_to_id<size_type>()}, per_row_mapping.size(), mask_state::UNALLOCATED, stream);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(per_row_mapping.size()),
gather_map->mutable_view().template begin<size_type>(),
[d_offsets =
child_offsets.template begin<size_type>(), // E.g. [0, 2, 5, 8, 11, 13]
d_groups =
per_row_mapping.template begin<size_type>(), // E.g. [0,0, 1,1,1, 2,2,2, 3,3,3, 4,4]
d_prev = preceding_iter] __device__(auto i) {
auto group = d_groups[i];
auto group_start_offset = d_offsets[group];
auto relative_index = i - group_start_offset;
return (group - d_prev[group] + 1) + relative_index;
});
return gather_map;
}
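// Illustrative sketch (not exercised in this header): with
//   preceding       == [1, 2, 2, 2, 2]
//   child offsets   == [0, 2, 5, 8, 11, 13]
//   per-row mapping == [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4]
// the lambda above yields, e.g. for child row i == 5:
//   group == 2, group_start_offset == 5, relative_index == 0,
//   gather index == (2 - preceding[2] + 1) + 0 == 1,
// and the full gather map is [0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4], matching the
// child-to-input mapping documented for get_list_child_to_list_row_mapping().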
/**
* @brief Count null entries in result of COLLECT_LIST.
*/
size_type count_child_nulls(column_view const& input,
std::unique_ptr<column> const& gather_map,
rmm::cuda_stream_view stream);
/**
* @brief Purge entries for null inputs from gather_map, and adjust offsets.
*/
std::pair<std::unique_ptr<column>, std::unique_ptr<column>> purge_null_entries(
column_view const& input,
column_view const& gather_map,
column_view const& offsets,
size_type num_child_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
template <typename PrecedingIter, typename FollowingIter>
std::unique_ptr<column> rolling_collect_list(column_view const& input,
column_view const& default_outputs,
PrecedingIter preceding_begin_raw,
FollowingIter following_begin_raw,
size_type min_periods,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(default_outputs.is_empty(),
"COLLECT_LIST window function does not support default values.");
if (input.is_empty()) return empty_like(input);
// Fix up preceding/following iterators to respect column boundaries,
// similar to gpu_rolling().
  // `rolling_window()` does not clamp preceding/following to the column boundaries,
  // so the clamping must happen here to avoid reading past them.
  // `grouped_rolling_window()` and `time_range_based_grouped_rolling_window()` do clamp.
auto preceding_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [preceding_begin_raw] __device__(auto i) {
return thrust::min(preceding_begin_raw[i], i + 1);
});
auto following_begin =
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
[following_begin_raw, size = input.size()] __device__(auto i) {
return thrust::min(following_begin_raw[i], size - i - 1);
});
// Materialize collect list's offsets.
auto offsets =
create_collect_offsets(input.size(), preceding_begin, following_begin, min_periods, stream, mr);
// Map each element of the collect() result's child column
// to the index where it appears in the input.
auto per_row_mapping = get_list_child_to_list_row_mapping(offsets->view(), stream);
// Generate gather map to produce the collect() result's child column.
auto gather_map =
create_collect_gather_map(offsets->view(), per_row_mapping->view(), preceding_begin, stream);
// If gather_map collects null elements, and null_policy == EXCLUDE,
// those elements must be filtered out, and offsets recomputed.
if (null_handling == null_policy::EXCLUDE && input.has_nulls()) {
auto num_child_nulls = count_child_nulls(input, gather_map, stream);
if (num_child_nulls != 0) {
std::tie(gather_map, offsets) =
purge_null_entries(input, *gather_map, *offsets, num_child_nulls, stream, mr);
}
}
// gather(), to construct child column.
auto gather_output = cudf::detail::gather(table_view{std::vector<column_view>{input}},
gather_map->view(),
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
auto [null_mask, null_count] = valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(input.size()),
[preceding_begin, following_begin, min_periods] __device__(auto i) {
return (preceding_begin[i] + following_begin[i]) >= min_periods;
},
stream,
mr);
return make_lists_column(input.size(),
std::move(offsets),
std::move(gather_output->release()[0]),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/nth_element.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/utilities/bit.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <limits>
namespace cudf::detail::rolling {
/**
* @brief Functor to construct gather-map indices for NTH_ELEMENT rolling aggregation.
*
* By definition, the `N`th element is deemed null (i.e. the gather index is set to "nullify")
* for the following cases:
* 1. The window has fewer elements than `min_periods`.
* 2. N falls outside the window, i.e. N ∉ [-window_size, window_size).
* 3. `null_handling == EXCLUDE`, and the window has fewer than `N` non-null elements.
*
* If none of the above holds true, the result is non-null. How the value is determined
* depends on `null_handling`:
* 1. `null_handling == INCLUDE`: The required value is the `N`th value from the window's start.
* i.e. the gather index is window_start + N (adjusted for negative N).
* 2. `null_handling == EXCLUDE`: The required value is the `N`th non-null value from the
* window's start. i.e. Return index of the `N`th non-null value.
*/
template <null_policy null_handling, typename PrecedingIter, typename FollowingIter>
struct gather_index_calculator {
size_type n;
bitmask_type const* input_nullmask;
bool exclude_nulls;
PrecedingIter preceding;
FollowingIter following;
size_type min_periods;
rmm::cuda_stream_view stream;
static size_type constexpr NULL_INDEX =
std::numeric_limits<size_type>::min(); // For nullifying with gather.
gather_index_calculator(size_type n,
column_view input,
PrecedingIter preceding,
FollowingIter following,
size_type min_periods,
rmm::cuda_stream_view stream)
: n{n},
input_nullmask{input.null_mask()},
exclude_nulls{null_handling == null_policy::EXCLUDE and input.has_nulls()},
preceding{preceding},
following{following},
min_periods{min_periods},
stream{stream}
{
}
/// For `null_policy::EXCLUDE`, find gather index for `N`th non-null value.
template <typename Iter>
size_type __device__ index_of_nth_non_null(Iter begin, size_type window_size) const
{
auto reqd_valid_count = n >= 0 ? n : (-n - 1);
auto const pred_nth_valid = [&reqd_valid_count, input_nullmask = input_nullmask](size_type j) {
return cudf::bit_is_set(input_nullmask, j) && reqd_valid_count-- == 0;
};
auto const end = begin + window_size;
auto const found = thrust::find_if(thrust::seq, begin, end, pred_nth_valid);
return found == end ? NULL_INDEX : *found;
}
size_type __device__ operator()(size_type i) const
{
// preceding[i] includes the current row.
auto const window_size = preceding[i] + following[i];
if (min_periods > window_size) { return NULL_INDEX; }
auto const wrapped_n = n >= 0 ? n : (window_size + n);
if (wrapped_n < 0 || wrapped_n > (window_size - 1)) {
return NULL_INDEX; // n lies outside the window.
}
// Out of short-circuit exits.
// If nulls don't need to be excluded, a fixed window offset calculation is sufficient.
auto const window_start = i - preceding[i] + 1;
if (not exclude_nulls) { return window_start + wrapped_n; }
// Must exclude nulls. Must examine each row in the window.
auto const window_end = window_start + window_size;
return n >= 0 ? index_of_nth_non_null(thrust::make_counting_iterator(window_start), window_size)
: index_of_nth_non_null(
thrust::make_reverse_iterator(thrust::make_counting_iterator(window_end)),
window_size);
}
};
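// Worked example (a sketch; values assumed for this comment only, with min_periods <= 3):
//   input == [10, null, 30, 40], preceding == [1, 2, 2, 2], following == [1, 1, 1, 0]
//   For row i == 2 with n == 1:
//     window_size == 2 + 1 == 3, covering rows 1..3; window_start == 1.
//     null_policy::INCLUDE -> gather index == window_start + 1 == 2 (value 30).
//     null_policy::EXCLUDE -> the 1st (zero-based) non-null value in the window is at row 3,
//                             so the gather index is 3 (value 40).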
/**
* @brief Helper function for NTH_ELEMENT window aggregation
*
* The `N`th element is deemed null for the following cases:
* 1. The window has fewer elements than `min_periods`.
* 2. N falls outside the window, i.e. N ∉ [-window_size, window_size).
* 3. `null_handling == EXCLUDE`, and the window has fewer than `N` non-null elements.
*
* If none of the above holds true, the result is non-null. How the value is determined
* depends on `null_handling`:
* 1. `null_handling == INCLUDE`: The required value is the `N`th value from the window's start.
* 2. `null_handling == EXCLUDE`: The required value is the `N`th *non-null* value from the
* window's start. If the window has fewer than `N` non-null values, the result is null.
*
* @tparam null_handling Whether to include/exclude null rows in the window
* @tparam PrecedingIter Type of iterator for preceding window
* @tparam FollowingIter Type of iterator for following window
* @param n The index of the element to be returned
* @param input The input column
* @param preceding Iterator specifying the preceding window bound
* @param following Iterator specifying the following window bound
* @param min_periods The minimum number of rows required in the window
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return A column the `n`th element of the specified window for each row
*/
template <null_policy null_handling, typename PrecedingIter, typename FollowingIter>
std::unique_ptr<column> nth_element(size_type n,
column_view const& input,
PrecedingIter preceding,
FollowingIter following,
size_type min_periods,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const gather_iter = cudf::detail::make_counting_transform_iterator(
0,
gather_index_calculator<null_handling, PrecedingIter, FollowingIter>{
n, input, preceding, following, min_periods, stream});
auto gather_map = rmm::device_uvector<size_type>(input.size(), stream);
thrust::copy(
rmm::exec_policy(stream), gather_iter, gather_iter + input.size(), gather_map.begin());
auto gathered = cudf::detail::gather(table_view{{input}},
gather_map,
cudf::out_of_bounds_policy::NULLIFY,
negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
return std::move(gathered.front());
}
} // namespace cudf::detail::rolling
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling.hpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/utilities/traits.hpp>
namespace cudf {
// helper functions - used in the rolling window implementation and tests
namespace detail {
// store functor
template <typename T, bool is_mean = false>
struct rolling_store_output_functor {
CUDF_HOST_DEVICE inline void operator()(T& out, T& val, size_type count) { out = val; }
};
// Specialization for MEAN
template <typename _T>
struct rolling_store_output_functor<_T, true> {
// SFINAE for non-bool types
template <typename T = _T,
std::enable_if_t<!(cudf::is_boolean<T>() || cudf::is_timestamp<T>())>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(T& out, T& val, size_type count)
{
out = val / count;
}
// SFINAE for bool type
template <typename T = _T, std::enable_if_t<cudf::is_boolean<T>()>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(T& out, T& val, size_type count)
{
out = static_cast<int32_t>(val) / count;
}
// SFINAE for timestamp types
template <typename T = _T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(T& out, T& val, size_type count)
{
out = static_cast<T>(val.time_since_epoch() / count);
}
};
/**
* @copydoc cudf::rolling_window(column_view const& input,
* column_view const& default_outputs,
* size_type preceding_window,
* size_type following_window,
* size_type min_periods,
* rolling_aggregation const& agg,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream to use for device memory operations
*/
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::rolling_window(column_view const& input,
* column_view const& preceding_window,
* column_view const& following_window,
* size_type min_periods,
* rolling_aggregation const& agg,
* rmm::mr::device_memory_resource* mr);
*
* @param stream CUDA stream to use for device memory operations
*/
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/optimized_unbounded_window.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace rmm::mr {
class device_memory_resource;
}
namespace cudf {
class rolling_aggregation;
class table_view;
namespace detail {
/**
* @brief Checks if it is possible to optimize fully UNBOUNDED window function.
*
 * @return true if the window aggregation can be optimized, i.e. if it is unbounded-preceding,
* unbounded-following, if it has a supported aggregation type, and if min_periods is 1.
* @return false if the window aggregation cannot be optimized.
*/
bool can_optimize_unbounded_window(bool unbounded_preceding,
bool unbounded_following,
size_type min_periods,
rolling_aggregation const& agg);
/**
* @brief Optimized bypass for fully UNBOUNDED window functions.
*
* @return the result column from running the unbounded window aggregation,
* via the optimized aggregation/reduction path.
*/
std::unique_ptr<column> optimized_unbounded_window(table_view const& group_keys,
column_view const& input,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling_fixed_window.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rolling.cuh"
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/extrema.h>
namespace cudf::detail {
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (input.is_empty()) { return cudf::detail::empty_output_for_rolling_aggregation(input, agg); }
CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
"Defaults column must be either empty or have as many rows as the input column.");
CUDF_EXPECTS(-(preceding_window - 1) <= following_window,
"Preceding window bounds must precede the following window bounds.");
if (agg.kind == aggregation::CUDA || agg.kind == aggregation::PTX) {
// TODO: In future, might need to clamp preceding/following to column boundaries.
return cudf::detail::rolling_window_udf(input,
preceding_window,
"cudf::size_type",
following_window,
"cudf::size_type",
min_periods,
agg,
stream,
mr);
} else {
// Clamp preceding/following to column boundaries.
// E.g. If preceding_window == 2, then for a column of 5 elements, preceding_window will be:
// [1, 2, 2, 2, 1]
auto const preceding_calc = [preceding_window] __device__(size_type i) {
return thrust::min(i + 1, preceding_window);
};
auto const following_calc = [col_size = input.size(),
following_window] __device__(size_type i) {
return thrust::min(col_size - i - 1, following_window);
};
auto const preceding_column = expand_to_column(preceding_calc, input.size(), stream);
auto const following_column = expand_to_column(following_calc, input.size(), stream);
return cudf::detail::rolling_window(input,
default_outputs,
preceding_column->view().begin<cudf::size_type>(),
following_column->view().begin<cudf::size_type>(),
min_periods,
agg,
stream,
mr);
}
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/optimized_unbounded_window.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby/sort_helper.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/groupby.hpp>
#include <cudf/reduction/detail/reduction.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/default_stream.hpp>
namespace cudf::detail {
bool can_optimize_unbounded_window(bool unbounded_preceding,
bool unbounded_following,
size_type min_periods,
rolling_aggregation const& agg)
{
auto is_supported = [](auto const& agg) {
switch (agg.kind) {
case cudf::aggregation::Kind::COUNT_ALL: [[fallthrough]];
case cudf::aggregation::Kind::COUNT_VALID: [[fallthrough]];
case cudf::aggregation::Kind::SUM: [[fallthrough]];
case cudf::aggregation::Kind::MIN: [[fallthrough]];
case cudf::aggregation::Kind::MAX: return true;
default:
// COLLECT_LIST and COLLECT_SET can be added at a later date.
// Other aggregations do not fit into the [UNBOUNDED, UNBOUNDED]
// category. For instance:
// 1. Ranking functions (ROW_NUMBER, RANK, DENSE_RANK, PERCENT_RANK)
// use [UNBOUNDED PRECEDING, CURRENT ROW].
// 2. LEAD/LAG are defined on finite row boundaries.
return false;
}
};
return unbounded_preceding && unbounded_following && (min_periods == 1) && is_supported(agg);
}
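// For instance (illustrative): an UNBOUNDED-PRECEDING/UNBOUNDED-FOLLOWING MIN window with
// min_periods == 1 takes the optimized path below, whereas the same window with a LEAD
// aggregation, or with min_periods > 1, falls back to the regular rolling implementation.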
/// Converts rolling_aggregation to corresponding reduce/groupby_aggregation.
template <typename Base>
struct aggregation_converter {
template <aggregation::Kind k>
std::unique_ptr<Base> operator()() const
{
if constexpr (std::is_same_v<Base, cudf::groupby_aggregation> and
k == aggregation::Kind::COUNT_ALL) {
// Note: COUNT_ALL cannot be used as a cudf::reduce_aggregation; cudf::reduce does not support
// it.
return cudf::make_count_aggregation<Base>(null_policy::INCLUDE);
} else if constexpr (std::is_same_v<Base, cudf::groupby_aggregation> and
k == aggregation::Kind::COUNT_VALID) {
      // Note: COUNT_VALID cannot be used as a cudf::reduce_aggregation; cudf::reduce does not
      // support it.
return cudf::make_count_aggregation<Base>(null_policy::EXCLUDE);
} else if constexpr (k == aggregation::Kind::SUM) {
return cudf::make_sum_aggregation<Base>();
} else if constexpr (k == aggregation::Kind::MIN) {
return cudf::make_min_aggregation<Base>();
} else if constexpr (k == aggregation::Kind::MAX) {
return cudf::make_max_aggregation<Base>();
} else {
CUDF_FAIL("Unsupported aggregation kind for optimized unbounded windows.");
}
}
};
template <typename Base>
std::unique_ptr<Base> convert_to(cudf::rolling_aggregation const& aggr)
{
return cudf::detail::aggregation_dispatcher(aggr.kind, aggregation_converter<Base>{});
}
/// Compute unbounded rolling window via groupby-aggregation.
/// Used for input that has groupby key columns.
std::unique_ptr<column> aggregation_based_rolling_window(table_view const& group_keys,
column_view const& input,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(group_keys.num_columns() > 0,
"Ungrouped rolling window not supported in aggregation path.");
auto agg_requests = std::vector<cudf::groupby::aggregation_request>{};
agg_requests.push_back(cudf::groupby::aggregation_request());
agg_requests.front().values = input;
agg_requests.front().aggregations.push_back(convert_to<cudf::groupby_aggregation>(aggr));
auto group_by = cudf::groupby::groupby{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES};
auto aggregation_results = group_by.aggregate(agg_requests, stream);
auto const& aggregation_result_col = aggregation_results.second.front().results.front();
using cudf::groupby::detail::sort::sort_groupby_helper;
auto helper = sort_groupby_helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES, {}};
auto const& group_labels = helper.group_labels(stream);
auto result_columns = cudf::detail::gather(cudf::table_view{{*aggregation_result_col}},
group_labels,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
return std::move(result_columns.front());
}
/// Compute unbounded rolling window via cudf::reduce.
/// Used for input that has no groupby keys. i.e. The window spans the column.
std::unique_ptr<column> reduction_based_rolling_window(column_view const& input,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const reduce_results = [&] {
auto const return_dtype = cudf::detail::target_type(input.type(), aggr.kind);
if (aggr.kind == aggregation::COUNT_ALL) {
return cudf::make_fixed_width_scalar(input.size(), stream);
} else if (aggr.kind == aggregation::COUNT_VALID) {
return cudf::make_fixed_width_scalar(input.size() - input.null_count(), stream);
} else {
return cudf::reduction::detail::reduce(input,
*convert_to<cudf::reduce_aggregation>(aggr),
return_dtype,
std::nullopt,
stream,
rmm::mr::get_current_device_resource());
}
}();
  // Broadcast the scalar result into a column of the same size as the input.
return cudf::make_column_from_scalar(*reduce_results, input.size(), stream, mr);
}
std::unique_ptr<column> optimized_unbounded_window(table_view const& group_keys,
column_view const& input,
rolling_aggregation const& aggr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return group_keys.num_columns() > 0
? aggregation_based_rolling_window(group_keys, input, aggr, stream, mr)
: reduction_based_rolling_window(input, aggr, stream, mr);
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/range_comparator_utils.cuh
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/strings/string_view.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/functional.h>
#include <cmath>
#include <limits>
namespace cudf::detail {
/// For order-by columns of signed types, bounds calculation might cause accidental
/// overflow/underflows. This needs to be detected and handled appropriately
/// for signed and unsigned types.
/**
* @brief Add `delta` to value, and cap at numeric_limits::max(), for signed types.
*/
template <typename T, CUDF_ENABLE_IF(cuda::std::numeric_limits<T>::is_signed)>
__host__ __device__ T add_safe(T const& value, T const& delta)
{
if constexpr (std::is_floating_point_v<T>) {
if (std::isinf(value) or std::isnan(value)) { return value; }
}
// delta >= 0.
return (value < 0 || (cuda::std::numeric_limits<T>::max() - value) >= delta)
? (value + delta)
: cuda::std::numeric_limits<T>::max();
}
/**
* @brief Add `delta` to value, and cap at numeric_limits::max(), for unsigned types.
*/
template <typename T, CUDF_ENABLE_IF(not cuda::std::numeric_limits<T>::is_signed)>
__host__ __device__ T add_safe(T const& value, T const& delta)
{
// delta >= 0.
return ((cuda::std::numeric_limits<T>::max() - value) >= delta)
? (value + delta)
: cuda::std::numeric_limits<T>::max();
}
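// For example (illustrative): add_safe<int8_t>(int8_t{120}, int8_t{10}) saturates to 127,
// and add_safe(std::numeric_limits<float>::infinity(), 1.0f) returns infinity unchanged.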
/**
* @brief Subtract `delta` from value, and cap at numeric_limits::lowest(), for signed types.
*
* Note: We use numeric_limits::lowest() instead of min() because for floats, lowest() returns
* the smallest finite value, as opposed to min() which returns the smallest _positive_ value.
*/
template <typename T, CUDF_ENABLE_IF(cuda::std::numeric_limits<T>::is_signed)>
__host__ __device__ T subtract_safe(T const& value, T const& delta)
{
if constexpr (std::is_floating_point_v<T>) {
if (std::isinf(value) or std::isnan(value)) { return value; }
}
// delta >= 0;
return (value >= 0 || (value - cuda::std::numeric_limits<T>::lowest()) >= delta)
? (value - delta)
: cuda::std::numeric_limits<T>::lowest();
}
/**
* @brief Subtract `delta` from value, and cap at numeric_limits::lowest(), for unsigned types.
*
* Note: We use numeric_limits::lowest() instead of min() because for floats, lowest() returns
* the smallest finite value, as opposed to min() which returns the smallest _positive_ value.
*
* This distinction isn't truly relevant for this overload (because float is signed).
* lowest() is kept for uniformity.
*/
template <typename T, CUDF_ENABLE_IF(not cuda::std::numeric_limits<T>::is_signed)>
__host__ __device__ T subtract_safe(T const& value, T const& delta)
{
// delta >= 0;
return ((value - cuda::std::numeric_limits<T>::lowest()) >= delta)
? (value - delta)
: cuda::std::numeric_limits<T>::lowest();
}
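// For example (illustrative): subtract_safe<uint8_t>(uint8_t{5}, uint8_t{10}) saturates to 0,
// and subtract_safe<int32_t>(-2'000'000'000, 2'000'000'000) saturates to
// numeric_limits<int32_t>::lowest() rather than wrapping around.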
/**
* @brief Comparator for numeric order-by columns, handling floating point NaN values.
*
* This is required for binary search through sorted vectors that contain NaN values.
 * With ascending sort, NaN values are stored at the end of the sequence, since they
 * compare greater than all other values, including infinity.
* But thrust::less would have trouble locating it because:
* 1. thrust::less(NaN, 10) returns false
* 2. thrust::less(10, NaN) also returns false
*
* This comparator honors the position of NaN values vis-à-vis non-NaN values.
*
*/
struct nan_aware_less {
template <typename T, CUDF_ENABLE_IF(not cudf::is_floating_point<T>())>
__host__ __device__ bool operator()(T const& lhs, T const& rhs) const
{
return thrust::less<T>{}(lhs, rhs);
}
template <typename T, CUDF_ENABLE_IF(cudf::is_floating_point<T>())>
__host__ __device__ bool operator()(T const& lhs, T const& rhs) const
{
if (std::isnan(lhs)) { return false; }
return std::isnan(rhs) or thrust::less<T>{}(lhs, rhs);
}
};
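// For example (illustrative): nan_aware_less{}(10.0, NAN) == true while
// nan_aware_less{}(NAN, 10.0) == false, so NaNs order after all other values (including
// infinity), matching their position in an ascending-sorted orderby column.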
/**
* @brief Comparator for numeric order-by columns, handling floating point NaN values.
*
* This is required for binary search through sorted vectors that contain NaN values.
 * With descending sort, NaN values are stored at the beginning of the sequence, since they
 * compare greater than all other values, including infinity.
* But thrust::greater would have trouble locating it because:
* 1. thrust::greater(NaN, 10) returns false
* 2. thrust::greater(10, NaN) also returns false
*
* This comparator honors the position of NaN values vis-à-vis non-NaN values.
*
*/
struct nan_aware_greater {
template <typename T>
__host__ __device__ bool operator()(T const& lhs, T const& rhs) const
{
return nan_aware_less{}(rhs, lhs);
}
};
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling_collect_list.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rolling_collect_list.cuh"
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
/**
* @see cudf::detail::get_list_child_to_list_row_mapping
*/
std::unique_ptr<column> get_list_child_to_list_row_mapping(cudf::column_view const& offsets,
rmm::cuda_stream_view stream)
{
  // First, scatter each (non-empty) list row's index to the position of its starting
  // offset, into a column of N `0`s, where N == number of child rows.
  // For example:
  //   offsets        == [0, 2, 5, 8, 11, 13]
  //   scatter result == [0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0]
  //
  // An example with an empty list row at index 2:
  //   offsets        == [0, 2, 5, 5, 8, 11, 13]
  //   scatter result == [0, 0, 1, 0, 0, 3, 0, 0, 4, 0, 0, 5, 0]
//
auto const num_child_rows{
cudf::detail::get_value<size_type>(offsets, offsets.size() - 1, stream)};
auto per_row_mapping = make_fixed_width_column(
data_type{type_to_id<size_type>()}, num_child_rows, mask_state::UNALLOCATED, stream);
auto per_row_mapping_begin = per_row_mapping->mutable_view().template begin<size_type>();
thrust::fill_n(rmm::exec_policy(stream), per_row_mapping_begin, num_child_rows, 0);
auto const begin = thrust::make_counting_iterator<size_type>(0);
thrust::scatter_if(rmm::exec_policy(stream),
begin,
begin + offsets.size() - 1,
offsets.begin<size_type>(),
begin, // stencil iterator
per_row_mapping_begin,
[offset = offsets.begin<size_type>()] __device__(auto i) {
return offset[i] != offset[i + 1];
}); // [0,0,1,0,0,3,...]
// Next, generate mapping with inclusive_scan(max) on the scatter result.
// For the example above:
// scatter result == [0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0]
// inclusive_scan == [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4]
//
// For the case with an empty list at index 2:
// scatter result == [0, 0, 1, 0, 0, 3, 0, 0, 4, 0, 0, 5, 0]
// inclusive_scan == [0, 0, 1, 1, 1, 3, 3, 3, 4, 4, 4, 5, 5]
thrust::inclusive_scan(rmm::exec_policy(stream),
per_row_mapping_begin,
per_row_mapping_begin + num_child_rows,
per_row_mapping_begin,
thrust::maximum{});
return per_row_mapping;
}
/**
* @see cudf::detail::count_child_nulls
*/
size_type count_child_nulls(column_view const& input,
std::unique_ptr<column> const& gather_map,
rmm::cuda_stream_view stream)
{
auto input_device_view = column_device_view::create(input, stream);
auto input_row_is_null = [d_input = *input_device_view] __device__(auto i) {
return d_input.is_null_nocheck(i);
};
return thrust::count_if(rmm::exec_policy(stream),
gather_map->view().begin<size_type>(),
gather_map->view().end<size_type>(),
input_row_is_null);
}
/**
* @see cudf::detail::rolling_collect_list
*/
std::pair<std::unique_ptr<column>, std::unique_ptr<column>> purge_null_entries(
column_view const& input,
column_view const& gather_map,
column_view const& offsets,
size_type num_child_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto input_device_view = column_device_view::create(input, stream);
auto input_row_not_null = [d_input = *input_device_view] __device__(auto i) {
return d_input.is_valid_nocheck(i);
};
// Purge entries in gather_map that correspond to null input.
auto new_gather_map = make_fixed_width_column(data_type{type_to_id<size_type>()},
gather_map.size() - num_child_nulls,
mask_state::UNALLOCATED,
stream);
thrust::copy_if(rmm::exec_policy(stream),
gather_map.template begin<size_type>(),
gather_map.template end<size_type>(),
new_gather_map->mutable_view().template begin<size_type>(),
input_row_not_null);
// Recalculate offsets after null entries are purged.
auto new_sizes = make_fixed_width_column(
data_type{type_to_id<size_type>()}, input.size(), mask_state::UNALLOCATED, stream);
thrust::tabulate(rmm::exec_policy(stream),
new_sizes->mutable_view().template begin<size_type>(),
new_sizes->mutable_view().template end<size_type>(),
[d_gather_map = gather_map.template begin<size_type>(),
d_old_offsets = offsets.template begin<size_type>(),
input_row_not_null] __device__(auto i) {
return thrust::count_if(thrust::seq,
d_gather_map + d_old_offsets[i],
d_gather_map + d_old_offsets[i + 1],
input_row_not_null);
});
auto new_offsets = std::get<0>(
cudf::detail::make_offsets_child_column(new_sizes->view().template begin<size_type>(),
new_sizes->view().template end<size_type>(),
stream,
mr));
return std::make_pair<std::unique_ptr<column>, std::unique_ptr<column>>(std::move(new_gather_map),
std::move(new_offsets));
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/detail/rolling_jit.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
namespace cudf {
namespace detail {
template <class T>
T minimum(T a, T b)
{
return b < a ? b : a;
}
struct preceding_window_wrapper {
cudf::size_type const* d_group_offsets;
cudf::size_type const* d_group_labels;
cudf::size_type preceding_window;
cudf::size_type operator[](cudf::size_type idx)
{
auto group_label = d_group_labels[idx];
auto group_start = d_group_offsets[group_label];
return minimum(preceding_window, idx - group_start + 1); // Preceding includes current row.
}
};
struct following_window_wrapper {
cudf::size_type const* d_group_offsets;
cudf::size_type const* d_group_labels;
cudf::size_type following_window;
cudf::size_type operator[](cudf::size_type idx)
{
auto group_label = d_group_labels[idx];
auto group_end =
d_group_offsets[group_label +
1]; // Cannot fall off the end, since offsets is capped with `input.size()`.
return minimum(following_window, (group_end - 1) - idx);
}
};
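// Usage sketch (values assumed for illustration): with two groups laid out as
//   d_group_offsets == [0, 3, 5] and d_group_labels == [0, 0, 0, 1, 1],
// a preceding_window_wrapper with preceding_window == 2 evaluates to [1, 2, 2, 1, 2],
// and a following_window_wrapper with following_window == 2 evaluates to [2, 1, 0, 1, 0].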
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/jit/kernel.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rolling/detail/rolling_jit.hpp>
#include <rolling/jit/operation.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
namespace cudf {
namespace rolling {
namespace jit {
template <typename WindowType>
cudf::size_type __device__ get_window(WindowType window, cudf::thread_index_type index)
{
return window[index];
}
template <>
cudf::size_type __device__ get_window(cudf::size_type window, cudf::thread_index_type index)
{
return window;
}
template <typename InType,
typename OutType,
class agg_op,
typename PrecedingWindowType,
typename FollowingWindowType>
__global__ void gpu_rolling_new(cudf::size_type nrows,
InType const* const __restrict__ in_col,
cudf::bitmask_type const* const __restrict__ in_col_valid,
OutType* __restrict__ out_col,
cudf::bitmask_type* __restrict__ out_col_valid,
cudf::size_type* __restrict__ output_valid_count,
PrecedingWindowType preceding_window_begin,
FollowingWindowType following_window_begin,
cudf::size_type min_periods)
{
cudf::thread_index_type i = blockIdx.x * blockDim.x + threadIdx.x;
cudf::thread_index_type const stride = blockDim.x * gridDim.x;
cudf::size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffff'ffffu, i < nrows);
while (i < nrows) {
int64_t const preceding_window = get_window(preceding_window_begin, i);
int64_t const following_window = get_window(following_window_begin, i);
// compute bounds
auto const start = static_cast<cudf::size_type>(
min(static_cast<int64_t>(nrows), max(int64_t{0}, i - preceding_window + 1)));
auto const end = static_cast<cudf::size_type>(
min(static_cast<int64_t>(nrows), max(int64_t{0}, i + following_window + 1)));
auto const start_index = min(start, end);
auto const end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
cudf::size_type count = end_index - start_index;
OutType val = agg_op::template operate<OutType, InType>(in_col, start_index, count);
// check if we have enough input samples
bool const output_is_valid = (count >= min_periods);
// set the mask
unsigned int const result_mask = __ballot_sync(active_threads, output_is_valid);
// store the output value, one per thread
if (output_is_valid) { out_col[i] = val; }
// only one thread writes the mask
if (0 == cudf::intra_word_index(i)) {
out_col_valid[cudf::word_index(i)] = result_mask;
warp_valid_count += __popc(result_mask);
}
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < nrows);
}
// TODO: likely faster to do a single_lane_block_reduce and a single
// atomic per block but that requires jitifying single_lane_block_reduce...
if (0 == cudf::intra_word_index(threadIdx.x)) { atomicAdd(output_valid_count, warp_valid_count); }
}
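// Launch sketch (editorial illustration; the real launch configuration is generated by the
// JIT layer, and the names below are placeholders):
//   constexpr int block_size = 256;
//   int const grid_size      = (nrows + block_size - 1) / block_size;
//   gpu_rolling_new<InType, OutType, agg_op, Preceding, Following>
//     <<<grid_size, block_size, 0, stream>>>(nrows, d_in, d_in_valid, d_out, d_out_valid,
//                                            d_valid_count, preceding, following, min_periods);
// Because the kernel uses a grid-stride loop, any positive grid size produces correct results.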
} // namespace jit
} // namespace rolling
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/jit/operation-udf.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// This file serves as a placeholder for user-defined functions, so jitify can choose to
// override it at runtime.
| 0 |
rapidsai_public_repos/cudf/cpp/src/rolling
|
rapidsai_public_repos/cudf/cpp/src/rolling/jit/operation.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <rolling/jit/operation-udf.hpp>
#pragma once
struct rolling_udf_ptx {
template <typename OutType, typename InType>
static OutType operate(InType const* in_col, cudf::size_type start, cudf::size_type count)
{
OutType ret;
rolling_udf(&ret, 0, 0, 0, 0, &in_col[start], count, sizeof(InType));
return ret;
}
};
struct rolling_udf_cuda {
template <typename OutType, typename InType>
static OutType operate(InType const* in_col, cudf::size_type start, cudf::size_type count)
{
OutType ret;
rolling_udf(&ret, in_col, start, count);
return ret;
}
};
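// Editorial note: `rolling_udf` itself is not declared here; it is injected at JIT time via
// operation-udf.hpp. Inferring from the call sites above, the CUDA-source variant is assumed to
// look roughly like
//   __device__ void rolling_udf(OutType* result, InType const* data,
//                               cudf::size_type start, cudf::size_type count);
// while the PTX variant additionally receives placeholder bookkeeping arguments. This is an
// inference from the calls above, not a documented contract.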
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reshape/byte_cast.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/lists/detail/lists_column_factories.hpp>
#include <cudf/reshape.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
// Data type of the output data column after conversion.
constexpr data_type output_type{type_id::UINT8};
template <typename T, typename Enable = void>
struct byte_list_conversion_fn {
template <typename... Args>
static std::unique_ptr<column> invoke(Args&&...)
{
CUDF_FAIL("Unsupported non-numeric and non-string column");
}
};
struct byte_list_conversion_dispatcher {
template <typename T>
std::unique_ptr<column> operator()(column_view const& input,
flip_endianness configuration,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
return byte_list_conversion_fn<T>::invoke(input, configuration, stream, mr);
}
};
template <typename T>
struct byte_list_conversion_fn<T, std::enable_if_t<cudf::is_numeric<T>()>> {
static std::unique_ptr<column> invoke(column_view const& input,
flip_endianness configuration,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.size() == 0) {
return cudf::lists::detail::make_empty_lists_column(output_type, stream, mr);
}
if (input.size() == input.null_count()) {
return cudf::lists::detail::make_all_nulls_lists_column(
input.size(), output_type, stream, mr);
}
auto const num_bytes = static_cast<size_type>(input.size() * sizeof(T));
auto byte_column =
make_numeric_column(output_type, num_bytes, mask_state::UNALLOCATED, stream, mr);
auto const d_inp = reinterpret_cast<char const*>(input.data<T>());
auto const d_out = byte_column->mutable_view().data<char>();
if (configuration == flip_endianness::YES) {
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_bytes),
[d_inp, d_out] __device__(auto index) {
constexpr auto mask = static_cast<size_type>(sizeof(T) - 1);
d_out[index] = d_inp[index + mask - ((index & mask) << 1)];
});
} else {
thrust::copy_n(rmm::exec_policy(stream), d_inp, num_bytes, d_out);
}
auto const it = thrust::make_constant_iterator(cudf::size_of(input.type()));
auto offsets_column =
std::get<0>(cudf::detail::make_offsets_child_column(it, it + input.size(), stream, mr));
auto result = make_lists_column(input.size(),
std::move(offsets_column),
std::move(byte_column),
input.null_count(),
detail::copy_bitmask(input, stream, mr),
stream,
mr);
// If any nulls are present, the corresponding lists must be purged so that
// the result is sanitized.
if (auto const result_cv = result->view();
cudf::detail::has_nonempty_nulls(result_cv, stream)) {
return cudf::detail::purge_nonempty_nulls(result_cv, stream, mr);
}
return result;
}
};
template <typename T>
struct byte_list_conversion_fn<T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>> {
static std::unique_ptr<column> invoke(column_view const& input,
flip_endianness,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.size() == 0) {
return cudf::lists::detail::make_empty_lists_column(output_type, stream, mr);
}
if (input.size() == input.null_count()) {
return cudf::lists::detail::make_all_nulls_lists_column(
input.size(), output_type, stream, mr);
}
auto col_content = std::make_unique<column>(input, stream, mr)->release();
auto chars_contents = col_content.children[strings_column_view::chars_column_index]->release();
auto const num_chars = chars_contents.data->size();
auto uint8_col = std::make_unique<column>(
output_type, num_chars, std::move(*(chars_contents.data)), rmm::device_buffer{}, 0);
auto result = make_lists_column(
input.size(),
std::move(col_content.children[cudf::strings_column_view::offsets_column_index]),
std::move(uint8_col),
input.null_count(),
detail::copy_bitmask(input, stream, mr),
stream,
mr);
// If any nulls are present, the corresponding lists must be purged so that
// the result is sanitized.
if (auto const result_cv = result->view();
cudf::detail::has_nonempty_nulls(result_cv, stream)) {
return cudf::detail::purge_nonempty_nulls(result_cv, stream, mr);
}
return result;
}
};
} // namespace
/**
* @copydoc cudf::byte_cast(column_view const&, flip_endianness, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> byte_cast(column_view const& input,
flip_endianness endian_configuration,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(
input.type(), byte_list_conversion_dispatcher{}, input, endian_configuration, stream, mr);
}
} // namespace detail
/**
* @copydoc cudf::byte_cast(column_view const&, flip_endianness, rmm::mr::device_memory_resource*)
*/
std::unique_ptr<column> byte_cast(column_view const& input,
flip_endianness endian_configuration,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::byte_cast(input, endian_configuration, cudf::get_default_stream(), mr);
}
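// Usage sketch (editorial illustration, assuming <cudf/reshape.hpp> and an INT32 column `col`
// holding {1, 2}): cudf::byte_cast(col, cudf::flip_endianness::YES) yields a LIST<UINT8> column
// {{0x00, 0x00, 0x00, 0x01}, {0x00, 0x00, 0x00, 0x02}}, i.e. each row becomes its sizeof(T)
// bytes with the byte order reversed (big-endian output on little-endian hardware).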
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reshape/tile.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/reshape.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <memory>
namespace cudf {
namespace {
struct tile_functor {
size_type count;
size_type __device__ operator()(size_type i) { return i % count; }
};
} // anonymous namespace
namespace detail {
std::unique_ptr<table> tile(table_view const& in,
size_type count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(count >= 0, "Count cannot be negative");
auto const in_num_rows = in.num_rows();
if (count == 0 or in_num_rows == 0) { return empty_like(in); }
auto out_num_rows = in_num_rows * count;
auto tiled_it = cudf::detail::make_counting_transform_iterator(0, tile_functor{in_num_rows});
return detail::gather(
in, tiled_it, tiled_it + out_num_rows, out_of_bounds_policy::DONT_CHECK, stream, mr);
}
} // namespace detail
std::unique_ptr<table> tile(table_view const& in,
size_type count,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::tile(in, count, cudf::get_default_stream(), mr);
}
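// Usage sketch (editorial illustration, assuming <cudf/reshape.hpp> and a two-row table `tv`
// with rows {r0, r1}): cudf::tile(tv, 3) returns a six-row table {r0, r1, r0, r1, r0, r1};
// the gather map i % num_rows repeats the whole input `count` times in order.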
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reshape/interleave_columns.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/reshape.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/lists/detail/interleave_columns.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
namespace {
// Error case when no other overload or specialization is available
template <typename T, typename Enable = void>
struct interleave_columns_impl {
template <typename... Args>
std::unique_ptr<column> operator()(Args&&...)
{
CUDF_FAIL("Unsupported type in `interleave_columns`.");
}
};
struct interleave_columns_functor {
template <typename T>
std::unique_ptr<cudf::column> operator()(table_view const& input,
bool create_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return interleave_columns_impl<T>{}(input, create_mask, stream, mr);
}
};
template <typename T>
struct interleave_columns_impl<T, std::enable_if_t<std::is_same_v<T, cudf::list_view>>> {
std::unique_ptr<column> operator()(table_view const& lists_columns,
bool create_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return lists::detail::interleave_columns(lists_columns, create_mask, stream, mr);
}
};
template <typename T>
struct interleave_columns_impl<T, std::enable_if_t<std::is_same_v<T, cudf::struct_view>>> {
std::unique_ptr<cudf::column> operator()(table_view const& structs_columns,
bool create_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
    // We can safely call `column(0)` as the number of columns is known to be non-zero.
auto const num_children = structs_columns.column(0).num_children();
CUDF_EXPECTS(
std::all_of(structs_columns.begin(),
structs_columns.end(),
[num_children](auto const& col) { return col.num_children() == num_children; }),
"Number of children of the input structs columns must be the same");
auto const num_columns = structs_columns.num_columns();
auto const num_rows = structs_columns.num_rows();
auto const output_size = num_columns * num_rows;
// Interleave the children of the structs columns.
std::vector<std::unique_ptr<cudf::column>> output_struct_members;
for (size_type child_idx = 0; child_idx < num_children; ++child_idx) {
// Collect children columns from the input structs columns at index `child_idx`.
auto const child_iter = thrust::make_transform_iterator(
structs_columns.begin(), [&stream = stream, child_idx](auto const& col) {
return structs_column_view(col).get_sliced_child(child_idx, stream);
});
auto children = std::vector<column_view>(child_iter, child_iter + num_columns);
auto const child_type = children.front().type();
CUDF_EXPECTS(
std::all_of(children.cbegin(),
children.cend(),
[child_type](auto const& col) { return child_type == col.type(); }),
"Children of the input structs columns at the same child index must have the same type");
auto const children_nullable = std::any_of(
children.cbegin(), children.cend(), [](auto const& col) { return col.nullable(); });
output_struct_members.emplace_back(
type_dispatcher<dispatch_storage_type>(child_type,
interleave_columns_functor{},
table_view{std::move(children)},
children_nullable,
stream,
mr));
}
auto const create_mask_fn = [&] {
auto const input_dv_ptr = table_device_view::create(structs_columns, stream);
auto const validity_fn = [input_dv = *input_dv_ptr, num_columns] __device__(auto const idx) {
return input_dv.column(idx % num_columns).is_valid(idx / num_columns);
};
return cudf::detail::valid_if(thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(output_size),
validity_fn,
stream,
mr);
};
// Only create null mask if at least one input structs column is nullable.
auto [null_mask, null_count] =
create_mask ? create_mask_fn() : std::pair{rmm::device_buffer{0, stream, mr}, size_type{0}};
return make_structs_column(
output_size, std::move(output_struct_members), null_count, std::move(null_mask), stream, mr);
}
};
template <typename T>
struct interleave_columns_impl<T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>> {
std::unique_ptr<cudf::column> operator()(table_view const& strings_columns,
bool create_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto num_columns = strings_columns.num_columns();
if (num_columns == 1) // Single strings column returns a copy
return std::make_unique<column>(*(strings_columns.begin()), stream, mr);
auto strings_count = strings_columns.num_rows();
if (strings_count == 0) // All columns have 0 rows
return make_empty_column(type_id::STRING);
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns, stream);
auto d_table = *table;
auto num_strings = num_columns * strings_count;
std::pair<rmm::device_buffer, size_type> valid_mask{};
if (create_mask) {
// Create resulting null mask
valid_mask = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_strings),
[num_columns, d_table] __device__(size_type idx) {
auto source_row_idx = idx % num_columns;
auto source_col_idx = idx / num_columns;
return !d_table.column(source_row_idx).is_null(source_col_idx);
},
stream,
mr);
}
auto const null_count = valid_mask.second;
// Build offsets column by computing sizes of each string in the output
auto offsets_transformer = [num_columns, d_table] __device__(size_type idx) {
// First compute the column and the row this item belongs to
auto source_row_idx = idx % num_columns;
auto source_col_idx = idx / num_columns;
return d_table.column(source_row_idx).is_valid(source_col_idx)
? d_table.column(source_row_idx).element<string_view>(source_col_idx).size_bytes()
: 0;
};
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto [offsets_column, bytes] = cudf::detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + num_strings, stream, mr);
auto d_results_offsets = offsets_column->view().template data<int32_t>();
// Create the chars column
auto chars_column = strings::detail::create_chars_child_column(bytes, stream, mr);
// Fill the chars column
auto d_results_chars = chars_column->mutable_view().template data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
num_strings,
[num_columns, d_table, d_results_offsets, d_results_chars] __device__(size_type idx) {
auto source_row_idx = idx % num_columns;
auto source_col_idx = idx / num_columns;
// Do not write to buffer if the column value for this row is null
if (d_table.column(source_row_idx).is_null(source_col_idx)) return;
size_type offset = d_results_offsets[idx];
char* d_buffer = d_results_chars + offset;
strings::detail::copy_string(
d_buffer, d_table.column(source_row_idx).element<string_view>(source_col_idx));
});
return make_strings_column(num_strings,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(valid_mask.first));
}
};
template <typename T>
struct interleave_columns_impl<T, std::enable_if_t<cudf::is_fixed_width<T>()>> {
std::unique_ptr<cudf::column> operator()(table_view const& input,
bool create_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto arch_column = input.column(0);
auto output_size = input.num_columns() * input.num_rows();
auto output =
detail::allocate_like(arch_column, output_size, mask_allocation_policy::NEVER, stream, mr);
auto device_input = table_device_view::create(input, stream);
auto device_output = mutable_column_device_view::create(*output, stream);
auto index_begin = thrust::make_counting_iterator<size_type>(0);
auto index_end = thrust::make_counting_iterator<size_type>(output_size);
auto func_value = [input = *device_input,
divisor = input.num_columns()] __device__(size_type idx) {
return input.column(idx % divisor).element<T>(idx / divisor);
};
if (not create_mask) {
thrust::transform(
rmm::exec_policy(stream), index_begin, index_end, device_output->begin<T>(), func_value);
return output;
}
auto func_validity = [input = *device_input,
divisor = input.num_columns()] __device__(size_type idx) {
return input.column(idx % divisor).is_valid(idx / divisor);
};
thrust::transform_if(rmm::exec_policy(stream),
index_begin,
index_end,
device_output->begin<T>(),
func_value,
func_validity);
auto [mask, null_count] = valid_if(index_begin, index_end, func_validity, stream, mr);
output->set_null_mask(std::move(mask), null_count);
return output;
}
};
} // anonymous namespace
std::unique_ptr<column> interleave_columns(table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.num_columns() > 0, "input must have at least one column to determine dtype.");
auto const dtype = input.column(0).type();
CUDF_EXPECTS(std::all_of(std::cbegin(input),
std::cend(input),
[dtype](auto const& col) { return dtype == col.type(); }),
"Input columns must have the same type");
auto const output_needs_mask = std::any_of(
std::cbegin(input), std::cend(input), [](auto const& col) { return col.nullable(); });
return type_dispatcher<dispatch_storage_type>(
dtype, detail::interleave_columns_functor{}, input, output_needs_mask, stream, mr);
}
} // namespace detail
std::unique_ptr<column> interleave_columns(table_view const& input,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::interleave_columns(input, cudf::get_default_stream(), mr);
}
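// Usage sketch (editorial illustration, assuming <cudf/reshape.hpp> and two INT32 columns
// a = {1, 2, 3} and b = {4, 5, 6} wrapped in a table_view): cudf::interleave_columns(...)
// returns a single column {1, 4, 2, 5, 3, 6}; output index idx reads column (idx % num_columns)
// at row (idx / num_columns).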
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/column/column.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/detail/copying.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/detail/copying.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/iterator/transform_iterator.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <vector>
namespace cudf {
// Copy ctor w/ optional stream/mr
column::column(column const& other,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _type{other._type},
_size{other._size},
_data{other._data, stream, mr},
_null_mask{other._null_mask, stream, mr},
_null_count{other._null_count}
{
_children.reserve(other.num_children());
for (auto const& c : other._children) {
_children.emplace_back(std::make_unique<column>(*c, stream, mr));
}
}
// Move constructor
column::column(column&& other) noexcept
: _type{other._type},
_size{other._size},
_data{std::move(other._data)},
_null_mask{std::move(other._null_mask)},
_null_count{other._null_count},
_children{std::move(other._children)}
{
other._size = 0;
other._null_count = 0;
other._type = data_type{type_id::EMPTY};
}
// Release contents
column::contents column::release() noexcept
{
_size = 0;
_null_count = 0;
_type = data_type{type_id::EMPTY};
return column::contents{std::make_unique<rmm::device_buffer>(std::move(_data)),
std::make_unique<rmm::device_buffer>(std::move(_null_mask)),
std::move(_children)};
}
// Create immutable view
column_view column::view() const
{
// Create views of children
std::vector<column_view> child_views;
child_views.reserve(_children.size());
for (auto const& c : _children) {
child_views.emplace_back(*c);
}
return column_view{type(),
size(),
_data.data(),
static_cast<bitmask_type const*>(_null_mask.data()),
null_count(),
0,
child_views};
}
// Create mutable view
mutable_column_view column::mutable_view()
{
CUDF_FUNC_RANGE();
// create views of children
std::vector<mutable_column_view> child_views;
child_views.reserve(_children.size());
for (auto const& c : _children) {
child_views.emplace_back(*c);
}
return mutable_column_view{type(),
size(),
_data.data(),
static_cast<bitmask_type*>(_null_mask.data()),
_null_count,
0,
child_views};
}
void column::set_null_mask(rmm::device_buffer&& new_null_mask, size_type new_null_count)
{
if (new_null_count > 0) {
CUDF_EXPECTS(new_null_mask.size() >= cudf::bitmask_allocation_size_bytes(this->size()),
"Column with null values must be nullable and the null mask \
buffer size should match the size of the column.");
}
_null_mask = std::move(new_null_mask); // move
_null_count = new_null_count;
}
void column::set_null_mask(rmm::device_buffer const& new_null_mask,
size_type new_null_count,
rmm::cuda_stream_view stream)
{
if (new_null_count > 0) {
CUDF_EXPECTS(new_null_mask.size() >= cudf::bitmask_allocation_size_bytes(this->size()),
"Column with null values must be nullable and the null mask \
buffer size should match the size of the column.");
}
_null_mask = rmm::device_buffer{new_null_mask, stream}; // copy
_null_count = new_null_count;
}
void column::set_null_count(size_type new_null_count)
{
if (new_null_count > 0) { CUDF_EXPECTS(nullable(), "Invalid null count."); }
_null_count = new_null_count;
}
namespace {
struct create_column_from_view {
cudf::column_view view;
rmm::cuda_stream_view stream{cudf::get_default_stream()};
rmm::mr::device_memory_resource* mr;
template <typename ColumnType,
std::enable_if_t<std::is_same_v<ColumnType, cudf::string_view>>* = nullptr>
std::unique_ptr<column> operator()()
{
cudf::strings_column_view sview(view);
return cudf::strings::detail::copy_slice(sview, 0, view.size(), stream, mr);
}
template <typename ColumnType,
std::enable_if_t<std::is_same_v<ColumnType, cudf::dictionary32>>* = nullptr>
std::unique_ptr<column> operator()()
{
std::vector<std::unique_ptr<column>> children;
if (view.num_children()) {
cudf::dictionary_column_view dict_view(view);
auto indices_view = column_view(dict_view.indices().type(),
dict_view.size(),
dict_view.indices().head(),
nullptr,
0,
dict_view.offset());
children.emplace_back(std::make_unique<column>(indices_view, stream, mr));
children.emplace_back(std::make_unique<column>(dict_view.keys(), stream, mr));
}
return std::make_unique<column>(view.type(),
view.size(),
rmm::device_buffer{0, stream, mr},
cudf::detail::copy_bitmask(view, stream, mr),
view.null_count(),
std::move(children));
}
template <typename ColumnType, std::enable_if_t<cudf::is_fixed_width<ColumnType>()>* = nullptr>
std::unique_ptr<column> operator()()
{
auto op = [&](auto const& child) { return std::make_unique<column>(child, stream, mr); };
auto begin = thrust::make_transform_iterator(view.child_begin(), op);
auto children = std::vector<std::unique_ptr<column>>(begin, begin + view.num_children());
return std::make_unique<column>(
view.type(),
view.size(),
rmm::device_buffer{
static_cast<char const*>(view.head()) + (view.offset() * cudf::size_of(view.type())),
view.size() * cudf::size_of(view.type()),
stream,
mr},
cudf::detail::copy_bitmask(view, stream, mr),
view.null_count(),
std::move(children));
}
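  // Worked example (editorial note): for an INT32 view with offset() == 5 and size() == 10, the
  // copy above starts at head() + 5 * 4 bytes and spans 10 * 4 = 40 bytes, so the resulting
  // column materializes the slice and its own offset becomes 0.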
template <typename ColumnType,
std::enable_if_t<std::is_same_v<ColumnType, cudf::list_view>>* = nullptr>
std::unique_ptr<column> operator()()
{
auto lists_view = lists_column_view(view);
return cudf::lists::detail::copy_slice(lists_view, 0, view.size(), stream, mr);
}
template <typename ColumnType,
std::enable_if_t<std::is_same_v<ColumnType, cudf::struct_view>>* = nullptr>
std::unique_ptr<column> operator()()
{
if (view.is_empty()) { return cudf::empty_like(view); }
std::vector<std::unique_ptr<column>> children;
children.reserve(view.num_children());
auto begin = view.offset();
auto end = begin + view.size();
std::transform(view.child_begin(),
view.child_end(),
std::back_inserter(children),
[begin, end, stream = this->stream, mr = this->mr](auto child) {
return std::make_unique<column>(
cudf::detail::slice(child, begin, end, stream), stream, mr);
});
auto num_rows = view.size();
return make_structs_column(num_rows,
std::move(children),
view.null_count(),
cudf::detail::copy_bitmask(view.null_mask(), begin, end, stream, mr),
stream,
mr);
}
};
} // anonymous namespace
// Copy from a view
column::column(column_view view, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
: // Move is needed here because the dereference operator of unique_ptr returns
// an lvalue reference, which would otherwise dispatch to the copy constructor
column{std::move(*type_dispatcher(view.type(), create_column_from_view{view, stream, mr}))}
{
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/column/column_device_view.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <numeric>
namespace cudf {
// Trivially copy all members but the children
column_device_view::column_device_view(column_view source)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
}
// Free device memory allocated for children
void column_device_view::destroy() { delete this; }
namespace {
// helper function for column_device_view::create and mutable_column_device_view::create methods
template <typename ColumnView, typename ColumnDeviceView>
std::unique_ptr<ColumnDeviceView, std::function<void(ColumnDeviceView*)>>
create_device_view_from_view(ColumnView const& source, rmm::cuda_stream_view stream)
{
size_type num_children = source.num_children();
// First calculate the size of memory needed to hold the child columns. This is done by calling
// extent() for each of the children.
auto get_extent = cudf::detail::make_counting_transform_iterator(
0, [&source](auto i) { return ColumnDeviceView::extent(source.child(i)); });
// pad the allocation for aligning the first pointer
auto const descendant_storage_bytes = std::accumulate(
get_extent, get_extent + num_children, std::size_t{alignof(ColumnDeviceView) - 1});
// A buffer of CPU memory is allocated to hold the ColumnDeviceView
// objects. Once filled, the CPU memory is copied to device memory
// and then set into the d_children member pointer.
std::vector<char> staging_buffer(descendant_storage_bytes);
// Each ColumnDeviceView instance may have child objects that
// require setting some internal device pointers before being copied
// from CPU to device.
rmm::device_buffer* const descendant_storage =
new rmm::device_buffer(descendant_storage_bytes, stream);
auto deleter = [descendant_storage](ColumnDeviceView* v) {
v->destroy();
delete descendant_storage;
};
std::unique_ptr<ColumnDeviceView, decltype(deleter)> result{
new ColumnDeviceView(source, staging_buffer.data(), descendant_storage->data()), deleter};
// copy the CPU memory with all the children into device memory
CUDF_CUDA_TRY(cudaMemcpyAsync(descendant_storage->data(),
staging_buffer.data(),
descendant_storage->size(),
cudaMemcpyDefault,
stream.value()));
stream.synchronize();
return result;
}
} // namespace
// Place any child objects in host memory (h_ptr) and use the device
// memory ptr (d_ptr) to set any child object pointers.
column_device_view::column_device_view(column_view source, void* h_ptr, void* d_ptr)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
d_children = detail::child_columns_to_device_array<column_device_view>(
source.child_begin(), source.child_end(), h_ptr, d_ptr);
}
// Construct a unique_ptr that invokes `destroy()` as its deleter
std::unique_ptr<column_device_view, std::function<void(column_device_view*)>>
column_device_view::create(column_view source, rmm::cuda_stream_view stream)
{
size_type num_children = source.num_children();
if (num_children == 0) {
// Can't use make_unique since the ctor is protected
return std::unique_ptr<column_device_view>(new column_device_view(source));
}
return create_device_view_from_view<column_view, column_device_view>(source, stream);
}
std::size_t column_device_view::extent(column_view const& source)
{
auto get_extent = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); });
return std::accumulate(
get_extent, get_extent + source.num_children(), sizeof(column_device_view));
}
// For use with inplace-new to pre-fill memory to be copied to device
mutable_column_device_view::mutable_column_device_view(mutable_column_view source)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
}
mutable_column_device_view::mutable_column_device_view(mutable_column_view source,
void* h_ptr,
void* d_ptr)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
d_children = detail::child_columns_to_device_array<mutable_column_device_view>(
source.child_begin(), source.child_end(), h_ptr, d_ptr);
}
// Handle freeing children
void mutable_column_device_view::destroy() { delete this; }
// Construct a unique_ptr that invokes `destroy()` as its deleter
std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>>
mutable_column_device_view::create(mutable_column_view source, rmm::cuda_stream_view stream)
{
return source.num_children() == 0
? std::unique_ptr<mutable_column_device_view>(new mutable_column_device_view(source))
: create_device_view_from_view<mutable_column_view, mutable_column_device_view>(source,
stream);
}
std::size_t mutable_column_device_view::extent(mutable_column_view source)
{
auto get_extent = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); });
return std::accumulate(
get_extent, get_extent + source.num_children(), sizeof(mutable_column_device_view));
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/column/column_view.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/transform_iterator.h>
#include <algorithm>
#include <exception>
#include <numeric>
#include <vector>
namespace cudf {
namespace detail {
column_view_base::column_view_base(data_type type,
size_type size,
void const* data,
bitmask_type const* null_mask,
size_type null_count,
size_type offset)
: _type{type},
_size{size},
_data{data},
_null_mask{null_mask},
_null_count{null_count},
_offset{offset}
{
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
if (type.id() == type_id::EMPTY) {
_null_count = size;
CUDF_EXPECTS(nullptr == data, "EMPTY column should have no data.");
CUDF_EXPECTS(nullptr == null_mask, "EMPTY column should have no null mask.");
} else if (is_compound(type)) {
CUDF_EXPECTS(nullptr == data, "Compound (parent) columns cannot have data");
} else if (size > 0) {
CUDF_EXPECTS(nullptr != data, "Null data pointer.");
}
CUDF_EXPECTS(offset >= 0, "Invalid offset.");
if ((null_count > 0) and (type.id() != type_id::EMPTY)) {
CUDF_EXPECTS(nullptr != null_mask, "Invalid null mask for non-zero null count.");
}
}
size_type column_view_base::null_count(size_type begin, size_type end) const
{
CUDF_EXPECTS((begin >= 0) && (end <= size()) && (begin <= end), "Range is out of bounds.");
return (null_count() == 0)
? 0
: cudf::detail::null_count(
null_mask(), offset() + begin, offset() + end, cudf::get_default_stream());
}
// Struct to use custom hash combine and fold expression
struct HashValue {
std::size_t hash;
explicit HashValue(std::size_t h) : hash{h} {}
HashValue operator^(HashValue const& other) const
{
return HashValue{cudf::hashing::detail::hash_combine(hash, other.hash)};
}
};
template <typename... Ts>
constexpr auto hash(Ts&&... ts)
{
return (... ^ HashValue(std::hash<Ts>{}(ts))).hash;
}
std::size_t shallow_hash_impl(column_view const& c, bool is_parent_empty = false)
{
std::size_t const init = (is_parent_empty or c.is_empty())
? hash(c.type(), 0)
: hash(c.type(), c.size(), c.head(), c.null_mask(), c.offset());
return std::accumulate(c.child_begin(),
c.child_end(),
init,
[&c, is_parent_empty](std::size_t hash, auto const& child) {
return cudf::hashing::detail::hash_combine(
hash, shallow_hash_impl(child, c.is_empty() or is_parent_empty));
});
}
std::size_t shallow_hash(column_view const& input) { return shallow_hash_impl(input); }
bool shallow_equivalent_impl(column_view const& lhs,
column_view const& rhs,
bool is_parent_empty = false)
{
bool const is_empty = (lhs.is_empty() and rhs.is_empty()) or is_parent_empty;
return (lhs.type() == rhs.type()) and
(is_empty or ((lhs.size() == rhs.size()) and (lhs.head() == rhs.head()) and
(lhs.null_mask() == rhs.null_mask()) and (lhs.offset() == rhs.offset()))) and
std::equal(lhs.child_begin(),
lhs.child_end(),
rhs.child_begin(),
rhs.child_end(),
[is_empty](auto const& lhs_child, auto const& rhs_child) {
return shallow_equivalent_impl(lhs_child, rhs_child, is_empty);
});
}
bool is_shallow_equivalent(column_view const& lhs, column_view const& rhs)
{
return shallow_equivalent_impl(lhs, rhs);
}
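// Editorial note: because only the type, size, data and null-mask pointers, offset, and the
// children's shallow state are hashed/compared, two column_views over the same physical column
// are shallow-equivalent, while a deep copy holding identical values generally is not (its
// head() pointer differs).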
} // namespace detail
// Immutable view constructor
column_view::column_view(data_type type,
size_type size,
void const* data,
bitmask_type const* null_mask,
size_type null_count,
size_type offset,
std::vector<column_view> const& children)
: detail::column_view_base{type, size, data, null_mask, null_count, offset}, _children{children}
{
if (type.id() == type_id::EMPTY) {
CUDF_EXPECTS(num_children() == 0, "EMPTY column cannot have children.");
}
}
// Mutable view constructor
mutable_column_view::mutable_column_view(data_type type,
size_type size,
void* data,
bitmask_type* null_mask,
size_type null_count,
size_type offset,
std::vector<mutable_column_view> const& children)
: detail::column_view_base{type, size, data, null_mask, null_count, offset},
mutable_children{children}
{
if (type.id() == type_id::EMPTY) {
CUDF_EXPECTS(num_children() == 0, "EMPTY column cannot have children.");
}
}
// Update the null count
void mutable_column_view::set_null_count(size_type new_null_count)
{
if (new_null_count > 0) { CUDF_EXPECTS(nullable(), "Invalid null count."); }
_null_count = new_null_count;
}
// Conversion from mutable to immutable
mutable_column_view::operator column_view() const
{
// Convert children to immutable views
std::vector<column_view> child_views(num_children());
std::copy(std::cbegin(mutable_children), std::cend(mutable_children), std::begin(child_views));
return column_view{_type, _size, _data, _null_mask, _null_count, _offset, std::move(child_views)};
}
size_type count_descendants(column_view parent)
{
auto descendants = [](auto const& child) { return count_descendants(child); };
auto begin = thrust::make_transform_iterator(parent.child_begin(), descendants);
return std::accumulate(begin, begin + parent.num_children(), size_type{parent.num_children()});
}
column_view bit_cast(column_view const& input, data_type type)
{
CUDF_EXPECTS(is_bit_castable(input._type, type), "types are not bit-castable");
return column_view{type,
input._size,
input._data,
input._null_mask,
input._null_count,
input._offset,
input._children};
}
mutable_column_view bit_cast(mutable_column_view const& input, data_type type)
{
CUDF_EXPECTS(is_bit_castable(input._type, type), "types are not bit-castable");
return mutable_column_view{type,
input._size,
const_cast<void*>(input._data),
const_cast<cudf::bitmask_type*>(input._null_mask),
input._null_count,
input._offset,
input.mutable_children};
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/column/column_factories.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/fill.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/lists/detail/lists_column_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/fill.hpp>
#include <thrust/iterator/constant_iterator.h>
namespace cudf {
namespace {
struct column_from_scalar_dispatch {
template <typename T>
std::unique_ptr<cudf::column> operator()(scalar const& value,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (size == 0) return make_empty_column(value.type());
if (!value.is_valid(stream))
return make_fixed_width_column(value.type(), size, mask_state::ALL_NULL, stream, mr);
auto output_column =
make_fixed_width_column(value.type(), size, mask_state::UNALLOCATED, stream, mr);
auto view = output_column->mutable_view();
detail::fill_in_place(view, 0, size, value, stream);
return output_column;
}
};
template <>
std::unique_ptr<cudf::column> column_from_scalar_dispatch::operator()<cudf::string_view>(
scalar const& value,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (size == 0) return make_empty_column(value.type());
  // Since we are setting every row to the scalar, the fill() never needs to access
  // any of the children in the strings column, which would otherwise cause an exception.
column_view sc{value.type(), size, nullptr, nullptr, 0};
auto& sv = static_cast<scalar_type_t<cudf::string_view> const&>(value);
// fill the column with the scalar
auto output = strings::detail::fill(strings_column_view(sc), 0, size, sv, stream, mr);
return output;
}
template <>
std::unique_ptr<cudf::column> column_from_scalar_dispatch::operator()<cudf::dictionary32>(
scalar const&, size_type, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const
{
CUDF_FAIL("dictionary not supported when creating from scalar");
}
template <>
std::unique_ptr<cudf::column> column_from_scalar_dispatch::operator()<cudf::list_view>(
scalar const& value,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto lv = static_cast<list_scalar const*>(&value);
return lists::detail::make_lists_column_from_scalar(*lv, size, stream, mr);
}
template <>
std::unique_ptr<cudf::column> column_from_scalar_dispatch::operator()<cudf::struct_view>(
scalar const& value,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (size == 0) CUDF_FAIL("0-length struct column is unsupported.");
auto& ss = static_cast<scalar_type_t<cudf::struct_view> const&>(value);
auto iter = thrust::make_constant_iterator(0);
auto children =
detail::gather(ss.view(), iter, iter + size, out_of_bounds_policy::NULLIFY, stream, mr);
auto const is_valid = ss.is_valid(stream);
return make_structs_column(size,
std::move(children->release()),
is_valid ? 0 : size,
is_valid
? rmm::device_buffer{}
: detail::create_null_mask(size, mask_state::ALL_NULL, stream, mr),
stream,
mr);
}
} // anonymous namespace
std::unique_ptr<column> make_column_from_scalar(scalar const& s,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(s.type(), column_from_scalar_dispatch{}, s, size, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/column/column_factories.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/fill.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/detail/fill.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/constant_iterator.h>
namespace cudf {
namespace {
struct size_of_helper {
cudf::data_type type;
template <typename T, std::enable_if_t<not is_fixed_width<T>()>* = nullptr>
constexpr int operator()() const
{
CUDF_FAIL("Invalid, non fixed-width element type.");
return 0;
}
template <typename T, std::enable_if_t<is_fixed_width<T>() && not is_fixed_point<T>()>* = nullptr>
constexpr int operator()() const noexcept
{
return sizeof(T);
}
template <typename T, std::enable_if_t<is_fixed_point<T>()>* = nullptr>
constexpr int operator()() const noexcept
{
// Only want the sizeof fixed_point::Rep as fixed_point::scale is stored in data_type
return sizeof(typename T::rep);
}
};
} // namespace
std::size_t size_of(data_type element_type)
{
CUDF_EXPECTS(is_fixed_width(element_type), "Invalid element type.");
return cudf::type_dispatcher(element_type, size_of_helper{element_type});
}
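// Worked examples (editorial note): size_of(data_type{type_id::INT32}) == 4 and
// size_of(data_type{type_id::DECIMAL64, -2}) == 8 (only the fixed_point rep is counted);
// a non-fixed-width type such as STRING fails the CUDF_EXPECTS check above.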
// Empty column of specified type
std::unique_ptr<column> make_empty_column(data_type type)
{
CUDF_EXPECTS(type.id() == type_id::EMPTY || !cudf::is_nested(type),
"make_empty_column is invalid to call on nested types");
return std::make_unique<column>(type, 0, rmm::device_buffer{}, rmm::device_buffer{}, 0);
}
// Empty column of specified type id
std::unique_ptr<column> make_empty_column(type_id id) { return make_empty_column(data_type{id}); }
// Allocate storage for a specified number of numeric elements
std::unique_ptr<column> make_numeric_column(data_type type,
size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(is_numeric(type), "Invalid, non-numeric type.");
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
return std::make_unique<column>(
type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::create_null_mask(size, state, stream, mr),
state == mask_state::UNINITIALIZED ? 0 : state_null_count(state, size),
std::vector<std::unique_ptr<column>>{});
}
// Allocate storage for a specified number of numeric elements
std::unique_ptr<column> make_fixed_point_column(data_type type,
size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(is_fixed_point(type), "Invalid, non-fixed_point type.");
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
return std::make_unique<column>(
type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::create_null_mask(size, state, stream, mr),
state == mask_state::UNINITIALIZED ? 0 : state_null_count(state, size),
std::vector<std::unique_ptr<column>>{});
}
// Allocate storage for a specified number of timestamp elements
std::unique_ptr<column> make_timestamp_column(data_type type,
size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(is_timestamp(type), "Invalid, non-timestamp type.");
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
return std::make_unique<column>(
type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::create_null_mask(size, state, stream, mr),
state == mask_state::UNINITIALIZED ? 0 : state_null_count(state, size),
std::vector<std::unique_ptr<column>>{});
}
// Allocate storage for a specified number of duration elements
std::unique_ptr<column> make_duration_column(data_type type,
size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(is_duration(type), "Invalid, non-duration type.");
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
return std::make_unique<column>(
type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::create_null_mask(size, state, stream, mr),
state == mask_state::UNINITIALIZED ? 0 : state_null_count(state, size),
std::vector<std::unique_ptr<column>>{});
}
// Allocate storage for a specified number of fixed width elements
std::unique_ptr<column> make_fixed_width_column(data_type type,
size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(is_fixed_width(type), "Invalid, non-fixed-width type.");
// clang-format off
if (is_timestamp (type)) return make_timestamp_column (type, size, state, stream, mr);
else if (is_duration (type)) return make_duration_column (type, size, state, stream, mr);
else if (is_fixed_point(type)) return make_fixed_point_column(type, size, state, stream, mr);
else return make_numeric_column (type, size, state, stream, mr);
// clang-format on
}
std::unique_ptr<column> make_dictionary_from_scalar(scalar const& s,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (size == 0) return make_empty_column(type_id::DICTIONARY32);
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
CUDF_EXPECTS(s.is_valid(stream), "cannot create a dictionary with a null key");
return make_dictionary_column(
make_column_from_scalar(s, 1, stream, mr),
make_column_from_scalar(numeric_scalar<uint32_t>(0), size, stream, mr),
rmm::device_buffer{0, stream, mr},
0);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/merge/merge.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/merge.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/dictionary/detail/merge.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/detail/merge.cuh>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <limits>
#include <numeric>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/merge.h>
#include <thrust/pair.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <queue>
#include <vector>
namespace cudf {
namespace detail {
namespace {
template <bool has_nulls>
struct row_lexicographic_tagged_comparator {
row_lexicographic_tagged_comparator(table_device_view const lhs,
table_device_view const rhs,
device_span<order const> const column_order,
device_span<null_order const> const null_precedence)
: _lhs{lhs}, _rhs{rhs}, _column_order{column_order}, _null_precedence{null_precedence}
{
}
__device__ bool operator()(index_type lhs_tagged_index,
index_type rhs_tagged_index) const noexcept
{
auto const [l_side, l_indx] = lhs_tagged_index;
auto const [r_side, r_indx] = rhs_tagged_index;
table_device_view const* ptr_left_dview{l_side == side::LEFT ? &_lhs : &_rhs};
table_device_view const* ptr_right_dview{r_side == side::LEFT ? &_lhs : &_rhs};
auto const comparator = [&]() {
if constexpr (has_nulls) {
return cudf::experimental::row::lexicographic::device_row_comparator<false, bool>{
has_nulls, *ptr_left_dview, *ptr_right_dview, _column_order, _null_precedence};
} else {
return cudf::experimental::row::lexicographic::device_row_comparator<false, bool>{
has_nulls, *ptr_left_dview, *ptr_right_dview, _column_order};
}
}();
return comparator(l_indx, r_indx) == weak_ordering::LESS;
}
private:
table_device_view const _lhs;
table_device_view const _rhs;
device_span<null_order const> const _null_precedence;
device_span<order const> const _column_order;
};
using detail::side;
using index_type = detail::index_type;
/**
* @brief Merges the bits of two validity bitmasks.
*
* Merges the bits from two column_device_views into the destination validity buffer
* according to `merged_indices` map such that bit `i` in `out_validity`
* will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol`
* if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise,
* from `right_dcol`.
*
* `left_dcol` and `right_dcol` must not overlap.
*
 * @tparam left_have_valids Indicates whether left_dcol has a validity mask (false means the mask
 * is unallocated, hence ALL_VALID)
 * @tparam right_have_valids Indicates whether right_dcol has a validity mask (false means the
 * mask is unallocated, hence ALL_VALID)
* @param[in] left_dcol The left column_device_view whose bits will be merged
* @param[in] right_dcol The right column_device_view whose bits will be merged
* @param[out] out_validity The output validity buffer after merging the left and right buffers
* @param[in] num_destination_rows The number of rows in the out_validity buffer
* @param[in] merged_indices The map that indicates the source of the input and index
* to be copied to the output. Length must be equal to `num_destination_rows`
*/
template <bool left_have_valids, bool right_have_valids>
__global__ void materialize_merged_bitmask_kernel(
column_device_view left_dcol,
column_device_view right_dcol,
bitmask_type* out_validity,
size_type const num_destination_rows,
index_type const* const __restrict__ merged_indices)
{
auto const stride = detail::grid_1d::grid_stride();
auto tid = detail::grid_1d::global_thread_id();
auto active_threads = __ballot_sync(0xffff'ffffu, tid < num_destination_rows);
while (tid < num_destination_rows) {
auto const destination_row = static_cast<size_type>(tid);
auto const [src_side, src_row] = merged_indices[destination_row];
bool const from_left{src_side == side::LEFT};
bool source_bit_is_valid{true};
if (left_have_valids && from_left) {
source_bit_is_valid = left_dcol.is_valid_nocheck(src_row);
} else if (right_have_valids && !from_left) {
source_bit_is_valid = right_dcol.is_valid_nocheck(src_row);
}
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)};
// Only one thread writes output
if (0 == threadIdx.x % warpSize) { out_validity[word_index(destination_row)] = result_mask; }
tid += stride;
active_threads = __ballot_sync(active_threads, tid < num_destination_rows);
}
}
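// Worked example (illustrative only): for merged_indices = {(LEFT,0), (RIGHT,0), (LEFT,1)},
// output bit 0 copies the validity of left row 0, bit 1 copies right row 0, and bit 2 copies
// left row 1. Each warp packs its per-row validity flags into one bitmask word via
// __ballot_sync, and lane 0 of the warp writes that word into out_validity.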
void materialize_bitmask(column_view const& left_col,
column_view const& right_col,
bitmask_type* out_validity,
size_type num_elements,
index_type const* merged_indices,
rmm::cuda_stream_view stream)
{
constexpr size_type BLOCK_SIZE{256};
detail::grid_1d grid_config{num_elements, BLOCK_SIZE};
auto p_left_dcol = column_device_view::create(left_col, stream);
auto p_right_dcol = column_device_view::create(right_col, stream);
auto left_valid = *p_left_dcol;
auto right_valid = *p_right_dcol;
if (left_col.has_nulls()) {
if (right_col.has_nulls()) {
materialize_merged_bitmask_kernel<true, true>
<<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>(
left_valid, right_valid, out_validity, num_elements, merged_indices);
} else {
materialize_merged_bitmask_kernel<true, false>
<<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>(
left_valid, right_valid, out_validity, num_elements, merged_indices);
}
} else {
if (right_col.has_nulls()) {
materialize_merged_bitmask_kernel<false, true>
<<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>(
left_valid, right_valid, out_validity, num_elements, merged_indices);
} else {
CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called.");
}
}
CUDF_CHECK_CUDA(stream.value());
}
struct side_index_generator {
side _side;
__device__ index_type operator()(size_type i) const noexcept { return index_type{_side, i}; }
};
/**
* @brief Generates the row indices and source side (left or right) in accordance with the index
* columns.
 *
 * The returned vector holds `index_type` elements that record the source side and the row index
 * within that side.
 *
* @param[in] left_table The left table_view to be merged
* @param[in] right_table The right table_view to be merged
* @param[in] column_order Sort order types of index columns
* @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the
* index columns
 * @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return A device_uvector of merged indices
*/
index_vector generate_merged_indices(table_view const& left_table,
table_view const& right_table,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
bool nullable,
rmm::cuda_stream_view stream)
{
size_type const left_size = left_table.num_rows();
size_type const right_size = right_table.num_rows();
size_type const total_size = left_size + right_size;
auto left_gen = side_index_generator{side::LEFT};
auto right_gen = side_index_generator{side::RIGHT};
auto left_begin = cudf::detail::make_counting_transform_iterator(0, left_gen);
auto right_begin = cudf::detail::make_counting_transform_iterator(0, right_gen);
index_vector merged_indices(total_size, stream);
auto const has_nulls =
nullate::DYNAMIC{cudf::has_nulls(left_table) or cudf::has_nulls(right_table)};
auto lhs_device_view = table_device_view::create(left_table, stream);
auto rhs_device_view = table_device_view::create(right_table, stream);
auto d_column_order = cudf::detail::make_device_uvector_async(
column_order, stream, rmm::mr::get_current_device_resource());
if (has_nulls) {
auto const new_null_precedence = [&]() {
if (null_precedence.size() > 0) {
CUDF_EXPECTS(static_cast<size_type>(null_precedence.size()) == left_table.num_columns(),
"Null precedence vector size mismatched");
return null_precedence;
} else {
return std::vector<null_order>(left_table.num_columns(), null_order::BEFORE);
}
}();
auto d_null_precedence = cudf::detail::make_device_uvector_async(
new_null_precedence, stream, rmm::mr::get_current_device_resource());
auto ineq_op = detail::row_lexicographic_tagged_comparator<true>(
*lhs_device_view, *rhs_device_view, d_column_order, d_null_precedence);
thrust::merge(rmm::exec_policy(stream),
left_begin,
left_begin + left_size,
right_begin,
right_begin + right_size,
merged_indices.begin(),
ineq_op);
} else {
auto ineq_op = detail::row_lexicographic_tagged_comparator<false>(
*lhs_device_view, *rhs_device_view, d_column_order, {});
thrust::merge(rmm::exec_policy(stream),
left_begin,
left_begin + left_size,
right_begin,
right_begin + right_size,
merged_indices.begin(),
ineq_op);
}
CUDF_CHECK_CUDA(stream.value());
return merged_indices;
}
index_vector generate_merged_indices_nested(table_view const& left_table,
table_view const& right_table,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
bool nullable,
rmm::cuda_stream_view stream)
{
size_type const left_size = left_table.num_rows();
size_type const right_size = right_table.num_rows();
size_type const total_size = left_size + right_size;
index_vector merged_indices(total_size, stream);
auto const left_indices_col = cudf::detail::lower_bound(right_table,
left_table,
column_order,
null_precedence,
stream,
rmm::mr::get_current_device_resource());
auto const left_indices = left_indices_col->view();
auto left_indices_mutable = left_indices_col->mutable_view();
auto const left_indices_begin = left_indices.begin<cudf::size_type>();
auto const left_indices_end = left_indices.end<cudf::size_type>();
auto left_indices_mutable_begin = left_indices_mutable.begin<cudf::size_type>();
auto const total_counter = thrust::make_counting_iterator(0);
thrust::for_each(
rmm::exec_policy_nosync(stream),
total_counter,
total_counter + total_size,
[merged = merged_indices.data(), left = left_indices_begin, left_size, right_size] __device__(
auto const idx) {
// We split threads into two groups, so only one kernel is needed.
// Threads in [0, right_size) will insert right indices in sorted order.
// Threads in [right_size, total_size) will insert left indices in sorted order.
if (idx < right_size) {
// this tells us between which segments of left elements a right element
// would fall
auto const r_bound = thrust::upper_bound(thrust::seq, left, left + left_size, idx);
auto const r_segment = thrust::distance(left, r_bound);
merged[r_segment + idx] = thrust::make_pair(side::RIGHT, idx);
} else {
auto const left_idx = idx - right_size;
merged[left[left_idx] + left_idx] = thrust::make_pair(side::LEFT, left_idx);
}
});
return merged_indices;
}
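// Worked example (illustrative only; simple single-column keys are used just to show the index
// arithmetic):
//   left keys  = [2, 5]      (left_size  = 2)
//   right keys = [1, 3, 4]   (right_size = 3)
// lower_bound(right, left) yields left_indices = [1, 3], i.e. how many right rows precede each
// left row in the merged order. The kernel then scatters:
//   idx = 0 (right row 0): upper_bound(left_indices, 0) = 0 -> merged[0 + 0] = (RIGHT, 0)
//   idx = 1 (right row 1): upper_bound(left_indices, 1) = 1 -> merged[1 + 1] = (RIGHT, 1)
//   idx = 2 (right row 2): upper_bound(left_indices, 2) = 1 -> merged[1 + 2] = (RIGHT, 2)
//   idx = 3 (left row 0) : merged[left_indices[0] + 0]      -> merged[1]     = (LEFT, 0)
//   idx = 4 (left row 1) : merged[left_indices[1] + 1]      -> merged[4]     = (LEFT, 1)
// giving [(RIGHT,0), (LEFT,0), (RIGHT,1), (RIGHT,2), (LEFT,1)], i.e. keys [1, 2, 3, 4, 5].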
/**
* @brief Generate merged column given row-order of merged tables
* (ordered according to indices of key_cols) and the 2 columns to merge.
*/
struct column_merger {
explicit column_merger(index_vector const& row_order) : row_order_(row_order) {}
template <typename Element, CUDF_ENABLE_IF(not is_rep_layout_compatible<Element>())>
std::unique_ptr<column> operator()(column_view const&,
column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL("Unsupported type for merge.");
}
// column merger operator;
//
template <typename Element>
std::enable_if_t<is_rep_layout_compatible<Element>(), std::unique_ptr<column>> operator()(
column_view const& lcol,
column_view const& rcol,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto lsz = lcol.size();
auto merged_size = lsz + rcol.size();
auto merged_col = cudf::detail::allocate_like(lcol.has_nulls() ? lcol : rcol,
merged_size,
cudf::mask_allocation_policy::RETAIN,
stream,
mr);
//"gather" data from lcol, rcol according to row_order_ "map"
//(directly calling gather() won't work because
// lcol, rcol indices overlap!)
//
cudf::mutable_column_view merged_view = merged_col->mutable_view();
// initialize null_mask to all valid:
//
// Note: this initialization in conjunction with
// _conditionally_ calling materialize_bitmask() below covers
// the case materialize_merged_bitmask_kernel<false, false>()
// which won't be called anymore (because of the _condition_
// below)
//
cudf::detail::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream);
// set the null count:
//
merged_col->set_null_count(lcol.null_count() + rcol.null_count());
// to resolve view.data()'s types use: Element
//
auto const d_lcol = lcol.data<Element>();
auto const d_rcol = rcol.data<Element>();
// capture lcol, rcol
// and "gather" into merged_view.data()[indx_merged]
// from lcol or rcol, depending on side;
//
thrust::transform(rmm::exec_policy(stream),
row_order_.begin(),
row_order_.end(),
merged_view.begin<Element>(),
[d_lcol, d_rcol] __device__(index_type const& index_pair) {
auto const [side, index] = index_pair;
return side == side::LEFT ? d_lcol[index] : d_rcol[index];
});
    // CAVEAT: the conditional call below is erroneous without the
    // set_null_mask() call (see the note above):
//
if (lcol.has_nulls() || rcol.has_nulls()) {
// resolve null mask:
//
materialize_bitmask(
lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream);
}
return merged_col;
}
private:
index_vector const& row_order_;
};
// specialization for strings
template <>
std::unique_ptr<column> column_merger::operator()<cudf::string_view>(
column_view const& lcol,
column_view const& rcol,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto column = strings::detail::merge<index_type>(strings_column_view(lcol),
strings_column_view(rcol),
row_order_.begin(),
row_order_.end(),
stream,
mr);
if (lcol.has_nulls() || rcol.has_nulls()) {
auto merged_view = column->mutable_view();
materialize_bitmask(
lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream);
}
return column;
}
// specialization for dictionary
template <>
std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>(
column_view const& lcol,
column_view const& rcol,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto result = cudf::dictionary::detail::merge(
cudf::dictionary_column_view(lcol), cudf::dictionary_column_view(rcol), row_order_, stream, mr);
// set the validity mask
if (lcol.has_nulls() || rcol.has_nulls()) {
auto merged_view = result->mutable_view();
materialize_bitmask(
lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream);
}
return result;
}
// specialization for lists
template <>
std::unique_ptr<column> column_merger::operator()<cudf::list_view>(
column_view const& lcol,
column_view const& rcol,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
std::vector<column_view> columns{lcol, rcol};
auto concatenated_list = cudf::lists::detail::concatenate(columns, stream, mr);
auto const iter_gather = cudf::detail::make_counting_transform_iterator(
0, [row_order = row_order_.data(), lsize = lcol.size()] __device__(auto const idx) {
auto const [side, index] = row_order[idx];
return side == side::LEFT ? index : lsize + index;
});
auto result = cudf::detail::gather(table_view{{concatenated_list->view()}},
iter_gather,
iter_gather + concatenated_list->size(),
out_of_bounds_policy::DONT_CHECK,
stream,
mr);
return std::move(result->release()[0]);
}
// specialization for structs
template <>
std::unique_ptr<column> column_merger::operator()<cudf::struct_view>(
column_view const& lcol,
column_view const& rcol,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// merge each child.
auto const lhs = structs_column_view{lcol};
auto const rhs = structs_column_view{rcol};
auto it = cudf::detail::make_counting_transform_iterator(
0, [&, merger = column_merger{row_order_}](size_type i) {
return cudf::type_dispatcher<dispatch_storage_type>(lhs.child(i).type(),
merger,
lhs.get_sliced_child(i, stream),
rhs.get_sliced_child(i, stream),
stream,
mr);
});
auto merged_children = std::vector<std::unique_ptr<column>>(it, it + lhs.num_children());
auto const merged_size = lcol.size() + rcol.size();
// materialize the output buffer
rmm::device_buffer validity =
lcol.has_nulls() || rcol.has_nulls()
? detail::create_null_mask(merged_size, mask_state::UNINITIALIZED, stream, mr)
: rmm::device_buffer{};
if (lcol.has_nulls() || rcol.has_nulls()) {
materialize_bitmask(lcol,
rcol,
static_cast<bitmask_type*>(validity.data()),
merged_size,
row_order_.data(),
stream);
}
return make_structs_column(merged_size,
std::move(merged_children),
lcol.null_count() + rcol.null_count(),
std::move(validity),
stream,
mr);
}
using table_ptr_type = std::unique_ptr<cudf::table>;
table_ptr_type merge(cudf::table_view const& left_table,
cudf::table_view const& right_table,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// collect index columns for lhs, rhs, resp.
//
cudf::table_view index_left_view{left_table.select(key_cols)};
cudf::table_view index_right_view{right_table.select(key_cols)};
bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view);
// extract merged row order according to indices:
//
auto const merged_indices = [&]() {
if (cudf::detail::has_nested_columns(left_table) or
cudf::detail::has_nested_columns(right_table)) {
return generate_merged_indices_nested(
index_left_view, index_right_view, column_order, null_precedence, nullable, stream);
} else {
return generate_merged_indices(
index_left_view, index_right_view, column_order, null_precedence, nullable, stream);
}
}();
// create merged table:
//
auto const n_cols = left_table.num_columns();
std::vector<std::unique_ptr<column>> merged_cols;
merged_cols.reserve(n_cols);
column_merger merger{merged_indices};
  std::transform(left_table.begin(),
left_table.end(),
right_table.begin(),
std::back_inserter(merged_cols),
[&](auto const& left_col, auto const& right_col) {
return cudf::type_dispatcher<dispatch_storage_type>(
left_col.type(), merger, left_col, right_col, stream, mr);
});
return std::make_unique<cudf::table>(std::move(merged_cols));
}
struct merge_queue_item {
table_view view;
table_ptr_type table;
// Priority is a separate member to ensure that moving from an object
// does not change its priority (which would ruin the queue invariant)
cudf::size_type priority = 0;
merge_queue_item(table_view const& view, table_ptr_type&& table)
: view{view}, table{std::move(table)}, priority{-view.num_rows()}
{
}
bool operator<(merge_queue_item const& other) const { return priority < other.priority; }
};
// Helper function to ensure that moving out of the priority_queue is "atomic"
template <typename T>
T top_and_pop(std::priority_queue<T>& q)
{
auto moved = std::move(const_cast<T&>(q.top()));
q.pop();
return moved;
}
} // anonymous namespace
table_ptr_type merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); }
auto const& first_table = tables_to_merge.front();
auto const n_cols = first_table.num_columns();
CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(),
tables_to_merge.cend(),
[n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }),
"Mismatched number of columns");
CUDF_EXPECTS(
std::all_of(tables_to_merge.cbegin(),
tables_to_merge.cend(),
[&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }),
"Mismatched column types");
CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols");
CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols");
CUDF_EXPECTS(key_cols.size() == column_order.size(),
"Mismatched size between key_cols and column_order");
CUDF_EXPECTS(
std::accumulate(tables_to_merge.cbegin(),
tables_to_merge.cend(),
std::size_t{0},
[](auto const& running_sum, auto const& tbl) {
return running_sum + static_cast<std::size_t>(tbl.num_rows());
}) <= static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()),
"Total number of merged rows exceeds row limit");
// This utility will ensure all corresponding dictionary columns have matching keys.
// It will return any new dictionary columns created as well as updated table_views.
auto matched = cudf::dictionary::detail::match_dictionaries(
tables_to_merge, stream, rmm::mr::get_current_device_resource());
auto merge_tables = matched.second;
// A queue of (table view, table) pairs
std::priority_queue<merge_queue_item> merge_queue;
// The table pointer is null if we do not own the table (input tables)
std::for_each(merge_tables.begin(), merge_tables.end(), [&](auto const& table) {
if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type());
});
// If there is only one non-empty table_view, return its copy
if (merge_queue.size() == 1) {
return std::make_unique<cudf::table>(merge_queue.top().view, stream, mr);
}
// No inputs have rows, return a table with same columns as the first one
if (merge_queue.empty()) { return empty_like(first_table); }
// Pick the two smallest tables and merge them
// Until there is only one table left in the queue
while (merge_queue.size() > 1) {
// To delete the intermediate table at the end of the block
auto const left_table = top_and_pop(merge_queue);
// Deallocated at the end of the block
auto const right_table = top_and_pop(merge_queue);
// Only use mr for the output table
auto const& new_tbl_mr = merge_queue.empty() ? mr : rmm::mr::get_current_device_resource();
auto merged_table = merge(left_table.view,
right_table.view,
key_cols,
column_order,
null_precedence,
stream,
new_tbl_mr);
auto const merged_table_view = merged_table->view();
merge_queue.emplace(merged_table_view, std::move(merged_table));
}
return std::move(top_and_pop(merge_queue).table);
}
} // namespace detail
std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::merge(
tables_to_merge, key_cols, column_order, null_precedence, cudf::get_default_stream(), mr);
}
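// Illustrative usage sketch (comment only; `sorted_a`, `sorted_b` and `mr` are placeholder names
// for two table_views already sorted ascending on column 0 and a device_memory_resource*):
//
//   std::vector<cudf::table_view> inputs{sorted_a, sorted_b};
//   auto merged = cudf::merge(inputs,
//                             {0},                       // key column indices
//                             {cudf::order::ASCENDING},  // sort order per key column
//                             {},                        // default null ordering
//                             mr);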
} // namespace cudf
rapidsai_public_repos/cudf/cpp/src/datetime/datetime_ops.cu
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/datetime.hpp>
#include <cudf/detail/datetime.hpp>
#include <cudf/detail/datetime_ops.cuh>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/durations.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace datetime {
namespace detail {
enum class datetime_component {
INVALID = 0,
YEAR,
MONTH,
DAY,
WEEKDAY,
HOUR,
MINUTE,
SECOND,
MILLISECOND,
MICROSECOND,
NANOSECOND
};
enum class rounding_function {
CEIL, ///< Rounds up to the next integer multiple of the provided frequency
FLOOR, ///< Rounds down to the next integer multiple of the provided frequency
ROUND ///< Rounds to the nearest integer multiple of the provided frequency
};
template <datetime_component Component>
struct extract_component_operator {
template <typename Timestamp>
__device__ inline int16_t operator()(Timestamp const ts) const
{
using namespace cuda::std::chrono;
auto days_since_epoch = floor<days>(ts);
auto time_since_midnight = ts - days_since_epoch;
if (time_since_midnight.count() < 0) { time_since_midnight += days(1); }
auto const hrs_ = [&] { return duration_cast<hours>(time_since_midnight); };
auto const mins_ = [&] { return duration_cast<minutes>(time_since_midnight) - hrs_(); };
auto const secs_ = [&] {
return duration_cast<seconds>(time_since_midnight) - hrs_() - mins_();
};
auto const millisecs_ = [&] {
return duration_cast<milliseconds>(time_since_midnight) - hrs_() - mins_() - secs_();
};
auto const microsecs_ = [&] {
return duration_cast<microseconds>(time_since_midnight) - hrs_() - mins_() - secs_() -
millisecs_();
};
auto const nanosecs_ = [&] {
return duration_cast<nanoseconds>(time_since_midnight) - hrs_() - mins_() - secs_() -
millisecs_() - microsecs_();
};
switch (Component) {
case datetime_component::YEAR:
return static_cast<int>(year_month_day(days_since_epoch).year());
case datetime_component::MONTH:
return static_cast<unsigned>(year_month_day(days_since_epoch).month());
case datetime_component::DAY:
return static_cast<unsigned>(year_month_day(days_since_epoch).day());
case datetime_component::WEEKDAY:
return year_month_weekday(days_since_epoch).weekday().iso_encoding();
case datetime_component::HOUR: return hrs_().count();
case datetime_component::MINUTE: return mins_().count();
case datetime_component::SECOND: return secs_().count();
case datetime_component::MILLISECOND: return millisecs_().count();
case datetime_component::MICROSECOND: return microsecs_().count();
case datetime_component::NANOSECOND: return nanosecs_().count();
default: return 0;
}
}
};
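// For example (illustrative only): for a timestamp_s equal to 2023-07-05 13:42:07 UTC,
// extract_component_operator<YEAR> returns 2023, <MONTH> returns 7, <DAY> returns 5,
// <HOUR> returns 13, <MINUTE> returns 42 and <SECOND> returns 7; the sub-second components
// are 0 at seconds resolution.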
// This functor takes the rounding type as runtime info and dispatches to the ceil/floor/round
// function.
template <typename DurationType>
struct RoundFunctor {
template <typename Timestamp>
__device__ inline auto operator()(rounding_function round_kind, Timestamp dt)
{
switch (round_kind) {
case rounding_function::CEIL: return cuda::std::chrono::ceil<DurationType>(dt);
case rounding_function::FLOOR: return cuda::std::chrono::floor<DurationType>(dt);
case rounding_function::ROUND: return cuda::std::chrono::round<DurationType>(dt);
default: CUDF_UNREACHABLE("Unsupported rounding kind.");
}
}
};
struct RoundingDispatcher {
rounding_function round_kind;
rounding_frequency component;
RoundingDispatcher(rounding_function round_kind, rounding_frequency component)
: round_kind(round_kind), component(component)
{
}
template <typename Timestamp>
__device__ inline Timestamp operator()(Timestamp const ts) const
{
switch (component) {
case rounding_frequency::DAY:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_D>{}(round_kind, ts));
case rounding_frequency::HOUR:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_h>{}(round_kind, ts));
case rounding_frequency::MINUTE:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_m>{}(round_kind, ts));
case rounding_frequency::SECOND:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_s>{}(round_kind, ts));
case rounding_frequency::MILLISECOND:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_ms>{}(round_kind, ts));
case rounding_frequency::MICROSECOND:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_us>{}(round_kind, ts));
case rounding_frequency::NANOSECOND:
return time_point_cast<typename Timestamp::duration>(
RoundFunctor<duration_ns>{}(round_kind, ts));
default: CUDF_UNREACHABLE("Unsupported datetime rounding resolution.");
}
}
};
// Number of days until month indexed by leap year and month (0-based index)
static __device__ int16_t const days_until_month[2][13] = {
{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, // For non leap years
{0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} // For leap years
};
// Round up the date to the last day of the month and return the
// date only (without the time component)
struct extract_last_day_of_month {
template <typename Timestamp>
__device__ inline timestamp_D operator()(Timestamp const ts) const
{
using namespace cuda::std::chrono;
year_month_day const ymd(floor<days>(ts));
auto const ymdl = year_month_day_last{ymd.year() / ymd.month() / last};
return timestamp_D{sys_days{ymdl}};
}
};
// Extract the number of days of the month
// A similar operator to `extract_last_day_of_month`, except this returns
// an integer while the other returns a timestamp.
struct days_in_month_op {
template <typename Timestamp>
__device__ inline int16_t operator()(Timestamp const ts) const
{
using namespace cuda::std::chrono;
auto const date = year_month_day(floor<days>(ts));
auto const ymdl = year_month_day_last(date.year() / date.month() / last);
return static_cast<int16_t>(unsigned{ymdl.day()});
}
};
// Extract the day number of the year present in the timestamp
struct extract_day_num_of_year {
template <typename Timestamp>
__device__ inline int16_t operator()(Timestamp const ts) const
{
using namespace cuda::std::chrono;
// Only has the days - time component is chopped off, which is what we want
auto const days_since_epoch = floor<days>(ts);
auto const date = year_month_day(days_since_epoch);
return days_until_month[date.year().is_leap()][unsigned{date.month()} - 1] +
unsigned{date.day()};
}
};
// Extract the quarter to which the timestamp belongs
struct extract_quarter_op {
template <typename Timestamp>
__device__ inline int16_t operator()(Timestamp const ts) const
{
using namespace cuda::std::chrono;
// Only has the days - time component is chopped off, which is what we want
auto const days_since_epoch = floor<days>(ts);
auto const date = year_month_day(days_since_epoch);
auto const month = unsigned{date.month()};
// (x + y - 1) / y = ceil(x/y), where x and y are unsigned. x = month, y = 3
return (month + 2) / 3;
}
};
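// For example: month 1 -> (1 + 2) / 3 = 1 (Q1), month 4 -> 2 (Q2), month 9 -> 3 (Q3),
// and month 12 -> (12 + 2) / 3 = 4 (Q4).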
// Returns true if the year is a leap year
struct is_leap_year_op {
template <typename Timestamp>
__device__ inline bool operator()(Timestamp const ts) const
{
using namespace cuda::std::chrono;
auto const days_since_epoch = floor<days>(ts);
auto const date = year_month_day(days_since_epoch);
return date.year().is_leap();
}
};
// Specific function for applying ceil/floor/round date ops
struct dispatch_round {
template <typename Timestamp>
std::enable_if_t<cudf::is_timestamp<Timestamp>(), std::unique_ptr<cudf::column>> operator()(
rounding_function round_kind,
rounding_frequency component,
cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto size = column.size();
auto output_col_type = data_type{cudf::type_to_id<Timestamp>()};
// Return an empty column if source column is empty
if (size == 0) return make_empty_column(output_col_type);
auto output = make_fixed_width_column(output_col_type,
size,
cudf::detail::copy_bitmask(column, stream, mr),
column.null_count(),
stream,
mr);
thrust::transform(rmm::exec_policy(stream),
column.begin<Timestamp>(),
column.end<Timestamp>(),
output->mutable_view().begin<Timestamp>(),
RoundingDispatcher{round_kind, component});
output->set_null_count(column.null_count());
return output;
}
template <typename Timestamp, typename... Args>
std::enable_if_t<!cudf::is_timestamp<Timestamp>(), std::unique_ptr<cudf::column>> operator()(
Args&&...)
{
CUDF_FAIL("Must be cudf::timestamp");
}
};
// Apply the functor for every element/row in the input column to create the output column
template <typename TransformFunctor, typename OutputColT>
struct launch_functor {
column_view input;
mutable_column_view output;
launch_functor(column_view inp, mutable_column_view out) : input(inp), output(out) {}
template <typename Element>
std::enable_if_t<!cudf::is_timestamp_t<Element>::value, void> operator()(
rmm::cuda_stream_view stream) const
{
CUDF_FAIL("Cannot extract datetime component from non-timestamp column.");
}
template <typename Timestamp>
std::enable_if_t<cudf::is_timestamp_t<Timestamp>::value, void> operator()(
rmm::cuda_stream_view stream) const
{
thrust::transform(rmm::exec_policy(stream),
input.begin<Timestamp>(),
input.end<Timestamp>(),
output.begin<OutputColT>(),
TransformFunctor{});
}
};
// Create an output column by applying the functor to every element from the input column
template <typename TransformFunctor, cudf::type_id OutputColCudfT>
std::unique_ptr<column> apply_datetime_op(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_timestamp(column.type()), "Column type should be timestamp");
auto size = column.size();
auto output_col_type = data_type{OutputColCudfT};
// Return an empty column if source column is empty
if (size == 0) return make_empty_column(output_col_type);
auto output = make_fixed_width_column(output_col_type,
size,
cudf::detail::copy_bitmask(column, stream, mr),
column.null_count(),
stream,
mr);
auto launch = launch_functor<TransformFunctor, cudf::id_to_type<OutputColCudfT>>{
column, static_cast<mutable_column_view>(*output)};
type_dispatcher(column.type(), launch, stream);
return output;
}
struct add_calendrical_months_functor {
template <typename Element, typename... Args>
std::enable_if_t<!cudf::is_timestamp_t<Element>::value, std::unique_ptr<column>> operator()(
Args&&...) const
{
CUDF_FAIL("Cannot extract datetime component from non-timestamp column.");
}
template <typename Timestamp, typename MonthIterator>
std::enable_if_t<cudf::is_timestamp_t<Timestamp>::value, std::unique_ptr<column>> operator()(
column_view timestamp_column,
MonthIterator months_begin,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto size = timestamp_column.size();
auto output_col_type = timestamp_column.type();
// Return an empty column if source column is empty
if (size == 0) return make_empty_column(output_col_type);
// The nullmask of `output` cannot be determined without information from
// the `months` type (column or scalar). Therefore, it is initialized as
// `UNALLOCATED` and assigned at a later stage.
auto output =
make_fixed_width_column(output_col_type, size, mask_state::UNALLOCATED, stream, mr);
auto output_mview = output->mutable_view();
thrust::transform(rmm::exec_policy(stream),
timestamp_column.begin<Timestamp>(),
timestamp_column.end<Timestamp>(),
months_begin,
output->mutable_view().begin<Timestamp>(),
[] __device__(auto& timestamp, auto& months) {
return add_calendrical_months_with_scale_back(
timestamp, cuda::std::chrono::months{months});
});
return output;
}
};
std::unique_ptr<column> add_calendrical_months(column_view const& timestamp_column,
column_view const& months_column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_timestamp(timestamp_column.type()), "Column type should be timestamp");
CUDF_EXPECTS(
months_column.type().id() == type_id::INT16 or months_column.type().id() == type_id::INT32,
"Months column type should be INT16 or INT32.");
CUDF_EXPECTS(timestamp_column.size() == months_column.size(),
"Timestamp and months column should be of the same size");
auto const months_begin_iter =
cudf::detail::indexalator_factory::make_input_iterator(months_column);
auto output = type_dispatcher(timestamp_column.type(),
add_calendrical_months_functor{},
timestamp_column,
months_begin_iter,
stream,
mr);
auto [output_null_mask, null_count] =
cudf::detail::bitmask_and(table_view{{timestamp_column, months_column}}, stream, mr);
output->set_null_mask(std::move(output_null_mask), null_count);
return output;
}
std::unique_ptr<column> add_calendrical_months(column_view const& timestamp_column,
scalar const& months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_timestamp(timestamp_column.type()), "Column type should be timestamp");
CUDF_EXPECTS(months.type().id() == type_id::INT16 or months.type().id() == type_id::INT32,
"Months type should be INT16 or INT32");
if (months.is_valid(stream)) {
auto const months_begin_iter = thrust::make_permutation_iterator(
cudf::detail::indexalator_factory::make_input_iterator(months),
thrust::make_constant_iterator(0));
auto output = type_dispatcher(timestamp_column.type(),
add_calendrical_months_functor{},
timestamp_column,
months_begin_iter,
stream,
mr);
output->set_null_mask(cudf::detail::copy_bitmask(timestamp_column, stream, mr),
timestamp_column.null_count());
return output;
} else {
return make_timestamp_column(
timestamp_column.type(), timestamp_column.size(), mask_state::ALL_NULL, stream, mr);
}
}
std::unique_ptr<column> round_general(rounding_function round_kind,
rounding_frequency component,
column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher(
column.type(), dispatch_round{}, round_kind, component, column, stream, mr);
}
std::unique_ptr<column> extract_year(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::YEAR>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_month(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::MONTH>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_day(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::DAY>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_weekday(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::WEEKDAY>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_hour(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::HOUR>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_minute(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::MINUTE>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_second(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::SECOND>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_millisecond_fraction(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::MILLISECOND>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_microsecond_fraction(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::MICROSECOND>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_nanosecond_fraction(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<
detail::extract_component_operator<detail::datetime_component::NANOSECOND>,
cudf::type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> last_day_of_month(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<detail::extract_last_day_of_month,
cudf::type_id::TIMESTAMP_DAYS>(column, stream, mr);
}
std::unique_ptr<column> day_of_year(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::apply_datetime_op<detail::extract_day_num_of_year, cudf::type_id::INT16>(
column, stream, mr);
}
std::unique_ptr<column> is_leap_year(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return apply_datetime_op<is_leap_year_op, type_id::BOOL8>(column, stream, mr);
}
std::unique_ptr<column> days_in_month(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return apply_datetime_op<days_in_month_op, type_id::INT16>(column, stream, mr);
}
std::unique_ptr<column> extract_quarter(column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return apply_datetime_op<extract_quarter_op, type_id::INT16>(column, stream, mr);
}
} // namespace detail
std::unique_ptr<column> ceil_datetimes(column_view const& column,
rounding_frequency freq,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::round_general(
detail::rounding_function::CEIL, freq, column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> floor_datetimes(column_view const& column,
rounding_frequency freq,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::round_general(
detail::rounding_function::FLOOR, freq, column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> round_datetimes(column_view const& column,
rounding_frequency freq,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::round_general(
detail::rounding_function::ROUND, freq, column, cudf::get_default_stream(), mr);
}
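// Illustrative usage sketch (comment only; `ts_col` and `mr` are placeholder names for a
// timestamp column_view and a device_memory_resource*): snap each timestamp up to the next
// hour boundary.
//
//   auto ceiled =
//     cudf::datetime::ceil_datetimes(ts_col, cudf::datetime::rounding_frequency::HOUR, mr);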
std::unique_ptr<column> extract_year(column_view const& column, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_year(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_month(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_month(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_day(column_view const& column, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_day(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_weekday(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_weekday(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_hour(column_view const& column, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_hour(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_minute(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_minute(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_second(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_second(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_millisecond_fraction(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_millisecond_fraction(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_microsecond_fraction(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_microsecond_fraction(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_nanosecond_fraction(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_nanosecond_fraction(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> last_day_of_month(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::last_day_of_month(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> day_of_year(column_view const& column, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::day_of_year(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<cudf::column> add_calendrical_months(cudf::column_view const& timestamp_column,
cudf::column_view const& months_column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::add_calendrical_months(
timestamp_column, months_column, cudf::get_default_stream(), mr);
}
std::unique_ptr<cudf::column> add_calendrical_months(cudf::column_view const& timestamp_column,
cudf::scalar const& months,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::add_calendrical_months(timestamp_column, months, cudf::get_default_stream(), mr);
}
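// Illustrative usage sketch (comment only; `ts_col` and `mr` are placeholder names for a
// timestamp column_view and a device_memory_resource*): shift every timestamp forward by three
// calendar months, scaling back to the last day of the month where the day would overflow.
//
//   cudf::numeric_scalar<int16_t> three_months{3};
//   auto shifted = cudf::datetime::add_calendrical_months(ts_col, three_months, mr);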
std::unique_ptr<column> is_leap_year(column_view const& column, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_leap_year(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> days_in_month(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::days_in_month(column, cudf::get_default_stream(), mr);
}
std::unique_ptr<column> extract_quarter(column_view const& column,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_quarter(column, cudf::get_default_stream(), mr);
}
} // namespace datetime
} // namespace cudf
rapidsai_public_repos/cudf/cpp/src/datetime/timezone.cpp
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/timezone.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table.hpp>
#include <algorithm>
#include <filesystem>
#include <fstream>
namespace cudf {
namespace {
constexpr uint32_t tzif_magic = ('T' << 0) | ('Z' << 8) | ('i' << 16) | ('f' << 24);
std::string const tzif_system_directory = "/usr/share/zoneinfo/";
#pragma pack(push, 1)
/**
* @brief 32-bit TZif header
*/
struct timezone_file_header {
uint32_t magic; ///< "TZif"
uint8_t version; ///< 0:version1, '2':version2, '3':version3
uint8_t reserved15[15]; ///< unused, reserved for future use
uint32_t isutccnt; ///< number of UTC/local indicators contained in the body
uint32_t isstdcnt; ///< number of standard/wall indicators contained in the body
uint32_t leapcnt; ///< number of leap second records contained in the body
uint32_t timecnt; ///< number of transition times contained in the body
uint32_t typecnt; ///< number of local time type Records contained in the body - MUST NOT be zero
uint32_t charcnt; ///< total number of octets used by the set of time zone designations contained
///< in the body
};
struct localtime_type_record_s {
int32_t utcoff; // number of seconds to be added to UTC in order to determine local time
uint8_t isdst; // 0:standard time, 1:Daylight Savings Time (DST)
uint8_t desigidx; // index into the series of time zone designation characters
};
struct dst_transition_s {
char type; // Transition type ('J','M' or day)
int month; // Month of transition
int week; // Week of transition
int day; // Day of transition
int time; // Time of day (seconds)
};
#pragma pack(pop)
struct timezone_file {
timezone_file_header header;
bool is_header_from_64bit = false;
std::vector<int64_t> transition_times;
std::vector<uint8_t> ttime_idx;
std::vector<localtime_type_record_s> ttype;
std::vector<char> posix_tz_string;
[[nodiscard]] auto timecnt() const { return header.timecnt; }
[[nodiscard]] auto typecnt() const { return header.typecnt; }
// Based on https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html
static constexpr auto leap_second_rec_size(bool is_64bit) noexcept
{
return (is_64bit ? sizeof(uint64_t) : sizeof(uint32_t)) + sizeof(uint32_t);
}
static constexpr auto file_content_size_32(timezone_file_header const& header) noexcept
{
return header.timecnt * sizeof(uint32_t) + // transition times
header.timecnt * sizeof(uint8_t) + // transition time index
header.typecnt * sizeof(localtime_type_record_s) + // local time type records
header.charcnt * sizeof(uint8_t) + // time zone designations
header.leapcnt * leap_second_rec_size(false) + // leap second records
header.isstdcnt * sizeof(uint8_t) + // standard/wall indicators
header.isutccnt * sizeof(uint8_t); // UTC/local indicators
}
/**
   * @brief Used because a little-endian platform is assumed.
*/
void header_to_little_endian()
{
header.isutccnt = __builtin_bswap32(header.isutccnt);
header.isstdcnt = __builtin_bswap32(header.isstdcnt);
header.leapcnt = __builtin_bswap32(header.leapcnt);
header.timecnt = __builtin_bswap32(header.timecnt);
header.typecnt = __builtin_bswap32(header.typecnt);
header.charcnt = __builtin_bswap32(header.charcnt);
}
void read_header(std::ifstream& input_file, size_t file_size)
{
input_file.read(reinterpret_cast<char*>(&header), sizeof(header));
CUDF_EXPECTS(!input_file.fail() && header.magic == tzif_magic,
"Error reading time zones file header.");
header_to_little_endian();
// Check for 64-bit header
if (header.version != 0) {
if (file_content_size_32(header) + sizeof(header) < file_size) {
// skip the 32-bit content
input_file.seekg(file_content_size_32(header), std::ios_base::cur);
// read the 64-bit header
input_file.read(reinterpret_cast<char*>(&header), sizeof(header));
header_to_little_endian();
is_header_from_64bit = true;
}
}
CUDF_EXPECTS(
header.typecnt > 0 && header.typecnt <= file_size / sizeof(localtime_type_record_s),
"Invalid number of time types in timezone file.");
CUDF_EXPECTS(header.timecnt <= file_size,
"Number of transition times is larger than the file size.");
}
timezone_file(std::optional<std::string_view> tzif_dir, std::string_view timezone_name)
{
using std::ios_base;
// Open the input file
auto const tz_filename =
std::filesystem::path{tzif_dir.value_or(tzif_system_directory)} / timezone_name;
std::ifstream fin;
fin.open(tz_filename, ios_base::in | ios_base::binary | ios_base::ate);
CUDF_EXPECTS(fin, "Failed to open the timezone file.");
auto const file_size = fin.tellg();
fin.seekg(0);
read_header(fin, file_size);
// Read transition times (convert from 32-bit to 64-bit if necessary)
transition_times.resize(timecnt());
if (is_header_from_64bit) {
fin.read(reinterpret_cast<char*>(transition_times.data()),
transition_times.size() * sizeof(int64_t));
for (auto& tt : transition_times) {
tt = __builtin_bswap64(tt);
}
} else {
std::vector<int32_t> tt32(timecnt());
fin.read(reinterpret_cast<char*>(tt32.data()), tt32.size() * sizeof(int32_t));
std::transform(
        tt32.cbegin(), tt32.cend(), transition_times.begin(), [](auto& tt) {
return __builtin_bswap32(tt);
});
}
ttime_idx.resize(timecnt());
fin.read(reinterpret_cast<char*>(ttime_idx.data()), timecnt() * sizeof(uint8_t));
// Read time types
ttype.resize(typecnt());
fin.read(reinterpret_cast<char*>(ttype.data()), typecnt() * sizeof(localtime_type_record_s));
CUDF_EXPECTS(!fin.fail(), "Failed to read time types from the time zone file.");
for (uint32_t i = 0; i < typecnt(); i++) {
ttype[i].utcoff = __builtin_bswap32(ttype[i].utcoff);
}
// Read posix TZ string
fin.seekg(header.charcnt + header.leapcnt * leap_second_rec_size(is_header_from_64bit) +
header.isstdcnt + header.isutccnt,
ios_base::cur);
auto const file_pos = fin.tellg();
if (file_size - file_pos > 1) {
posix_tz_string.resize(file_size - file_pos);
fin.read(posix_tz_string.data(), file_size - file_pos);
}
}
};
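// Illustrative usage sketch (comment only; the zone name is just an example): read and parse the
// TZif data for a zone from the default system directory.
//
//   timezone_file const tzf{std::nullopt, "America/New_York"};
//   // tzf.transition_times, tzf.ttype and tzf.posix_tz_string now hold the parsed contents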
/**
* @brief Posix TZ parser
*/
template <class Container>
class posix_parser {
public:
posix_parser(Container const& tz_string) : cur{tz_string.begin()}, end{tz_string.end()} {}
/**
* @brief Advances the parser past a name from the posix TZ string.
*/
void skip_name();
/**
* @brief Parses a number from the posix TZ string.
*
* @return Parsed number
*/
int64_t parse_number();
/**
* @brief Parses a UTC offset from the posix TZ string.
*
* @return Parsed offset
*/
int32_t parse_offset();
/**
* @brief Parses a DST transition time from the posix TZ string.
*
* @return Parsed transition time
*/
dst_transition_s parse_transition();
/**
* @brief Returns the remaining number of characters in the input.
*/
auto remaining_char_cnt() const { return end - cur; }
/**
* @brief Returns the next character in the input.
*/
[[nodiscard]] char next_character() const { return *cur; }
private:
typename Container::const_iterator cur;
typename Container::const_iterator const end;
};
/**
* @brief Skips the next name token.
*
* Name can be a string of letters, such as EST, or an arbitrary string surrounded by angle
* brackets, such as <UTC-05>
*/
template <class Container>
void posix_parser<Container>::skip_name()
{
cur = std::find_if(cur, end, [](auto c) {
return std::isdigit(c) || c == '-' || c == ',' || c == '+' || c == '<';
});
  if (cur < end && *cur == '<') cur = std::next(std::find(cur, end, '>'));
}
template <class Container>
int64_t posix_parser<Container>::parse_number()
{
int64_t v = 0;
while (cur < end) {
auto const c = *cur - '0';
if (c > 9 || c < 0) { break; }
v = v * 10 + c;
++cur;
}
return v;
}
template <class Container>
int32_t posix_parser<Container>::parse_offset()
{
CUDF_EXPECTS(cur < end, "Unexpected end of input stream");
auto const sign = *cur;
cur += (sign == '-' || sign == '+');
auto const hours = parse_number();
auto scale = 60 * 60;
auto total_seconds = hours * scale;
// Parse minutes and seconds, if present
while (cur < end && scale > 1 && *cur == ':') {
// Skip the ':' character
++cur;
// Scale becomes 60, for minutes, and then 1, for seconds
scale /= 60;
total_seconds += parse_number() * scale;
}
return (sign == '-') ? -total_seconds : total_seconds;
}
template <class Container>
dst_transition_s posix_parser<Container>::parse_transition()
{
CUDF_EXPECTS(cur < end, "Unexpected end of input stream");
// Transition at 2AM by default
int32_t time = 2 * 60 * 60;
if (cur + 2 <= end && *cur == ',') {
char const type = cur[1];
int month = 0;
int week = 0;
int day = 0;
cur += (type == 'M' || type == 'J') ? 2 : 1;
if (type == 'M') {
month = parse_number();
if (cur < end && *cur == '.') {
++cur;
week = parse_number();
if (cur < end && *cur == '.') {
++cur;
day = parse_number();
}
}
} else {
day = parse_number();
}
if (cur < end && *cur == '/') {
++cur;
time = parse_offset();
}
return {type, month, week, day, time};
}
return {0, 0, 0, 0, time};
}
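// Worked example (illustrative only): for the posix TZ string "PST8PDT,M3.2.0,M11.1.0"
//   skip_name()        advances past "PST"
//   parse_offset()     returns 8 * 60 * 60 = 28800 (the caller negates it to obtain UTC-8)
//   skip_name()        advances past "PDT"
//   parse_transition() returns {'M', 3, 2, 0, 7200}:  Sunday (0) of week 2 of March at 02:00
//   parse_transition() returns {'M', 11, 1, 0, 7200}: Sunday (0) of week 1 of November at 02:00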
/**
* @brief Returns the number of days in a month.
*/
static int days_in_month(int month, bool is_leap_year)
{
CUDF_EXPECTS(month > 0 && month <= 12, "Invalid month");
if (month == 2) return 28 + is_leap_year;
return 30 + ((0b1010110101010 >> month) & 1);
}
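// For example: month 8 (August) -> bit 8 of 0b1010110101010 is 1 -> 30 + 1 = 31 days;
// month 4 (April) -> bit 4 is 0 -> 30 days; February is handled separately (28 or 29 days).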
/**
* @brief Converts a daylight saving transition time to a number of seconds.
*
* @param trans transition day information
* @param year year of transition
*
* @return transition time in seconds from the beginning of the year
*/
static int64_t get_transition_time(dst_transition_s const& trans, int year)
{
auto day = trans.day;
auto const is_leap = cuda::std::chrono::year{year}.is_leap();
if (trans.type == 'M') {
auto const month = std::min(std::max(trans.month, 1), 12);
auto week = std::min(std::max(trans.week, 1), 52);
    // Compute the weekday of the first day of the month (0 == Sunday); January and February are
    // treated as months 11 and 12 of the previous year (Zeller-style adjustment)
auto const adjusted_month = (month + 9) % 12 + 1;
auto const adjusted_year = year - (month <= 2);
auto day_of_week =
((26 * adjusted_month - 2) / 10 + 1 + (adjusted_year % 100) + (adjusted_year % 100) / 4 +
(adjusted_year / 400) - 2 * (adjusted_year / 100)) %
7;
if (day_of_week < 0) { day_of_week += 7; }
day = (day - day_of_week + 7) % 7;
// Add weeks
while (week > 1 && day + 7 < days_in_month(month, is_leap)) {
week--;
day += 7;
}
// Add months
for (int m = 1; m < month; m++) {
day += days_in_month(m, is_leap);
}
} else if (trans.type == 'J') {
// Account for 29th of February on leap years
day += (day > 31 + 29 && is_leap);
}
return trans.time + cuda::std::chrono::duration_cast<duration_s>(duration_D{day}).count();
}
} // namespace
std::unique_ptr<table> make_timezone_transition_table(std::optional<std::string_view> tzif_dir,
std::string_view timezone_name,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::make_timezone_transition_table(
tzif_dir, timezone_name, cudf::get_default_stream(), mr);
}
namespace detail {
std::unique_ptr<table> make_timezone_transition_table(std::optional<std::string_view> tzif_dir,
std::string_view timezone_name,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (timezone_name == "UTC" || timezone_name.empty()) {
// Return an empty table for UTC
return std::make_unique<cudf::table>();
}
timezone_file const tzf(tzif_dir, timezone_name);
std::vector<timestamp_s::rep> transition_times(1);
std::vector<duration_s::rep> offsets(1);
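  // The first element of each vector is reserved for the "ancient rule": the offset
  // applied to timestamps before the earliest transition (populated below)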
  // Reserve space for one ancient rule entry, one entry per TZ file transition,
  // and two entries per year in the future solar cycle
transition_times.reserve(1 + tzf.timecnt() + solar_cycle_entry_count);
offsets.reserve(1 + tzf.timecnt() + solar_cycle_entry_count);
size_t earliest_std_idx = 0;
for (size_t t = 0; t < tzf.timecnt(); t++) {
auto const ttime = tzf.transition_times[t];
auto const idx = tzf.ttime_idx[t];
CUDF_EXPECTS(idx < tzf.typecnt(), "Out-of-range type index");
auto const utcoff = tzf.ttype[idx].utcoff;
transition_times.push_back(ttime);
offsets.push_back(utcoff);
if (!earliest_std_idx && !tzf.ttype[idx].isdst) {
earliest_std_idx = transition_times.size() - 1;
}
}
if (tzf.timecnt() != 0) {
if (!earliest_std_idx) { earliest_std_idx = 1; }
transition_times[0] = transition_times[earliest_std_idx];
offsets[0] = offsets[earliest_std_idx];
} else {
if (tzf.typecnt() == 0 || tzf.ttype[0].utcoff == 0) {
// No transitions, offset is zero; Table would be a no-op.
// Return an empty table to speed up parsing.
return std::make_unique<cudf::table>();
}
// No transitions to use for the time/offset - use the first offset and apply to all timestamps
transition_times[0] = std::numeric_limits<int64_t>::max();
offsets[0] = tzf.ttype[0].utcoff;
}
// Generate entries for times after the last transition
auto future_std_offset = offsets[tzf.timecnt()];
auto future_dst_offset = future_std_offset;
dst_transition_s dst_start{};
dst_transition_s dst_end{};
if (!tzf.posix_tz_string.empty()) {
posix_parser<decltype(tzf.posix_tz_string)> parser(tzf.posix_tz_string);
parser.skip_name();
future_std_offset = -parser.parse_offset();
if (parser.remaining_char_cnt() > 1) {
// Parse Daylight Saving Time information
parser.skip_name();
if (parser.remaining_char_cnt() > 0 && parser.next_character() != ',') {
future_dst_offset = -parser.parse_offset();
} else {
future_dst_offset = future_std_offset + 60 * 60;
}
dst_start = parser.parse_transition();
dst_end = parser.parse_transition();
} else {
future_dst_offset = future_std_offset;
}
}
// Add entries to fill the transition cycle
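  // Two entries (start and end of DST) are generated for each year of the cycle, starting at 1970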
int64_t year_timestamp = 0;
for (int32_t year = 1970; year < 1970 + solar_cycle_years; ++year) {
auto const dst_start_time = get_transition_time(dst_start, year);
auto const dst_end_time = get_transition_time(dst_end, year);
// Two entries per year, since there are two transitions
transition_times.push_back(year_timestamp + dst_start_time - future_std_offset);
offsets.push_back(future_dst_offset);
transition_times.push_back(year_timestamp + dst_end_time - future_dst_offset);
offsets.push_back(future_std_offset);
// Swap the newly added transitions if in descending order
if (transition_times.rbegin()[1] > transition_times.rbegin()[0]) {
std::swap(transition_times.rbegin()[0], transition_times.rbegin()[1]);
std::swap(offsets.rbegin()[0], offsets.rbegin()[1]);
}
year_timestamp += cuda::std::chrono::duration_cast<duration_s>(
duration_D{365 + cuda::std::chrono::year{year}.is_leap()})
.count();
}
CUDF_EXPECTS(transition_times.size() == offsets.size(),
"Error reading TZif file for timezone " + std::string{timezone_name});
std::vector<timestamp_s> ttimes_typed;
ttimes_typed.reserve(transition_times.size());
std::transform(transition_times.cbegin(),
transition_times.cend(),
std::back_inserter(ttimes_typed),
[](auto ts) { return timestamp_s{duration_s{ts}}; });
std::vector<duration_s> offsets_typed;
offsets_typed.reserve(offsets.size());
std::transform(offsets.cbegin(), offsets.cend(), std::back_inserter(offsets_typed), [](auto ts) {
return duration_s{ts};
});
auto d_ttimes = cudf::detail::make_device_uvector_async(ttimes_typed, stream, mr);
auto d_offsets = cudf::detail::make_device_uvector_async(offsets_typed, stream, mr);
std::vector<std::unique_ptr<column>> tz_table_columns;
tz_table_columns.emplace_back(
std::make_unique<cudf::column>(std::move(d_ttimes), rmm::device_buffer{}, 0));
tz_table_columns.emplace_back(
std::make_unique<cudf::column>(std::move(d_offsets), rmm::device_buffer{}, 0));
  // Need to finish the async copies before ttimes_typed and offsets_typed go out of scope
stream.synchronize();
return std::make_unique<cudf::table>(std::move(tz_table_columns));
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/structs/structs_column_view.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
namespace cudf {
structs_column_view::structs_column_view(column_view const& rhs) : column_view{rhs}
{
CUDF_EXPECTS(type().id() == type_id::STRUCT, "structs_column_view only supports struct columns");
}
column_view structs_column_view::parent() const { return *this; }
column_view structs_column_view::get_sliced_child(int index, rmm::cuda_stream_view stream) const
{
std::vector<column_view> children;
children.reserve(child(index).num_children());
for (size_type i = 0; i < child(index).num_children(); i++) {
children.push_back(child(index).child(i));
}
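  // Recompute the null count over the parent's row range [offset(), offset() + size())
  // so the sliced child view reports an accurate count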
return column_view{
child(index).type(),
size(),
child(index).head<uint8_t>(),
child(index).null_mask(),
child(index).null_count()
? cudf::detail::null_count(child(index).null_mask(), offset(), offset() + size(), stream)
: 0,
offset(),
children};
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/structs/structs_column_factories.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <memory>
namespace cudf {
/// Column factory that adopts child columns.
std::unique_ptr<cudf::column> make_structs_column(
size_type num_rows,
std::vector<std::unique_ptr<column>>&& child_columns,
size_type null_count,
rmm::device_buffer&& null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(null_count <= 0 || !null_mask.is_empty(),
"Struct column with nulls must be nullable.");
CUDF_EXPECTS(std::all_of(child_columns.begin(),
child_columns.end(),
[&](auto const& child_col) { return num_rows == child_col->size(); }),
"Child columns must have the same number of rows as the Struct column.");
if (!null_mask.is_empty()) {
for (auto& child : child_columns) {
child = structs::detail::superimpose_nulls(static_cast<bitmask_type const*>(null_mask.data()),
null_count,
std::move(child),
stream,
mr);
}
}
return std::make_unique<column>(cudf::data_type{type_id::STRUCT},
num_rows,
rmm::device_buffer{}, // Empty data buffer. Structs hold no data.
std::move(null_mask),
null_count,
std::move(child_columns));
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/structs/utilities.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
namespace cudf::structs::detail {
/**
* @copydoc cudf::structs::detail::extract_ordered_struct_children
*/
std::vector<std::vector<column_view>> extract_ordered_struct_children(
host_span<column_view const> struct_cols, rmm::cuda_stream_view stream)
{
auto const num_children = struct_cols[0].num_children();
auto const num_cols = static_cast<size_type>(struct_cols.size());
std::vector<std::vector<column_view>> result;
result.reserve(num_children);
for (size_type child_index = 0; child_index < num_children; child_index++) {
std::vector<column_view> children;
children.reserve(num_cols);
for (size_type col_index = 0; col_index < num_cols; col_index++) {
structs_column_view scv(struct_cols[col_index]);
// all inputs must have the same # of children and they must all be of the
// same type.
CUDF_EXPECTS(struct_cols[0].num_children() == scv.num_children(),
"Mismatch in number of children during struct concatenate");
CUDF_EXPECTS(struct_cols[0].child(child_index).type() == scv.child(child_index).type(),
"Mismatch in child types during struct concatenate");
children.push_back(scv.get_sliced_child(child_index, stream));
}
result.push_back(std::move(children));
}
return result;
}
namespace {
/**
* @brief Check whether the specified column is of type `STRUCT`.
*/
bool is_struct(cudf::column_view const& col) { return col.type().id() == type_id::STRUCT; }
} // namespace
bool is_or_has_nested_lists(cudf::column_view const& col)
{
auto is_list = [](cudf::column_view const& col) { return col.type().id() == type_id::LIST; };
return is_list(col) || std::any_of(col.child_begin(), col.child_end(), is_or_has_nested_lists);
}
/**
* @brief Flattens struct columns to constituent non-struct columns in the input table.
*
*/
struct table_flattener {
table_view input;
std::vector<order> const& column_order;
std::vector<null_order> const& null_precedence;
column_nullability nullability;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
temporary_nullable_data nullable_data;
std::vector<std::unique_ptr<column>> validity_as_column;
std::vector<column_view> flat_columns;
std::vector<order> flat_column_order;
std::vector<null_order> flat_null_precedence;
table_flattener(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
column_nullability nullability,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: column_order{column_order},
null_precedence{null_precedence},
nullability{nullability},
stream{stream},
mr{mr}
{
superimpose_nulls(input);
}
/**
   * @brief Pushes down nulls from struct columns into their children, saving the resulting
   * table to `input` and the generated null masks and columns to `nullable_data`.
*/
void superimpose_nulls(table_view const& input_table)
{
auto [table, tmp_nullable_data] = push_down_nulls(input_table, stream, mr);
this->input = std::move(table);
this->nullable_data = std::move(tmp_nullable_data);
}
// Convert null_mask to BOOL8 columns and flatten the struct children in order.
void flatten_struct_column(structs_column_view const& col,
order col_order,
null_order col_null_order)
{
// Even if it is not required to extract the bitmask to a separate column,
// we should always do that if the structs column has any null element.
//
// In addition, we should check for null by calling to `has_nulls()`, not `nullable()`.
// This is because when comparing structs columns, if one column has bitmask while the other
// does not (and both columns do not have any null element) then flattening them using
// `nullable()` will result in tables with different number of columns.
//
// Notice that, for comparing structs columns when one column has null while the other
// doesn't, `nullability` must be passed in with value `column_nullability::FORCE` to make
// sure the flattening results are tables having the same number of columns.
if (nullability == column_nullability::FORCE || col.has_nulls()) {
validity_as_column.push_back(cudf::detail::is_valid(col, stream, mr));
if (col.has_nulls()) {
        // copying the bitmask is needed only if the column has nulls
validity_as_column.back()->set_null_mask(cudf::detail::copy_bitmask(col, stream, mr),
col.null_count());
}
flat_columns.push_back(validity_as_column.back()->view());
if (not column_order.empty()) { flat_column_order.push_back(col_order); } // doesn't matter.
if (not null_precedence.empty()) { flat_null_precedence.push_back(col_null_order); }
}
for (decltype(col.num_children()) i = 0; i < col.num_children(); ++i) {
auto const& child = col.get_sliced_child(i, stream);
if (child.type().id() == type_id::STRUCT) {
flatten_struct_column(structs_column_view{child}, col_order, col_null_order);
} else {
flat_columns.push_back(child);
if (not column_order.empty()) flat_column_order.push_back(col_order);
if (not null_precedence.empty()) flat_null_precedence.push_back(col_null_order);
}
}
}
// Note: possibly expand for flattening list columns too.
/**
* @copydoc flattened_table
*
* @return tuple with flattened table, flattened column order, flattened null precedence,
* vector of boolean columns (struct validity).
*/
auto operator()()
{
for (auto i = 0; i < input.num_columns(); ++i) {
auto const& col = input.column(i);
if (col.type().id() == type_id::STRUCT) {
flatten_struct_column(structs_column_view{col},
(column_order.empty() ? order() : column_order[i]),
(null_precedence.empty() ? null_order() : null_precedence[i]));
} else {
flat_columns.push_back(col);
if (not column_order.empty()) flat_column_order.push_back(column_order[i]);
if (not null_precedence.empty()) flat_null_precedence.push_back(null_precedence[i]);
}
}
return std::make_unique<flattened_table>(table_view{flat_columns},
std::move(flat_column_order),
std::move(flat_null_precedence),
std::move(validity_as_column),
std::move(nullable_data));
}
};
std::unique_ptr<flattened_table> flatten_nested_columns(
table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
column_nullability nullability,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const has_struct = std::any_of(input.begin(), input.end(), is_struct);
if (not has_struct) {
return std::make_unique<flattened_table>(input,
column_order,
null_precedence,
std::vector<std::unique_ptr<column>>{},
temporary_nullable_data{});
}
return table_flattener{input, column_order, null_precedence, nullability, stream, mr}();
}
namespace {
/**
* @brief Superimpose the given null mask into the input column without any sanitization for
* non-empty nulls.
*
* @copydoc cudf::structs::detail::superimpose_nulls
*/
std::unique_ptr<column> superimpose_nulls_no_sanitize(bitmask_type const* null_mask,
size_type null_count,
std::unique_ptr<column>&& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input->type().id() == cudf::type_id::EMPTY) {
// EMPTY columns should not have a null mask,
// so don't superimpose null mask on empty columns.
return std::move(input);
}
auto const num_rows = input->size();
if (!input->nullable()) {
input->set_null_mask(cudf::detail::copy_bitmask(null_mask, 0, num_rows, stream, mr),
null_count);
} else {
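    // The input is already nullable: AND the given mask into the existing null mask in
    // place, so a row remains valid only if it is valid in both masks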
auto current_mask = input->mutable_view().null_mask();
std::vector<bitmask_type const*> masks{reinterpret_cast<bitmask_type const*>(null_mask),
reinterpret_cast<bitmask_type const*>(current_mask)};
std::vector<size_type> begin_bits{0, 0};
auto const valid_count = cudf::detail::inplace_bitmask_and(
device_span<bitmask_type>(current_mask, num_bitmask_words(num_rows)),
masks,
begin_bits,
num_rows,
stream);
auto const new_null_count = num_rows - valid_count;
input->set_null_count(new_null_count);
}
// If the input is also a struct, repeat for all its children. Otherwise just return.
if (input->type().id() != cudf::type_id::STRUCT) { return std::move(input); }
auto const current_mask = input->view().null_mask();
auto const new_null_count = input->null_count(); // this was just computed in the step above
auto content = input->release();
// Build new children columns.
std::for_each(content.children.begin(),
content.children.end(),
[current_mask, new_null_count, stream, mr](auto& child) {
child = superimpose_nulls_no_sanitize(
current_mask, new_null_count, std::move(child), stream, mr);
});
// Replace the children columns.
return cudf::make_structs_column(num_rows,
std::move(content.children),
new_null_count,
std::move(*content.null_mask),
stream,
mr);
}
/**
* @brief Push down nulls from the given input column into its children columns without any
* sanitization for non-empty nulls.
*
* @copydoc cudf::structs::detail::push_down_nulls
*/
std::pair<column_view, temporary_nullable_data> push_down_nulls_no_sanitize(
column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
auto ret_nullable_data = temporary_nullable_data{};
if (input.type().id() != type_id::STRUCT) {
// NOOP for non-STRUCT columns.
return {input, std::move(ret_nullable_data)};
}
auto const structs_view = structs_column_view{input};
// Function to rewrite child null mask.
auto const child_with_new_mask = [&](auto const& child_idx) {
auto child = structs_view.get_sliced_child(child_idx, stream);
// If struct is not nullable, child null mask is retained. NOOP.
if (not structs_view.nullable()) { return child; }
auto parent_child_null_masks =
std::vector<cudf::bitmask_type const*>{structs_view.null_mask(), child.null_mask()};
auto [new_child_mask, null_count] = [&] {
if (not child.nullable()) {
// Adopt parent STRUCT's null mask.
return std::pair{structs_view.null_mask(), 0};
}
// Both STRUCT and child are nullable. AND() for the child's new null mask.
//
// Note: ANDing only [offset(), offset()+size()) would not work. The null-mask produced thus
// would start at offset=0. The column-view attempts to apply its offset() to both the _data
// and the _null_mask(). It would be better to AND the bits from the beginning, and apply
// offset() uniformly.
// Alternatively, one could construct a big enough buffer, and use inplace_bitwise_and.
auto [new_mask, null_count] = cudf::detail::bitmask_and(parent_child_null_masks,
std::vector<size_type>{0, 0},
child.offset() + child.size(),
stream,
mr);
ret_nullable_data.new_null_masks.push_back(std::move(new_mask));
return std::pair{
reinterpret_cast<bitmask_type const*>(ret_nullable_data.new_null_masks.back().data()),
null_count};
}();
return column_view(child.type(),
child.size(),
child.head(),
new_child_mask,
null_count,
child.offset(),
std::vector<column_view>{child.child_begin(), child.child_end()});
};
auto const child_begin =
thrust::make_transform_iterator(thrust::make_counting_iterator(0), child_with_new_mask);
auto const child_end = child_begin + structs_view.num_children();
auto ret_children = std::vector<column_view>{};
std::for_each(child_begin, child_end, [&](auto const& child) {
auto [processed_child, child_nullable_data] = push_down_nulls_no_sanitize(child, stream, mr);
ret_children.emplace_back(std::move(processed_child));
ret_nullable_data.emplace_back(std::move(child_nullable_data));
});
// Make column view out of newly constructed column_views, and all the validity buffers.
return std::pair{column_view(input.type(),
input.size(),
nullptr,
input.null_mask(),
input.null_count(), // Alternatively, postpone.
input.offset(),
ret_children),
std::move(ret_nullable_data)};
}
} // namespace
void temporary_nullable_data::emplace_back(temporary_nullable_data&& other)
{
auto const move_append = [](auto& dst, auto& src) {
dst.insert(dst.end(), std::make_move_iterator(src.begin()), std::make_move_iterator(src.end()));
};
move_append(new_null_masks, other.new_null_masks);
move_append(new_columns, other.new_columns);
}
std::unique_ptr<column> superimpose_nulls(bitmask_type const* null_mask,
size_type null_count,
std::unique_ptr<column>&& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
input = superimpose_nulls_no_sanitize(null_mask, null_count, std::move(input), stream, mr);
if (auto const input_view = input->view(); cudf::detail::has_nonempty_nulls(input_view, stream)) {
// We can't call `purge_nonempty_nulls` for individual child column(s) that need to be
// sanitized. Instead, we have to call it from the top level column.
// This is to make sure all the columns (top level + all children) have consistent offsets.
// Otherwise, the sanitized children may have offsets that are different from the others and
// also different from the parent column, causing data corruption.
return cudf::detail::purge_nonempty_nulls(input_view, stream, mr);
}
return std::move(input);
}
std::pair<column_view, temporary_nullable_data> push_down_nulls(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output = push_down_nulls_no_sanitize(input, stream, mr);
if (auto const output_view = output.first;
cudf::detail::has_nonempty_nulls(output_view, stream)) {
output.second.new_columns.emplace_back(
cudf::detail::purge_nonempty_nulls(output_view, stream, mr));
output.first = output.second.new_columns.back()->view();
// Don't need the temp null mask anymore, as we will create a new column.
// However, these null masks are still needed for `purge_nonempty_nulls` thus removing them
// must be done after calling it.
output.second.new_null_masks.clear();
}
return output;
}
std::pair<table_view, temporary_nullable_data> push_down_nulls(table_view const& table,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto processed_columns = std::vector<column_view>{};
auto nullable_data = temporary_nullable_data{};
for (auto const& col : table) {
auto [processed_col, col_nullable_data] = push_down_nulls(col, stream, mr);
processed_columns.emplace_back(std::move(processed_col));
nullable_data.emplace_back(std::move(col_nullable_data));
}
return {table_view{processed_columns}, std::move(nullable_data)};
}
bool contains_null_structs(column_view const& col)
{
return (is_struct(col) && col.has_nulls()) ||
std::any_of(col.child_begin(), col.child_end(), contains_null_structs);
}
} // namespace cudf::structs::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/structs
|
rapidsai_public_repos/cudf/cpp/src/structs/scan/scan_inclusive.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <reductions/nested_type_minmax_util.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <vector>
namespace cudf {
namespace structs {
namespace detail {
template <typename Op>
std::unique_ptr<column> scan_inclusive(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Create a gather map containing indices of the prefix min/max elements.
auto gather_map = rmm::device_uvector<size_type>(input.size(), stream);
auto const binop_generator =
cudf::reduction::detail::comparison_binop_generator::create<Op>(input, stream);
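  // The generated binary operator compares two rows by index and returns the index of the
  // min/max row, so an inclusive scan over row indices yields the index of the running
  // min/max element at each position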
thrust::inclusive_scan(rmm::exec_policy(stream),
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(input.size()),
gather_map.begin(),
binop_generator.binop());
// Gather the children columns of the input column. Must use `get_sliced_child` to properly
// handle input in case it is a sliced view.
auto const input_children = [&] {
auto const it = cudf::detail::make_counting_transform_iterator(
0, [structs_view = structs_column_view{input}, &stream](auto const child_idx) {
return structs_view.get_sliced_child(child_idx, stream);
});
return std::vector<column_view>(it, it + input.num_children());
}();
// Gather the children elements of the prefix min/max struct elements for the output.
auto scanned_children = cudf::detail::gather(table_view{input_children},
gather_map,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
  // Don't need to set a null mask because that will be handled by the caller.
return make_structs_column(
input.size(), std::move(scanned_children), 0, rmm::device_buffer{0, stream, mr}, stream, mr);
}
template std::unique_ptr<column> scan_inclusive<DeviceMin>(column_view const& input_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
template std::unique_ptr<column> scan_inclusive<DeviceMax>(column_view const& input_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace structs
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/structs
|
rapidsai_public_repos/cudf/cpp/src/structs/copying/concatenate.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/concatenate_masks.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <algorithm>
#include <memory>
#include <numeric>
namespace cudf {
namespace structs {
namespace detail {
/**
* @copydoc cudf::structs::detail::concatenate
*/
std::unique_ptr<column> concatenate(host_span<column_view const> columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// get ordered children
auto ordered_children = extract_ordered_struct_children(columns, stream);
// concatenate them
std::vector<std::unique_ptr<column>> children;
children.reserve(columns[0].num_children());
std::transform(ordered_children.begin(),
ordered_children.end(),
std::back_inserter(children),
[mr, stream](host_span<column_view const> cols) {
return cudf::detail::concatenate(cols, stream, mr);
});
  // get the total length from the concatenated children; if there are no children,
  // compute it by summing the sizes of the input columns
auto const acc_size_fn = [](size_type s, column_view const& c) { return s + c.size(); };
auto const total_length =
!children.empty() ? children[0]->size()
: std::accumulate(columns.begin(), columns.end(), size_type{0}, acc_size_fn);
// if any of the input columns have nulls, construct the output mask
bool const has_nulls =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); });
rmm::device_buffer null_mask =
create_null_mask(total_length, has_nulls ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED);
auto null_mask_data = static_cast<bitmask_type*>(null_mask.data());
auto const null_count =
has_nulls ? cudf::detail::concatenate_masks(columns, null_mask_data, stream) : size_type{0};
  // assemble into the output structs column
return make_structs_column(
total_length, std::move(children), null_count, std::move(null_mask), stream, mr);
}
} // namespace detail
} // namespace structs
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/ast/expressions.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/ast/detail/expression_transformer.hpp>
#include <cudf/ast/detail/operators.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
namespace cudf {
namespace ast {
operation::operation(ast_operator op, expression const& input) : op(op), operands({input})
{
if (cudf::ast::detail::ast_operator_arity(op) != 1) {
CUDF_FAIL("The provided operator is not a unary operator.");
}
}
operation::operation(ast_operator op, expression const& left, expression const& right)
: op(op), operands({left, right})
{
if (cudf::ast::detail::ast_operator_arity(op) != 2) {
CUDF_FAIL("The provided operator is not a binary operator.");
}
}
cudf::size_type literal::accept(detail::expression_parser& visitor) const
{
return visitor.visit(*this);
}
cudf::size_type column_reference::accept(detail::expression_parser& visitor) const
{
return visitor.visit(*this);
}
cudf::size_type operation::accept(detail::expression_parser& visitor) const
{
return visitor.visit(*this);
}
cudf::size_type column_name_reference::accept(detail::expression_parser& visitor) const
{
return visitor.visit(*this);
}
auto literal::accept(detail::expression_transformer& visitor) const
-> decltype(visitor.visit(*this))
{
return visitor.visit(*this);
}
auto column_reference::accept(detail::expression_transformer& visitor) const
-> decltype(visitor.visit(*this))
{
return visitor.visit(*this);
}
auto operation::accept(detail::expression_transformer& visitor) const
-> decltype(visitor.visit(*this))
{
return visitor.visit(*this);
}
auto column_name_reference::accept(detail::expression_transformer& visitor) const
-> decltype(visitor.visit(*this))
{
return visitor.visit(*this);
}
} // namespace ast
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/ast/expression_parser.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/ast/detail/operators.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/transform_iterator.h>
#include <algorithm>
#include <functional>
#include <iterator>
namespace cudf {
namespace ast {
namespace detail {
device_data_reference::device_data_reference(device_data_reference_type reference_type,
cudf::data_type data_type,
cudf::size_type data_index,
table_reference table_source)
: reference_type(reference_type),
data_type(data_type),
data_index(data_index),
table_source(table_source)
{
}
device_data_reference::device_data_reference(device_data_reference_type reference_type,
cudf::data_type data_type,
cudf::size_type data_index)
: reference_type(reference_type),
data_type(data_type),
data_index(data_index),
table_source(table_reference::LEFT)
{
}
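// Returns the smallest intermediate storage index that is not currently in use and marks it as used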
cudf::size_type expression_parser::intermediate_counter::take()
{
auto const first_missing = find_first_missing();
used_values.insert(used_values.cbegin() + first_missing, first_missing);
max_used = std::max(max_used, first_missing + 1);
return first_missing;
}
void expression_parser::intermediate_counter::give(cudf::size_type value)
{
  // Remove the index from the sorted list of used values (if present) so that the
  // intermediate storage slot can be reused by subsequent operations
auto const lower_bound = std::lower_bound(used_values.cbegin(), used_values.cend(), value);
if ((lower_bound != used_values.cend()) && (*lower_bound == value))
used_values.erase(lower_bound);
}
cudf::size_type expression_parser::intermediate_counter::find_first_missing() const
{
if (used_values.empty() || (used_values.front() != 0)) { return 0; }
// Search for the first non-contiguous pair of elements.
auto diff_not_one = [](auto a, auto b) { return a != b - 1; };
auto it = std::adjacent_find(used_values.cbegin(), used_values.cend(), diff_not_one);
return it != used_values.cend()
? *it + 1 // A missing value was found and is returned.
: used_values.size(); // No missing elements. Return the next element in the sequence.
}
cudf::size_type expression_parser::visit(literal const& expr)
{
if (_expression_count == 0) {
// Handle the trivial case of a literal as the entire expression.
return visit(operation(ast_operator::IDENTITY, expr));
} else {
_expression_count++; // Increment the expression index
auto const data_type = expr.get_data_type(); // Resolve expression type
auto device_view = expr.get_value(); // Construct a scalar device view
auto const literal_index = cudf::size_type(_literals.size()); // Push literal
_literals.push_back(device_view);
auto const source = detail::device_data_reference(detail::device_data_reference_type::LITERAL,
data_type,
literal_index); // Push data reference
return add_data_reference(source);
}
}
cudf::size_type expression_parser::visit(column_reference const& expr)
{
if (_expression_count == 0) {
// Handle the trivial case of a column reference as the entire expression.
return visit(operation(ast_operator::IDENTITY, expr));
} else {
// Increment the expression index
_expression_count++;
// Resolve expression type
cudf::data_type data_type;
if (expr.get_table_source() == table_reference::LEFT) {
data_type = expr.get_data_type(_left);
} else {
if (_right.has_value()) {
data_type = expr.get_data_type(*_right);
} else {
CUDF_FAIL(
"Your expression contains a reference to the RIGHT table even though it will only be "
"evaluated on a single table (by convention, the LEFT table).");
}
}
// Push data reference
auto const source = detail::device_data_reference(detail::device_data_reference_type::COLUMN,
data_type,
expr.get_column_index(),
expr.get_table_source());
return add_data_reference(source);
}
}
cudf::size_type expression_parser::visit(operation const& expr)
{
// Increment the expression index
auto const expression_index = _expression_count++;
// Visit children (operands) of this expression
auto const operand_data_ref_indices = visit_operands(expr.get_operands());
// Resolve operand types
auto data_ref = [this](auto const& index) { return _data_references[index].data_type; };
auto begin = thrust::make_transform_iterator(operand_data_ref_indices.cbegin(), data_ref);
auto end = begin + operand_data_ref_indices.size();
auto const operand_types = std::vector<cudf::data_type>(begin, end);
// Validate types of operand data references match
if (std::adjacent_find(operand_types.cbegin(), operand_types.cend(), std::not_equal_to<>()) !=
operand_types.cend()) {
CUDF_FAIL("An AST expression was provided non-matching operand types.");
}
// Give back intermediate storage locations that are consumed by this operation
std::for_each(
operand_data_ref_indices.cbegin(),
operand_data_ref_indices.cend(),
[this](auto const& data_reference_index) {
auto const operand_source = _data_references[data_reference_index];
if (operand_source.reference_type == detail::device_data_reference_type::INTERMEDIATE) {
auto const intermediate_index = operand_source.data_index;
_intermediate_counter.give(intermediate_index);
}
});
// Resolve expression type
auto const op = expr.get_operator();
auto const data_type = cudf::ast::detail::ast_operator_return_type(op, operand_types);
_operators.push_back(op);
// Push data reference
auto const output = [&]() {
if (expression_index == 0) {
// This expression is the root. Output should be directed to the output column.
return detail::device_data_reference(
detail::device_data_reference_type::COLUMN, data_type, 0, table_reference::OUTPUT);
} else {
// This expression is not the root. Output is an intermediate value.
// Ensure that the output type is fixed width and fits in the intermediate storage.
if (!cudf::is_fixed_width(data_type)) {
CUDF_FAIL(
"The output data type is not a fixed-width type but must be stored in an intermediate.");
} else if (cudf::size_of(data_type) > (_has_nulls ? sizeof(IntermediateDataType<true>)
: sizeof(IntermediateDataType<false>))) {
CUDF_FAIL("The output data type is too large to be stored in an intermediate.");
}
return detail::device_data_reference(
detail::device_data_reference_type::INTERMEDIATE, data_type, _intermediate_counter.take());
}
}();
auto const index = add_data_reference(output);
// Insert source indices from all operands (sources) and this operator (destination)
_operator_source_indices.insert(_operator_source_indices.end(),
operand_data_ref_indices.cbegin(),
operand_data_ref_indices.cend());
_operator_source_indices.push_back(index);
return index;
}
// TODO: Eliminate column name references from expression_parser because
// 2 code paths diverge in supporting column name references:
// 1. column name references are specific to cuIO
// 2. column name references are not supported in libcudf table operations such as join and
//    transform.
cudf::size_type expression_parser::visit(column_name_reference const& expr)
{
CUDF_FAIL("Column name references are not supported in the AST expression parser.");
}
cudf::data_type expression_parser::output_type() const
{
return _data_references.empty() ? cudf::data_type(cudf::type_id::EMPTY)
: _data_references.back().data_type;
}
std::vector<cudf::size_type> expression_parser::visit_operands(
std::vector<std::reference_wrapper<expression const>> operands)
{
auto operand_data_reference_indices = std::vector<cudf::size_type>();
for (auto const& operand : operands) {
auto const operand_data_reference_index = operand.get().accept(*this);
operand_data_reference_indices.push_back(operand_data_reference_index);
}
return operand_data_reference_indices;
}
cudf::size_type expression_parser::add_data_reference(detail::device_data_reference data_ref)
{
// If an equivalent data reference already exists, return its index. Otherwise add this data
// reference and return the new index.
auto const it = std::find(_data_references.cbegin(), _data_references.cend(), data_ref);
if (it != _data_references.cend()) {
return std::distance(_data_references.cbegin(), it);
} else {
_data_references.push_back(data_ref);
return _data_references.size() - 1;
}
}
} // namespace detail
} // namespace ast
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/sum_of_squares.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> sum_of_squares(column_view const& col,
cudf::data_type const output_dtype,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher(
cudf::is_dictionary(col.type()) ? dictionary_column_view(col).keys().type() : col.type(),
simple::detail::element_type_dispatcher<op::sum_of_squares>{},
col,
output_dtype,
std::nullopt,
stream,
mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/sum.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> sum(column_view const& col,
cudf::data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher(
cudf::is_dictionary(col.type()) ? dictionary_column_view(col).keys().type() : col.type(),
simple::detail::element_type_dispatcher<op::sum>{},
col,
output_dtype,
init,
stream,
mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/product.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> product(column_view const& col,
cudf::data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher(
cudf::is_dictionary(col.type()) ? dictionary_column_view(col).keys().type() : col.type(),
simple::detail::element_type_dispatcher<op::product>{},
col,
output_dtype,
init,
stream,
mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/reductions.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/quantiles.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/reduction.hpp>
#include <cudf/reduction/detail/histogram.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
struct reduce_dispatch_functor {
column_view const col;
data_type output_dtype;
std::optional<std::reference_wrapper<scalar const>> init;
rmm::mr::device_memory_resource* mr;
rmm::cuda_stream_view stream;
reduce_dispatch_functor(column_view const& col,
data_type output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: col(col), output_dtype(output_dtype), init(init), mr(mr), stream(stream)
{
}
template <aggregation::Kind k>
std::unique_ptr<scalar> operator()(reduce_aggregation const& agg)
{
switch (k) {
case aggregation::SUM: return sum(col, output_dtype, init, stream, mr);
case aggregation::PRODUCT: return product(col, output_dtype, init, stream, mr);
case aggregation::MIN: return min(col, output_dtype, init, stream, mr);
case aggregation::MAX: return max(col, output_dtype, init, stream, mr);
case aggregation::ANY: return any(col, output_dtype, init, stream, mr);
case aggregation::ALL: return all(col, output_dtype, init, stream, mr);
case aggregation::HISTOGRAM: return histogram(col, stream, mr);
case aggregation::MERGE_HISTOGRAM: return merge_histogram(col, stream, mr);
case aggregation::SUM_OF_SQUARES: return sum_of_squares(col, output_dtype, stream, mr);
case aggregation::MEAN: return mean(col, output_dtype, stream, mr);
case aggregation::VARIANCE: {
auto var_agg = static_cast<cudf::detail::var_aggregation const&>(agg);
return variance(col, output_dtype, var_agg._ddof, stream, mr);
}
case aggregation::STD: {
auto var_agg = static_cast<cudf::detail::std_aggregation const&>(agg);
return standard_deviation(col, output_dtype, var_agg._ddof, stream, mr);
}
case aggregation::MEDIAN: {
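        // The median is computed as the 0.5 quantile (LINEAR interpolation) over the
        // sorted, non-null elements of the column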
auto current_mr = rmm::mr::get_current_device_resource();
auto sorted_indices = cudf::detail::sorted_order(
table_view{{col}}, {}, {null_order::AFTER}, stream, current_mr);
auto valid_sorted_indices =
cudf::detail::split(*sorted_indices, {col.size() - col.null_count()}, stream)[0];
auto col_ptr = cudf::detail::quantile(
col, {0.5}, interpolation::LINEAR, valid_sorted_indices, true, stream, current_mr);
return cudf::detail::get_element(*col_ptr, 0, stream, mr);
}
case aggregation::QUANTILE: {
auto quantile_agg = static_cast<cudf::detail::quantile_aggregation const&>(agg);
CUDF_EXPECTS(quantile_agg._quantiles.size() == 1,
"Reduction quantile accepts only one quantile value");
auto current_mr = rmm::mr::get_current_device_resource();
auto sorted_indices = cudf::detail::sorted_order(
table_view{{col}}, {}, {null_order::AFTER}, stream, current_mr);
auto valid_sorted_indices =
cudf::detail::split(*sorted_indices, {col.size() - col.null_count()}, stream)[0];
auto col_ptr = cudf::detail::quantile(col,
quantile_agg._quantiles,
quantile_agg._interpolation,
valid_sorted_indices,
true,
stream,
current_mr);
return cudf::detail::get_element(*col_ptr, 0, stream, mr);
}
case aggregation::NUNIQUE: {
auto nunique_agg = static_cast<cudf::detail::nunique_aggregation const&>(agg);
return cudf::make_fixed_width_scalar(
cudf::detail::distinct_count(
col, nunique_agg._null_handling, nan_policy::NAN_IS_VALID, stream),
stream,
mr);
}
case aggregation::NTH_ELEMENT: {
auto nth_agg = static_cast<cudf::detail::nth_element_aggregation const&>(agg);
return nth_element(col, nth_agg._n, nth_agg._null_handling, stream, mr);
}
case aggregation::COLLECT_LIST: {
auto col_agg = static_cast<cudf::detail::collect_list_aggregation const&>(agg);
return collect_list(col, col_agg._null_handling, stream, mr);
}
case aggregation::COLLECT_SET: {
auto col_agg = static_cast<cudf::detail::collect_set_aggregation const&>(agg);
return collect_set(
col, col_agg._null_handling, col_agg._nulls_equal, col_agg._nans_equal, stream, mr);
}
case aggregation::MERGE_LISTS: {
return merge_lists(col, stream, mr);
}
case aggregation::MERGE_SETS: {
auto col_agg = static_cast<cudf::detail::merge_sets_aggregation const&>(agg);
return merge_sets(col, col_agg._nulls_equal, col_agg._nans_equal, stream, mr);
}
case aggregation::TDIGEST: {
CUDF_EXPECTS(output_dtype.id() == type_id::STRUCT,
"Tdigest aggregations expect output type to be STRUCT");
auto td_agg = static_cast<cudf::detail::tdigest_aggregation const&>(agg);
return tdigest::detail::reduce_tdigest(col, td_agg.max_centroids, stream, mr);
}
case aggregation::MERGE_TDIGEST: {
CUDF_EXPECTS(output_dtype.id() == type_id::STRUCT,
"Tdigest aggregations expect output type to be STRUCT");
auto td_agg = static_cast<cudf::detail::merge_tdigest_aggregation const&>(agg);
return tdigest::detail::reduce_merge_tdigest(col, td_agg.max_centroids, stream, mr);
}
default: CUDF_FAIL("Unsupported reduction operator");
}
}
};
std::unique_ptr<scalar> reduce(column_view const& col,
reduce_aggregation const& agg,
data_type output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!init.has_value() || col.type() == init.value().get().type(),
"column and initial value must be the same type");
if (init.has_value() && !(agg.kind == aggregation::SUM || agg.kind == aggregation::PRODUCT ||
agg.kind == aggregation::MIN || agg.kind == aggregation::MAX ||
agg.kind == aggregation::ANY || agg.kind == aggregation::ALL)) {
CUDF_FAIL(
"Initial value is only supported for SUM, PRODUCT, MIN, MAX, ANY, and ALL aggregation types");
}
// Returns default scalar if input column is empty or all null
if (col.size() <= col.null_count()) {
if (agg.kind == aggregation::TDIGEST || agg.kind == aggregation::MERGE_TDIGEST) {
return tdigest::detail::make_empty_tdigest_scalar(stream, mr);
}
if (agg.kind == aggregation::HISTOGRAM) {
return std::make_unique<list_scalar>(
std::move(*reduction::detail::make_empty_histogram_like(col)), true, stream, mr);
}
if (agg.kind == aggregation::MERGE_HISTOGRAM) {
return std::make_unique<list_scalar>(
std::move(*reduction::detail::make_empty_histogram_like(col.child(0))), true, stream, mr);
}
if (output_dtype.id() == type_id::LIST) {
if (col.type() == output_dtype) { return make_empty_scalar_like(col, stream, mr); }
      // In some cases (e.g. collect_list or collect_set) the output type is a list of the
      // input type, so we have to handcraft the default scalar.
auto scalar = make_list_scalar(empty_like(col)->view(), stream, mr);
scalar->set_valid_async(false, stream);
return scalar;
}
if (output_dtype.id() == type_id::STRUCT) { return make_empty_scalar_like(col, stream, mr); }
auto result = make_default_constructed_scalar(output_dtype, stream, mr);
if (agg.kind == aggregation::ANY || agg.kind == aggregation::ALL) {
// empty input should return false for ANY and return true for ALL
dynamic_cast<numeric_scalar<bool>*>(result.get())
->set_value(agg.kind == aggregation::ALL, stream);
}
return result;
}
return cudf::detail::aggregation_dispatcher(
agg.kind, reduce_dispatch_functor{col, output_dtype, init, stream, mr}, agg);
}
} // namespace detail
} // namespace reduction
std::unique_ptr<scalar> reduce(column_view const& col,
reduce_aggregation const& agg,
data_type output_dtype,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return reduction::detail::reduce(
col, agg, output_dtype, std::nullopt, cudf::get_default_stream(), mr);
}
std::unique_ptr<scalar> reduce(column_view const& col,
reduce_aggregation const& agg,
data_type output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return reduction::detail::reduce(col, agg, output_dtype, init, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/minmax.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
#include <thrust/transform_reduce.h>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Basic element for the minmax reduce operation.
*
* Stores the minimum and maximum values that have been encountered so far
*/
template <typename T>
struct minmax_pair {
T min_val;
T max_val;
__host__ __device__ minmax_pair()
: min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){};
__host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){};
__host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){};
};
/**
* @brief Reduce for the minmax operation and return a device scalar.
*
* @tparam Op Binary operator functor
* @tparam InputIterator Input iterator Type
* @tparam OutputType Output scalar type
* @param d_in input iterator
* @param num_items number of items to reduce
* @param binary_op binary operator used to reduce
* @param stream CUDA stream to run kernels on.
* @return rmm::device_scalar<OutputType>
*/
template <typename Op,
typename InputIterator,
typename OutputType = typename thrust::iterator_value<InputIterator>::type>
rmm::device_scalar<OutputType> reduce_device(InputIterator d_in,
size_type num_items,
Op binary_op,
rmm::cuda_stream_view stream)
{
OutputType identity{};
rmm::device_scalar<OutputType> result{identity, stream};
// Allocate temporary storage
size_t storage_bytes = 0;
cub::DeviceReduce::Reduce(
nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value());
auto temp_storage = rmm::device_buffer{storage_bytes, stream};
// Run reduction
cub::DeviceReduce::Reduce(temp_storage.data(),
storage_bytes,
d_in,
result.data(),
num_items,
binary_op,
identity,
stream.value());
return result;
}
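// The same two-phase CUB pattern as above, sketched with cub::DeviceReduce::Sum for
// illustration (assumes a device pointer d_in of num_items ints, a device pointer
// d_out, and an rmm::cuda_stream_view stream):
//
//   size_t bytes = 0;
//   cub::DeviceReduce::Sum(nullptr, bytes, d_in, d_out, num_items, stream.value());
//   auto tmp = rmm::device_buffer{bytes, stream};  // allocate the queried scratch space
//   cub::DeviceReduce::Sum(tmp.data(), bytes, d_in, d_out, num_items, stream.value());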
/**
* @brief Functor that accepts two minmax_pairs and returns a
* minmax_pair whose minimum and maximum values are the min() and max()
* respectively of the minimums and maximums of the input pairs.
*/
template <typename T>
struct minmax_binary_op
: public thrust::binary_function<minmax_pair<T>, minmax_pair<T>, minmax_pair<T>> {
__device__ minmax_pair<T> operator()(minmax_pair<T> const& lhs, minmax_pair<T> const& rhs) const
{
return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val),
thrust::max(lhs.max_val, rhs.max_val)};
}
};
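// Conceptual sketch of how minmax_pair and minmax_binary_op combine (the operator is
// __device__-only, so this is illustrative rather than host-executable):
//
//   minmax_pair<int> a{3};                   // {min_val = 3, max_val = 3}
//   minmax_pair<int> b{7};                   // {min_val = 7, max_val = 7}
//   auto c = minmax_binary_op<int>{}(a, b);  // {min_val = 3, max_val = 7}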
/**
* @brief Creates a minmax_pair<T> from a T
*/
template <typename T>
struct create_minmax {
__device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; }
};
/**
 * @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair:
 * <T, T> when the element is valid, or <cudf::DeviceMin::identity<T>(),
 * cudf::DeviceMax::identity<T>()> when it is null
*/
template <typename T>
struct create_minmax_with_nulls {
__device__ minmax_pair<T> operator()(thrust::pair<T, bool> i)
{
return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{};
}
};
/**
* @brief Dispatch functor for minmax operation.
*
* This uses the reduce function to compute the min and max values
* simultaneously for a column of data.
*
* @tparam T The input column's type
*/
struct minmax_functor {
template <typename T>
static constexpr bool is_supported()
{
return !(std::is_same_v<T, cudf::list_view> || std::is_same_v<T, cudf::struct_view>);
}
template <typename T>
auto reduce(column_view const& col, rmm::cuda_stream_view stream)
{
auto device_col = column_device_view::create(col, stream);
// compute minimum and maximum values
if (col.has_nulls()) {
auto pair_to_minmax = thrust::make_transform_iterator(
make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{});
return reduce_device(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream);
} else {
auto col_to_minmax =
thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{});
return reduce_device(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream);
}
}
/**
* @brief Functor to copy a minmax_pair result to individual scalar instances.
*
* @tparam T type of the data
   * @tparam ResultType the minmax_pair<T> result type holding the min and max values
*/
template <typename T, typename ResultType = minmax_pair<T>>
struct assign_min_max {
__device__ void operator()()
{
*min_data = result->min_val;
*max_data = result->max_val;
}
ResultType* result;
T* min_data;
T* max_data;
};
template <typename T,
std::enable_if_t<is_supported<T>() and !std::is_same_v<T, cudf::string_view> and
!cudf::is_dictionary<T>()>* = nullptr>
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
using storage_type = device_storage_type_t<T>;
// compute minimum and maximum values
auto dev_result = reduce<storage_type>(col, stream);
// create output scalars
using ScalarType = cudf::scalar_type_t<T>;
auto minimum = new ScalarType(T{}, true, stream, mr);
auto maximum = new ScalarType(T{}, true, stream, mr);
// copy dev_result to the output scalars
device_single_thread(
assign_min_max<storage_type>{dev_result.data(), minimum->data(), maximum->data()}, stream);
return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)};
}
/**
* @brief Specialization for strings column.
*/
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>* = nullptr>
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
// compute minimum and maximum values
auto dev_result = reduce<cudf::string_view>(col, stream);
// copy the minmax_pair to the host; does not copy the strings
using OutputType = minmax_pair<cudf::string_view>;
OutputType host_result;
CUDF_CUDA_TRY(cudaMemcpyAsync(
&host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDefault, stream.value()));
// strings are copied to create the scalars here
return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr),
std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)};
}
/**
* @brief Specialization for dictionary column.
*/
template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr>
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
// compute minimum and maximum values
auto dev_result = reduce<T>(col, stream);
// copy the minmax_pair to the host to call get_element
using OutputType = minmax_pair<T>;
OutputType host_result;
CUDF_CUDA_TRY(cudaMemcpyAsync(
&host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDefault, stream.value()));
// get the keys for those indexes
auto const keys = dictionary_column_view(col).keys();
return {detail::get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr),
detail::get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)};
}
template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr>
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()(
cudf::column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
{
CUDF_FAIL("type not supported for minmax() operation");
}
};
} // namespace
/**
* @copydoc cudf::minmax
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
if (col.null_count() == col.size()) {
// this handles empty and all-null columns
// return scalars with valid==false
return {make_default_constructed_scalar(col.type(), stream, mr),
make_default_constructed_scalar(col.type(), stream, mr)};
}
return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr);
}
} // namespace detail
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
column_view const& col, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::minmax(col, cudf::get_default_stream(), mr);
}
} // namespace cudf
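// Minimal usage sketch of the public API above (illustrative only; assumes an
// existing INT32 cudf::column_view named `input` with at least one non-null row):
//
//   auto const [min_scalar, max_scalar] = cudf::minmax(input);
//   auto const min_value =
//     static_cast<cudf::numeric_scalar<int32_t> const*>(min_scalar.get())->value();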
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/mean.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compound.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> mean(column_view const& col,
cudf::data_type const output_dtype,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto col_type =
cudf::is_dictionary(col.type()) ? dictionary_column_view(col).keys().type() : col.type();
using reducer = compound::detail::element_type_dispatcher<op::mean>;
return cudf::type_dispatcher(
    col_type, reducer(), col, output_dtype, /* ddof is not used for mean */ 1, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
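// Sketch of the public path that reaches this detail function (illustrative only;
// assumes an existing numeric cudf::column_view named `input`):
//
//   auto const agg  = cudf::make_mean_aggregation<cudf::reduce_aggregation>();
//   auto const mean = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::FLOAT64});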
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/nth_element.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
namespace cudf::reduction::detail {
std::unique_ptr<cudf::scalar> nth_element(column_view const& col,
size_type n,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(n >= -col.size() and n < col.size(), "Index out of bounds");
auto wrap_n = [n](size_type size) { return (n < 0 ? size + n : n); };
if (null_handling == null_policy::EXCLUDE and col.has_nulls()) {
auto valid_count = col.size() - col.null_count();
n = wrap_n(valid_count);
CUDF_EXPECTS(n >= 0 and n < valid_count, "Index out of bounds");
auto dcol = column_device_view::create(col, stream);
auto bitmask_iterator =
thrust::make_transform_iterator(cudf::detail::make_validity_iterator(*dcol),
[] __device__(auto b) { return static_cast<size_type>(b); });
rmm::device_uvector<size_type> null_skipped_index(col.size(), stream);
// null skipped index for valids only.
thrust::inclusive_scan(rmm::exec_policy(stream),
bitmask_iterator,
bitmask_iterator + col.size(),
null_skipped_index.begin());
auto n_pos = thrust::upper_bound(
rmm::exec_policy(stream), null_skipped_index.begin(), null_skipped_index.end(), n);
auto null_skipped_n = n_pos - null_skipped_index.begin();
return cudf::detail::get_element(col, null_skipped_n, stream, mr);
} else {
n = wrap_n(col.size());
return cudf::detail::get_element(col, n, stream, mr);
}
}
} // namespace cudf::reduction::detail
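// Worked example of the index handling above (illustrative): for a column of size 5,
// n == -1 wraps to element 4; with null_policy::EXCLUDE, n indexes only the valid
// elements, e.g. for values [10, null, 30, null, 50] and n = 1 the result is 30.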
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/compound.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/reduction/detail/reduction.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/iterator/transform_iterator.h>
namespace cudf {
namespace reduction {
namespace compound {
namespace detail {
/**
* @brief Multi-step reduction for operations such as mean, variance, and standard deviation.
*
* @tparam ElementType the input column data-type
* @tparam ResultType the output data-type
* @tparam Op the compound operator derived from `cudf::reduction::op::compound_op`
*
* @param col input column view
* @param output_dtype data type of return type and typecast elements of input column
* @param ddof Delta degrees of freedom used for standard deviation and variance. The divisor used
* is N - ddof, where N represents the number of elements.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Output scalar in device memory
*/
template <typename ElementType, typename ResultType, typename Op>
std::unique_ptr<scalar> compound_reduction(column_view const& col,
data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const valid_count = col.size() - col.null_count();
// reduction by iterator
auto dcol = cudf::column_device_view::create(col, stream);
std::unique_ptr<scalar> result;
Op compound_op{};
if (!cudf::is_dictionary(col.type())) {
if (col.has_nulls()) {
auto it = thrust::make_transform_iterator(
dcol->pair_begin<ElementType, true>(),
compound_op.template get_null_replacing_element_transformer<ResultType>());
result = cudf::reduction::detail::reduce<Op, decltype(it), ResultType>(
it, col.size(), compound_op, valid_count, ddof, stream, mr);
} else {
auto it = thrust::make_transform_iterator(
dcol->begin<ElementType>(), compound_op.template get_element_transformer<ResultType>());
result = cudf::reduction::detail::reduce<Op, decltype(it), ResultType>(
it, col.size(), compound_op, valid_count, ddof, stream, mr);
}
} else {
auto it = thrust::make_transform_iterator(
cudf::dictionary::detail::make_dictionary_pair_iterator<ElementType>(*dcol, col.has_nulls()),
compound_op.template get_null_replacing_element_transformer<ResultType>());
result = cudf::reduction::detail::reduce<Op, decltype(it), ResultType>(
it, col.size(), compound_op, valid_count, ddof, stream, mr);
}
// set scalar is valid
result->set_valid_async(col.null_count() < col.size(), stream);
return result;
};
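// Worked example of the ddof note above (illustrative): for values [1, 2, 3] the mean
// is 2 and the sum of squared deviations is 2, so the variance with ddof = 1 (the
// sample variance) is 2 / (3 - 1) = 1.0 and the standard deviation is 1.0.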
// @brief result type dispatcher for compound reduction (i.e. mean, var, std)
template <typename ElementType, typename Op>
struct result_type_dispatcher {
private:
template <typename ResultType>
static constexpr bool is_supported_v()
{
    // the operators `mean`, `var`, and `std` only accept
    // floating-point types as the output dtype
return std::is_floating_point_v<ResultType>;
}
public:
template <typename ResultType, std::enable_if_t<is_supported_v<ResultType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
cudf::data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return compound_reduction<ElementType, ResultType, Op>(col, output_dtype, ddof, stream, mr);
}
template <typename ResultType, std::enable_if_t<not is_supported_v<ResultType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
cudf::data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Unsupported output data type");
}
};
// @brief input column element dispatcher for compound reduction (i.e. mean, var, std)
template <typename Op>
struct element_type_dispatcher {
private:
// return true if ElementType is arithmetic type
template <typename ElementType>
static constexpr bool is_supported_v()
{
return std::is_arithmetic_v<ElementType>;
}
public:
template <typename ElementType, std::enable_if_t<is_supported_v<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
cudf::data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher(
output_dtype, result_type_dispatcher<ElementType, Op>(), col, output_dtype, ddof, stream, mr);
}
template <typename ElementType, std::enable_if_t<not is_supported_v<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
cudf::data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL(
"Reduction operators other than `min` and `max`"
" are not supported for non-arithmetic types");
}
};
} // namespace detail
} // namespace compound
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/min.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> min(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const input_type =
cudf::is_dictionary(col.type()) ? cudf::dictionary_column_view(col).keys().type() : col.type();
CUDF_EXPECTS(input_type == output_dtype, "min() operation requires matching output type");
auto const dispatch_type = cudf::is_dictionary(col.type())
? cudf::dictionary_column_view(col).indices().type()
: col.type();
using reducer = simple::detail::same_element_type_dispatcher<op::min>;
return cudf::type_dispatcher(dispatch_type, reducer{}, col, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
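// Public-API sketch reaching this detail function (illustrative only; min() requires
// the output type to match the input type; assumes an INT32 cudf::column_view `input`):
//
//   auto const agg = cudf::make_min_aggregation<cudf::reduce_aggregation>();
//   auto const out = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::INT32});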
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/any.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <cuda/atomic>
namespace cudf {
namespace reduction {
namespace detail {
namespace {
/**
* @brief Compute reduction any() for dictionary columns.
*
 * This compiles 10x faster than using thrust::reduce or the
 * cudf::reduction::simple::detail::reduce utility.
 * Both of these use CUB DeviceReduce, which aggressively inlines
* the input iterator logic.
*/
struct any_fn {
template <typename Iterator>
struct any_true_fn {
__device__ void operator()(size_type idx)
{
if (!*d_result && (iter[idx] != *d_result)) {
cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*d_result};
ref.fetch_or(1, cuda::std::memory_order_relaxed);
}
}
Iterator iter;
int32_t* d_result;
};
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const d_dict = cudf::column_device_view::create(input, stream);
auto const iter = [&] {
auto null_iter = op::max{}.template get_null_replacing_element_transformer<bool>();
auto pair_iter =
cudf::dictionary::detail::make_dictionary_pair_iterator<T>(*d_dict, input.has_nulls());
return thrust::make_transform_iterator(pair_iter, null_iter);
}();
auto d_result = rmm::device_scalar<int32_t>(0, stream, rmm::mr::get_current_device_resource());
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
input.size(),
any_true_fn<decltype(iter)>{iter, d_result.data()});
return std::make_unique<numeric_scalar<bool>>(d_result.value(stream), true, stream, mr);
}
template <typename T, std::enable_if_t<!std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Unexpected key type for dictionary in reduction any()");
}
};
} // namespace
std::unique_ptr<cudf::scalar> any(column_view const& col,
cudf::data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8),
"any() operation can be applied with output type `bool8` only");
if (cudf::is_dictionary(col.type())) {
return cudf::type_dispatcher(
dictionary_column_view(col).keys().type(), detail::any_fn{}, col, stream, mr);
}
using reducer = simple::detail::bool_result_element_dispatcher<op::max>;
// dispatch for non-dictionary types
return cudf::type_dispatcher(col.type(), reducer{}, col, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
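// Public-API sketch (illustrative only; any() requires a BOOL8 output type; assumes a
// numeric cudf::column_view `input`):
//
//   auto const agg = cudf::make_any_aggregation<cudf::reduce_aggregation>();
//   auto const out = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::BOOL8});
//   bool const any_true = static_cast<cudf::numeric_scalar<bool> const*>(out.get())->value();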
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/all.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <cuda/atomic>
namespace cudf {
namespace reduction {
namespace detail {
namespace {
/**
* @brief Compute reduction all() for dictionary columns.
*
 * This compiles 10x faster than using thrust::reduce or the
 * cudf::reduction::simple::detail::reduce utility.
 * Both of these use CUB DeviceReduce, which aggressively inlines
* the input iterator logic.
*/
struct all_fn {
template <typename Iterator>
struct all_true_fn {
__device__ void operator()(size_type idx)
{
if (*d_result && (iter[idx] != *d_result)) {
cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*d_result};
ref.fetch_and(0, cuda::std::memory_order_relaxed);
}
}
Iterator iter;
int32_t* d_result;
};
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const d_dict = cudf::column_device_view::create(input, stream);
auto const iter = [&] {
auto null_iter = op::min{}.template get_null_replacing_element_transformer<bool>();
auto pair_iter =
cudf::dictionary::detail::make_dictionary_pair_iterator<T>(*d_dict, input.has_nulls());
return thrust::make_transform_iterator(pair_iter, null_iter);
}();
auto d_result = rmm::device_scalar<int32_t>(1, stream, rmm::mr::get_current_device_resource());
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
input.size(),
all_true_fn<decltype(iter)>{iter, d_result.data()});
return std::make_unique<numeric_scalar<bool>>(d_result.value(stream), true, stream, mr);
}
template <typename T, std::enable_if_t<!std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Unexpected key type for dictionary in reduction all()");
}
};
} // namespace
std::unique_ptr<cudf::scalar> all(column_view const& col,
cudf::data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8),
"all() operation can be applied with output type `BOOL8` only");
if (cudf::is_dictionary(col.type())) {
return cudf::type_dispatcher(
dictionary_column_view(col).keys().type(), detail::all_fn{}, col, stream, mr);
}
using reducer = simple::detail::bool_result_element_dispatcher<op::min>;
// dispatch for non-dictionary types
return cudf::type_dispatcher(col.type(), reducer{}, col, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/max.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> max(column_view const& col,
cudf::data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const input_type =
cudf::is_dictionary(col.type()) ? cudf::dictionary_column_view(col).keys().type() : col.type();
CUDF_EXPECTS(input_type == output_dtype, "max() operation requires matching output type");
auto const dispatch_type = cudf::is_dictionary(col.type())
? cudf::dictionary_column_view(col).indices().type()
: col.type();
using reducer = simple::detail::same_element_type_dispatcher<op::max>;
return cudf::type_dispatcher(dispatch_type, reducer{}, col, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/var.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compound.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> variance(column_view const& col,
cudf::data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// TODO: add cuda version check when the fix is available
#if !defined(__CUDACC_DEBUG__)
using reducer = compound::detail::element_type_dispatcher<op::variance>;
auto col_type =
cudf::is_dictionary(col.type()) ? dictionary_column_view(col).keys().type() : col.type();
return cudf::type_dispatcher(col_type, reducer(), col, output_dtype, ddof, stream, mr);
#else
  // workaround for bug 200529165 which causes a compilation error only in device debug builds
  // hopefully the bug will be fixed in a future CUDA version (still failing in 11.2)
CUDF_FAIL("var/std reductions are not supported at debug build.");
#endif
}
} // namespace detail
} // namespace reduction
} // namespace cudf
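// Public-API sketch (illustrative only; assumes a numeric cudf::column_view `input`;
// the ddof argument of 1 requests the sample variance):
//
//   auto const agg = cudf::make_variance_aggregation<cudf::reduce_aggregation>(1);
//   auto const var = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::FLOAT64});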
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/nested_type_minmax_util.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/reduction/detail/reduction_operators.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
/**
* @brief Binary operator ArgMin/ArgMax with index values into the input table.
*/
template <typename DeviceComparator>
struct row_arg_minmax_fn {
size_type const num_rows;
DeviceComparator const comp;
bool const is_arg_min;
row_arg_minmax_fn(size_type num_rows_, DeviceComparator comp_, bool const is_arg_min_)
: num_rows{num_rows_}, comp{std::move(comp_)}, is_arg_min{is_arg_min_}
{
}
  // This function is explicitly prevented from inlining because it calls
  // `DeviceComparator::operator()`, which is inlined and very heavyweight. Inlining
  // this would result in huge code and significantly longer compile times when it is
  // instantiated and used with `thrust::reduce_by_key` or `thrust::scan_by_key`.
__attribute__((noinline)) __device__ auto operator()(size_type lhs_idx, size_type rhs_idx) const
{
// The extra bounds checking is due to issue github.com/rapidsai/cudf/issues/9156 and
// github.com/NVIDIA/thrust/issues/1525
// where invalid random values may be passed here by thrust::reduce_by_key
if (lhs_idx < 0 || lhs_idx >= num_rows) { return rhs_idx; }
if (rhs_idx < 0 || rhs_idx >= num_rows) { return lhs_idx; }
// Return `lhs_idx` iff:
// row(lhs_idx) < row(rhs_idx) and finding ArgMin, or
// row(lhs_idx) >= row(rhs_idx) and finding ArgMax.
return comp(lhs_idx, rhs_idx) == is_arg_min ? lhs_idx : rhs_idx;
}
};
/**
* @brief The null order when comparing a null with non-null elements. Currently support only the
* default null order: nulls are compared as LESS than any other non-null elements.
*/
auto static constexpr DEFAULT_NULL_ORDER = cudf::null_order::BEFORE;
/**
* @brief The utility class to provide a binary operator object for lexicographic comparison of
* nested-type elements.
*
* The binary operator provided by this class has an explicit non-inline `operator()` method to
* prevent excessive compile time when working with `thrust::reduce_by_key`.
*
 * When the input is a structs or lists column, top-level NULLs are compared as larger than all
 * other non-null elements when finding ARGMIN, or smaller than all other non-null elements when
 * finding ARGMAX. This matches the result of finding the min or max element with nulls excluded
 * from the operation, returning null only when all the input elements are null.
*/
class comparison_binop_generator {
private:
cudf::table_view const input_tview;
bool const has_nulls;
bool const is_min_op;
rmm::cuda_stream_view stream;
// Contains data used in `row_comparator` below, thus needs to be kept alive as a member variable.
std::unique_ptr<cudf::structs::detail::flattened_table> const flattened_input;
// Contains data used in the returned binop, thus needs to be kept alive as a member variable.
cudf::experimental::row::lexicographic::self_comparator row_comparator;
comparison_binop_generator(column_view const& input_,
bool is_min_op_,
rmm::cuda_stream_view stream_)
: input_tview{cudf::table_view{{input_}}},
has_nulls{cudf::has_nested_nulls(input_tview)},
is_min_op{is_min_op_},
stream{stream_},
flattened_input{cudf::structs::detail::flatten_nested_columns(
input_tview,
{},
std::vector<null_order>{DEFAULT_NULL_ORDER},
cudf::structs::detail::column_nullability::MATCH_INCOMING,
stream,
rmm::mr::get_current_device_resource())},
row_comparator{[&input_,
&input_tview = input_tview,
&flattened_input = flattened_input,
is_min_op_,
stream_]() {
if (is_min_op_ && input_.has_nulls()) {
// If the input column is nested type (struct/list) and has nulls (at the top level), null
// structs/lists are excluded from the operations. That is equivalent to considering
// top-level nulls as larger than all other non-null elements (if finding for ARGMIN), or
// smaller than all other non-null elements (if finding for ARGMAX).
if (input_.type().id() == cudf::type_id::STRUCT) {
// For struct type, it is simple: Just set a separate null order (`null_order::AFTER`)
// for the top level column, which is stored at the first position in the null_orders
// array resulted from struct flattening.
auto null_orders = flattened_input->null_orders();
null_orders.front() = cudf::null_order::AFTER;
return cudf::experimental::row::lexicographic::self_comparator{
flattened_input->flattened_columns(), {}, null_orders, stream_};
} else {
// For list type, we cannot set a separate null order for the top level column.
        // Thus, we have to work around this by creating a dummy (empty) struct column view
// having the same null mask as the input lists column.
// This dummy column will have a different null order (`null_order::AFTER`).
auto const null_orders =
std::vector<null_order>{cudf::null_order::AFTER, DEFAULT_NULL_ORDER};
auto const dummy_struct = column_view{data_type{type_id::STRUCT},
input_.size(),
nullptr,
input_.null_mask(),
input_.null_count(),
0,
{}};
return cudf::experimental::row::lexicographic::self_comparator{
cudf::table_view{{dummy_struct, input_}}, {}, null_orders, stream_};
}
} else {
return cudf::experimental::row::lexicographic::self_comparator{
input_tview, {}, std::vector<null_order>{DEFAULT_NULL_ORDER}, stream_};
}
}()}
{
}
public:
auto binop() const
{
auto const device_comp = row_comparator.less<true>(cudf::nullate::DYNAMIC{has_nulls});
return row_arg_minmax_fn(input_tview.num_rows(), device_comp, is_min_op);
}
template <typename BinOp>
static auto create(column_view const& input, rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(cudf::is_nested(input.type()),
"This utility class is designed exclusively for nested input types.");
return comparison_binop_generator(input,
std::is_same_v<BinOp, cudf::reduction::detail::op::min> ||
std::is_same_v<BinOp, cudf::DeviceMin>,
stream);
}
template <cudf::aggregation::Kind K>
static auto create(column_view const& input, rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(cudf::is_nested(input.type()),
"This utility class is designed exclusively for nested input types.");
return comparison_binop_generator(
input, K == cudf::aggregation::MIN || K == cudf::aggregation::ARGMIN, stream);
}
};
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/simple.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "nested_type_minmax_util.cuh"
#include <cudf/detail/copy.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
namespace cudf {
namespace reduction {
namespace simple {
namespace detail {
/**
 * @brief Reductions for 'sum', 'product', 'min', 'max', and 'sum of squares',
 * which compute the result directly with a single reduction call
*
* @tparam ElementType the input column data-type
* @tparam ResultType the output data-type
* @tparam Op the operator of cudf::reduction::op::
* @param col Input column of data to reduce
* @param init Optional initial value of the reduction
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Output scalar in device memory
*/
template <typename ElementType, typename ResultType, typename Op>
std::unique_ptr<scalar> simple_reduction(column_view const& col,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// reduction by iterator
auto dcol = cudf::column_device_view::create(col, stream);
auto simple_op = Op{};
// Cast initial value
std::optional<ResultType> const initial_value = [&] {
if (init.has_value() && init.value().get().is_valid()) {
using ScalarType = cudf::scalar_type_t<ElementType>;
auto input_value = static_cast<ScalarType const*>(&init.value().get())->value(stream);
return std::optional<ResultType>(static_cast<ResultType>(input_value));
} else {
return std::optional<ResultType>(std::nullopt);
}
}();
auto result = [&] {
if (col.has_nulls()) {
auto f = simple_op.template get_null_replacing_element_transformer<ResultType>();
auto it = thrust::make_transform_iterator(dcol->pair_begin<ElementType, true>(), f);
return cudf::reduction::detail::reduce(it, col.size(), simple_op, initial_value, stream, mr);
} else {
auto f = simple_op.template get_element_transformer<ResultType>();
auto it = thrust::make_transform_iterator(dcol->begin<ElementType>(), f);
return cudf::reduction::detail::reduce(it, col.size(), simple_op, initial_value, stream, mr);
}
}();
// set scalar is valid
result->set_valid_async(
col.null_count() < col.size() && (!init.has_value() || init.value().get().is_valid()), stream);
return result;
}
/**
* @brief Reduction for `sum`, `product`, `min` and `max` for decimal types
*
* @tparam DecimalXX The `decimal32`, `decimal64` or `decimal128` type
* @tparam Op The operator of cudf::reduction::op::
*
* @param col Input column of data to reduce
* @param init Optional initial value of the reduction
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Output scalar in device memory
*/
template <typename DecimalXX, typename Op>
std::unique_ptr<scalar> fixed_point_reduction(
column_view const& col,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using Type = device_storage_type_t<DecimalXX>;
auto result = simple_reduction<Type, Type, Op>(col, init, stream, mr);
auto const scale = [&] {
if (std::is_same_v<Op, cudf::reduction::detail::op::product>) {
auto const valid_count = static_cast<int32_t>(col.size() - col.null_count());
return numeric::scale_type{col.type().scale() * (valid_count + (init.has_value() ? 1 : 0))};
} else if (std::is_same_v<Op, cudf::reduction::detail::op::sum_of_squares>) {
return numeric::scale_type{col.type().scale() * 2};
}
return numeric::scale_type{col.type().scale()};
}();
auto const val = static_cast<cudf::scalar_type_t<Type>*>(result.get());
auto result_scalar =
cudf::make_fixed_point_scalar<DecimalXX>(val->value(stream), scale, stream, mr);
result_scalar->set_valid_async(
col.null_count() < col.size() && (!init.has_value() || init.value().get().is_valid()), stream);
return result_scalar;
}
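// Scale bookkeeping sketch for the product branch above (illustrative): for a decimal32
// column [1.1, 2.2] with scale -1 and no initial value, the product is computed on the
// int32 representation (11 * 22 = 242) and returned with scale -1 * 2 = -2, i.e. 2.42.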
/**
* @brief Reduction for 'sum', 'product', 'sum of squares' for dictionary columns.
*
* @tparam ElementType The key type of the input dictionary column.
* @tparam ResultType The output data-type for the resulting scalar
* @tparam Op The operator of cudf::reduction::op::
*
* @param col Input dictionary column of data to reduce
* @param init Optional initial value of the reduction
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Output scalar in device memory
*/
template <typename ElementType, typename ResultType, typename Op>
std::unique_ptr<scalar> dictionary_reduction(
column_view const& col,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (init.has_value()) { CUDF_FAIL("Initial value not supported for dictionary reductions"); }
auto dcol = cudf::column_device_view::create(col, stream);
auto simple_op = Op{};
auto result = [&] {
auto f = simple_op.template get_null_replacing_element_transformer<ResultType>();
auto p =
cudf::dictionary::detail::make_dictionary_pair_iterator<ElementType>(*dcol, col.has_nulls());
auto it = thrust::make_transform_iterator(p, f);
return cudf::reduction::detail::reduce(it, col.size(), simple_op, {}, stream, mr);
}();
// set scalar is valid
result->set_valid_async(
col.null_count() < col.size() && (!init.has_value() || init.value().get().is_valid()), stream);
return result;
}
/**
* @brief Convert a numeric scalar to another numeric scalar.
*
 * The input value is cast to the output type and its validity is copied to the output scalar.
*
* @tparam InputType The type of the input scalar to copy from
* @tparam OutputType The output scalar type to copy to
*/
template <typename InputType, typename OutputType>
struct assign_scalar_fn {
__device__ void operator()()
{
d_output.set_value(static_cast<OutputType>(d_input.value()));
d_output.set_valid(d_input.is_valid());
}
cudf::numeric_scalar_device_view<InputType> d_input;
cudf::numeric_scalar_device_view<OutputType> d_output;
};
/**
* @brief A type-dispatcher functor for converting a numeric scalar.
*
* The InputType is known and the dispatch is on the ResultType
* which is the output numeric scalar type.
*
* @tparam InputType The scalar type to convert from
*/
template <typename InputType>
struct cast_numeric_scalar_fn {
private:
template <typename ResultType>
static constexpr bool is_supported()
{
return cudf::is_convertible<InputType, ResultType>::value && cudf::is_numeric<ResultType>();
}
public:
template <typename ResultType, std::enable_if_t<is_supported<ResultType>()>* = nullptr>
std::unique_ptr<scalar> operator()(numeric_scalar<InputType>* input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto d_input = cudf::get_scalar_device_view(*input);
auto result = std::make_unique<numeric_scalar<ResultType>>(ResultType{}, true, stream, mr);
auto d_output = cudf::get_scalar_device_view(*result);
cudf::detail::device_single_thread(assign_scalar_fn<InputType, ResultType>{d_input, d_output},
stream);
return result;
}
template <typename ResultType, std::enable_if_t<not is_supported<ResultType>()>* = nullptr>
std::unique_ptr<scalar> operator()(numeric_scalar<InputType>*,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("input data type is not convertible to output data type");
}
};
/**
* @brief Call reduce and return a scalar of type bool.
*
* This is used by operations `any()` and `all()`.
*
* @tparam Op The reduce operation to execute on the column.
*/
template <typename Op>
struct bool_result_element_dispatcher {
template <typename ElementType, std::enable_if_t<std::is_arithmetic_v<ElementType>>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return simple_reduction<ElementType, bool, Op>(col, init, stream, mr);
}
template <typename ElementType,
std::enable_if_t<not std::is_arithmetic_v<ElementType>>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const&,
std::optional<std::reference_wrapper<scalar const>>,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Reduction operator not supported for this type");
}
};
/**
* @brief Call reduce and return a scalar of type matching the input column.
*
* This is used by operations `min()` and `max()`.
*
* @tparam Op The reduce operation to execute on the column.
*/
template <typename Op>
struct same_element_type_dispatcher {
private:
template <typename ElementType>
static constexpr bool is_supported()
{
return !cudf::is_dictionary<ElementType>();
}
template <typename IndexType, std::enable_if_t<cudf::is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> resolve_key(column_view const& keys,
scalar const& keys_index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto& index = static_cast<numeric_scalar<IndexType> const&>(keys_index);
return cudf::detail::get_element(keys, index.value(stream), stream, mr);
}
template <typename IndexType, std::enable_if_t<!cudf::is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> resolve_key(column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("index type expected for dictionary column");
}
public:
template <typename ElementType,
std::enable_if_t<cudf::is_nested<ElementType>() &&
(std::is_same_v<Op, cudf::reduction::detail::op::min> ||
std::is_same_v<Op, cudf::reduction::detail::op::max>)>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& input,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (init.has_value()) { CUDF_FAIL("Initial value not supported for nested type reductions"); }
if (input.is_empty()) { return cudf::make_empty_scalar_like(input, stream, mr); }
// We will do reduction to find the ARGMIN/ARGMAX index, then return the element at that index.
auto const binop_generator =
cudf::reduction::detail::comparison_binop_generator::create<Op>(input, stream);
auto const minmax_idx = thrust::reduce(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input.size()),
size_type{0},
binop_generator.binop());
return cudf::detail::get_element(input, minmax_idx, stream, mr);
}
template <typename ElementType,
std::enable_if_t<is_supported<ElementType>() && !cudf::is_nested<ElementType>() &&
!cudf::is_fixed_point<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (!cudf::is_dictionary(col.type())) {
return simple_reduction<ElementType, ElementType, Op>(col, init, stream, mr);
}
auto index = simple_reduction<ElementType, ElementType, Op>(
dictionary_column_view(col).get_indices_annotated(),
init,
stream,
rmm::mr::get_current_device_resource());
return resolve_key<ElementType>(dictionary_column_view(col).keys(), *index, stream, mr);
}
template <typename ElementType, std::enable_if_t<cudf::is_fixed_point<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return fixed_point_reduction<ElementType, Op>(col, init, stream, mr);
}
template <typename ElementType, std::enable_if_t<not is_supported<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const&,
std::optional<std::reference_wrapper<scalar const>>,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Reduction operator not supported for this type");
}
};
/**
* @brief Call reduce and return a scalar of the type specified.
*
* This is used by operations sum(), product(), and sum_of_squares().
* It only supports numeric types. If the output type is not the
 * same as the input type, an extra cast operation may be incurred.
*
* @tparam Op The reduce operation to execute on the column.
*/
template <typename Op>
struct element_type_dispatcher {
/**
* @brief Specialization for reducing floating-point column types to any output type.
*/
template <typename ElementType,
std::enable_if_t<std::is_floating_point_v<ElementType>>* = nullptr>
std::unique_ptr<scalar> reduce_numeric(column_view const& col,
data_type const output_type,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto result = !cudf::is_dictionary(col.type())
? simple_reduction<ElementType, double, Op>(col, init, stream, mr)
: dictionary_reduction<ElementType, double, Op>(col, init, stream, mr);
if (output_type == result->type()) return result;
// this will cast the result to the output_type
return cudf::type_dispatcher(output_type,
cast_numeric_scalar_fn<double>{},
static_cast<numeric_scalar<double>*>(result.get()),
stream,
mr);
}
/**
* @brief Specialization for reducing integer column types to any output type.
*/
template <typename ElementType, std::enable_if_t<std::is_integral_v<ElementType>>* = nullptr>
std::unique_ptr<scalar> reduce_numeric(column_view const& col,
data_type const output_type,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto result = !cudf::is_dictionary(col.type())
? simple_reduction<ElementType, int64_t, Op>(col, init, stream, mr)
: dictionary_reduction<ElementType, int64_t, Op>(col, init, stream, mr);
if (output_type == result->type()) return result;
// this will cast the result to the output_type
return cudf::type_dispatcher(output_type,
cast_numeric_scalar_fn<int64_t>{},
static_cast<numeric_scalar<int64_t>*>(result.get()),
stream,
mr);
}
/**
* @brief Called by the type-dispatcher to reduce the input column `col` using
* the `Op` operation.
*
* @tparam ElementType The input column type or key type
* @param col Input column (must be numeric)
* @param output_type Requested type of the scalar result
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
*/
template <typename ElementType, std::enable_if_t<cudf::is_numeric<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
data_type const output_type,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (output_type.id() == cudf::type_to_id<ElementType>())
return !cudf::is_dictionary(col.type())
? simple_reduction<ElementType, ElementType, Op>(col, init, stream, mr)
: dictionary_reduction<ElementType, ElementType, Op>(col, init, stream, mr);
// reduce and map to output type
return reduce_numeric<ElementType>(col, output_type, init, stream, mr);
}
/**
* @brief Specialization for reducing fixed_point column types to fixed_point number
*/
template <typename ElementType, std::enable_if_t<cudf::is_fixed_point<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
data_type const output_type,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(output_type == col.type(), "Output type must be same as input column type.");
return fixed_point_reduction<ElementType, Op>(col, init, stream, mr);
}
template <typename ElementType,
std::enable_if_t<not cudf::is_numeric<ElementType>() and
not cudf::is_fixed_point<ElementType>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const&,
data_type const,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Reduction operator not supported for this type");
}
};
} // namespace detail
} // namespace simple
} // namespace reduction
} // namespace cudf
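// Public-API sketch of the widen-then-cast path above (illustrative only; assumes an
// INT32 cudf::column_view `input`):
//
//   auto const agg = cudf::make_sum_aggregation<cudf::reduce_aggregation>();
//   // the reduction accumulates in int64_t, then casts to the requested INT32 output
//   auto const out = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::INT32});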
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/collect_ops.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_factories.hpp>
namespace cudf {
namespace reduction {
namespace detail {
namespace {
/**
* @brief Check if we need to handle nulls in the input column.
*
* @param input The input column
* @param null_handling The null handling policy
* @return A boolean value indicating if we need to handle nulls
*/
bool need_handle_nulls(column_view const& input, null_policy null_handling)
{
return null_handling == null_policy::EXCLUDE && input.has_nulls();
}
} // namespace
std::unique_ptr<scalar> collect_list(column_view const& col,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (need_handle_nulls(col, null_handling)) {
auto d_view = column_device_view::create(col, stream);
auto filter = cudf::detail::validity_accessor(*d_view);
auto null_purged_table = cudf::detail::copy_if(table_view{{col}}, filter, stream, mr);
column* null_purged_col = null_purged_table->release().front().release();
null_purged_col->set_null_mask(rmm::device_buffer{0, stream, mr}, 0);
return std::make_unique<list_scalar>(std::move(*null_purged_col), true, stream, mr);
} else {
return make_list_scalar(col, stream, mr);
}
}
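// Illustrative sketch of the public path to this function (assumes a cudf::column_view
// `input` holding [1, null, 3]):
//
//   auto const agg = cudf::make_collect_list_aggregation<cudf::reduce_aggregation>(
//     cudf::null_policy::EXCLUDE);
//   auto const result = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::LIST});
//   // the returned cudf::list_scalar wraps the column [1, 3]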
std::unique_ptr<scalar> merge_lists(lists_column_view const& col,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto flatten_col = col.get_sliced_child(stream);
return make_list_scalar(flatten_col, stream, mr);
}
std::unique_ptr<scalar> collect_set(column_view const& col,
null_policy null_handling,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
  // `input_as_collect_list` is the input column after it has been processed to obey
  // the given null handling behavior.
[[maybe_unused]] auto const [input_as_collect_list, unused_scalar] = [&] {
if (need_handle_nulls(col, null_handling)) {
// Only call `collect_list` when we need to handle nulls.
auto scalar = collect_list(col, null_handling, stream, mr);
return std::pair(static_cast<list_scalar*>(scalar.get())->view(), std::move(scalar));
}
return std::pair(col, std::unique_ptr<scalar>(nullptr));
}();
auto distinct_table = cudf::detail::distinct(table_view{{input_as_collect_list}},
std::vector<size_type>{0},
duplicate_keep_option::KEEP_ANY,
nulls_equal,
nans_equal,
stream,
mr);
return std::make_unique<list_scalar>(std::move(distinct_table->get_column(0)), true, stream, mr);
}
std::unique_ptr<scalar> merge_sets(lists_column_view const& col,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto flatten_col = col.get_sliced_child(stream);
auto distinct_table = cudf::detail::distinct(table_view{{flatten_col}},
std::vector<size_type>{0},
duplicate_keep_option::KEEP_ANY,
nulls_equal,
nans_equal,
stream,
mr);
return std::make_unique<list_scalar>(std::move(distinct_table->get_column(0)), true, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
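//
// Illustrative usage sketch (hypothetical, not part of the original source file). It shows one
// way the detail collect_set reduction above could be invoked directly; the wrapper name is
// made up, and the sketch assumes <cudf/utilities/default_stream.hpp> and the rmm
// per-device-resource header are reachable from this translation unit. Callers in the library
// typically reach these functions through the public reduction APIs instead.
std::unique_ptr<cudf::scalar> example_collect_distinct(cudf::column_view const& col)
{
  // Drop nulls from the input, then keep only distinct elements; nulls and NaNs that remain are
  // compared as equal when deciding distinctness.
  return cudf::reduction::detail::collect_set(col,
                                              cudf::null_policy::EXCLUDE,
                                              cudf::null_equality::EQUAL,
                                              cudf::nan_equality::ALL_EQUAL,
                                              cudf::get_default_stream(),
                                              rmm::mr::get_current_device_resource());
}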
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/histogram.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/hash_reduce_by_row.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cuda/atomic>
#include <optional>
namespace cudf::reduction::detail {
namespace {
// Always use 64-bit signed integer for storing count.
using histogram_count_type = int64_t;
/**
* @brief The functor to accumulate the frequency of each distinct rows in the input table.
*/
template <typename MapView, typename KeyHasher, typename KeyEqual, typename CountType>
struct reduce_fn : cudf::detail::reduce_by_row_fn_base<MapView, KeyHasher, KeyEqual, CountType> {
CountType const* d_partial_output;
reduce_fn(MapView const& d_map,
KeyHasher const& d_hasher,
KeyEqual const& d_equal,
CountType* const d_output,
CountType const* const d_partial_output)
: cudf::detail::reduce_by_row_fn_base<MapView, KeyHasher, KeyEqual, CountType>{d_map,
d_hasher,
d_equal,
d_output},
d_partial_output{d_partial_output}
{
}
// Count the number of rows in each group of rows that are compared equal.
__device__ void operator()(size_type const idx) const
{
auto const increment = d_partial_output ? d_partial_output[idx] : CountType{1};
auto const count =
cuda::atomic_ref<CountType, cuda::thread_scope_device>(*this->get_output_ptr(idx));
count.fetch_add(increment, cuda::std::memory_order_relaxed);
}
};
/**
* @brief The builder to construct an instance of `reduce_fn` functor.
*/
template <typename CountType>
struct reduce_func_builder {
CountType const* const d_partial_output;
reduce_func_builder(CountType const* const d_partial_output) : d_partial_output{d_partial_output}
{
}
template <typename MapView, typename KeyHasher, typename KeyEqual>
auto build(MapView const& d_map,
KeyHasher const& d_hasher,
KeyEqual const& d_equal,
CountType* const d_output)
{
return reduce_fn<MapView, KeyHasher, KeyEqual, CountType>{
d_map, d_hasher, d_equal, d_output, d_partial_output};
}
};
/**
* @brief Specialized functor to check for not-zero of the second component of the input.
*/
struct is_not_zero {
template <typename Pair>
__device__ bool operator()(Pair const input) const
{
return thrust::get<1>(input) != 0;
}
};
/**
 * @brief Build a histogram by gathering distinct rows from the input table together with
 * their corresponding distinct counts.
*
* @param input The input table
* @param distinct_indices Indices of the distinct rows
* @param distinct_counts Distinct counts corresponding to the distinct rows
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned object's device memory
* @return A list_scalar storing the output histogram
*/
auto gather_histogram(table_view const& input,
device_span<size_type const> distinct_indices,
std::unique_ptr<column>&& distinct_counts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto distinct_rows = cudf::detail::gather(input,
distinct_indices,
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
std::vector<std::unique_ptr<column>> struct_children;
struct_children.emplace_back(std::move(distinct_rows->release().front()));
struct_children.emplace_back(std::move(distinct_counts));
auto output_structs = make_structs_column(
static_cast<size_type>(distinct_indices.size()), std::move(struct_children), 0, {}, stream, mr);
return std::make_unique<cudf::list_scalar>(
std::move(*output_structs.release()), true, stream, mr);
}
} // namespace
std::unique_ptr<column> make_empty_histogram_like(column_view const& values)
{
std::vector<std::unique_ptr<column>> struct_children;
struct_children.emplace_back(empty_like(values));
struct_children.emplace_back(make_numeric_column(data_type{type_id::INT64}, 0));
return std::make_unique<column>(data_type{type_id::STRUCT},
0,
rmm::device_buffer{},
rmm::device_buffer{},
0,
std::move(struct_children));
}
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>, std::unique_ptr<column>>
compute_row_frequencies(table_view const& input,
std::optional<column_view> const& partial_counts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const has_nested_columns = cudf::detail::has_nested_columns(input);
  // Nested types are not yet tested, so we throw an exception for such inputs for now.
  // We should remove this check once there is sufficient test coverage.
CUDF_EXPECTS(!has_nested_columns,
"Nested types are not yet supported in histogram aggregation.",
std::invalid_argument);
auto map = cudf::detail::hash_map_type{
compute_hash_table_size(input.num_rows()),
cuco::empty_key{-1},
cuco::empty_value{std::numeric_limits<size_type>::min()},
cudf::detail::hash_table_allocator_type{default_allocator<char>{}, stream},
stream.value()};
auto const preprocessed_input =
cudf::experimental::row::hash::preprocessed_table::create(input, stream);
auto const has_nulls = nullate::DYNAMIC{cudf::has_nested_nulls(input)};
auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input);
auto const key_hasher = row_hasher.device_hasher(has_nulls);
auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input);
auto const pair_iter = cudf::detail::make_counting_transform_iterator(
size_type{0}, [] __device__(size_type const i) { return cuco::make_pair(i, i); });
// Always compare NaNs as equal.
using nan_equal_comparator =
cudf::experimental::row::equality::nan_equal_physical_equality_comparator;
auto const value_comp = nan_equal_comparator{};
if (has_nested_columns) {
auto const key_equal = row_comp.equal_to<true>(has_nulls, null_equality::EQUAL, value_comp);
map.insert(pair_iter, pair_iter + input.num_rows(), key_hasher, key_equal, stream.value());
} else {
auto const key_equal = row_comp.equal_to<false>(has_nulls, null_equality::EQUAL, value_comp);
map.insert(pair_iter, pair_iter + input.num_rows(), key_hasher, key_equal, stream.value());
}
// Gather the indices of distinct rows.
auto distinct_indices = std::make_unique<rmm::device_uvector<size_type>>(
static_cast<size_type>(map.get_size()), stream, mr);
// Store the number of occurrences of each distinct row.
auto distinct_counts = make_numeric_column(data_type{type_to_id<histogram_count_type>()},
static_cast<size_type>(map.get_size()),
mask_state::UNALLOCATED,
stream,
mr);
// Compute frequencies (aka distinct counts) for the input rows.
// Note that we consider null and NaNs as always equal.
auto const reduction_results = cudf::detail::hash_reduce_by_row(
map,
preprocessed_input,
input.num_rows(),
has_nulls,
has_nested_columns,
null_equality::EQUAL,
nan_equality::ALL_EQUAL,
reduce_func_builder<histogram_count_type>{
partial_counts ? partial_counts.value().begin<histogram_count_type>() : nullptr},
histogram_count_type{0},
stream,
rmm::mr::get_current_device_resource());
auto const input_it = thrust::make_zip_iterator(
thrust::make_tuple(thrust::make_counting_iterator(0), reduction_results.begin()));
auto const output_it = thrust::make_zip_iterator(thrust::make_tuple(
distinct_indices->begin(), distinct_counts->mutable_view().begin<histogram_count_type>()));
// Reduction results above are either group sizes of equal rows, or `0`.
// The final output is non-zero group sizes only.
thrust::copy_if(
rmm::exec_policy(stream), input_it, input_it + input.num_rows(), output_it, is_not_zero{});
return {std::move(distinct_indices), std::move(distinct_counts)};
}
std::unique_ptr<cudf::scalar> histogram(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Empty group should be handled before reaching here.
CUDF_EXPECTS(input.size() > 0, "Input should not be empty.", std::invalid_argument);
auto const input_tv = table_view{{input}};
auto [distinct_indices, distinct_counts] =
compute_row_frequencies(input_tv, std::nullopt, stream, mr);
return gather_histogram(input_tv, *distinct_indices, std::move(distinct_counts), stream, mr);
}
std::unique_ptr<cudf::scalar> merge_histogram(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Empty group should be handled before reaching here.
CUDF_EXPECTS(input.size() > 0, "Input should not be empty.", std::invalid_argument);
CUDF_EXPECTS(!input.has_nulls(), "The input column must not have nulls.", std::invalid_argument);
CUDF_EXPECTS(input.type().id() == type_id::STRUCT && input.num_children() == 2,
"The input must be a structs column having two children.",
std::invalid_argument);
CUDF_EXPECTS(cudf::is_integral(input.child(1).type()) && !input.child(1).has_nulls(),
"The second child of the input column must be of integral type and without nulls.",
std::invalid_argument);
auto const structs_cv = structs_column_view{input};
auto const input_values = structs_cv.get_sliced_child(0, stream);
auto const input_counts = structs_cv.get_sliced_child(1, stream);
auto const values_tv = table_view{{input_values}};
auto [distinct_indices, distinct_counts] =
compute_row_frequencies(values_tv, input_counts, stream, mr);
return gather_histogram(values_tv, *distinct_indices, std::move(distinct_counts), stream, mr);
}
} // namespace cudf::reduction::detail
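//
// Illustrative usage sketch (hypothetical, not part of the original source file). It calls the
// detail histogram entry point defined above on a flat (non-nested) column; the wrapper name is
// made up and <cudf/utilities/default_stream.hpp> plus the rmm per-device-resource header are
// assumed to be reachable here. The returned list_scalar holds structs of {value, count}, one
// element per distinct value, and the input must be non-empty (the function asserts this).
std::unique_ptr<cudf::scalar> example_histogram(cudf::column_view const& input)
{
  return cudf::reduction::detail::histogram(
    input, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}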
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/reductions/std.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compound.cuh"
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::scalar> standard_deviation(column_view const& col,
cudf::data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// TODO: add cuda version check when the fix is available
#if !defined(__CUDACC_DEBUG__)
using reducer = compound::detail::element_type_dispatcher<op::standard_deviation>;
auto col_type =
cudf::is_dictionary(col.type()) ? dictionary_column_view(col).keys().type() : col.type();
return cudf::type_dispatcher(col_type, reducer(), col, output_dtype, ddof, stream, mr);
#else
  // Workaround for bug 200529165, which causes a compilation error only in device debug builds.
  // Hopefully the bug will be fixed in a future CUDA version (still failing in 11.2).
CUDF_FAIL("var/std reductions are not supported at debug build.");
#endif
}
} // namespace detail
} // namespace reduction
} // namespace cudf
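//
// Illustrative usage sketch (hypothetical, not part of the original source file). It computes the
// sample standard deviation (ddof = 1, i.e. divisor N - 1) of a column as FLOAT64 through the
// detail entry point above; the wrapper name is made up and the default-stream / rmm resource
// headers are assumed to be reachable from this translation unit.
std::unique_ptr<cudf::scalar> example_sample_stddev(cudf::column_view const& col)
{
  return cudf::reduction::detail::standard_deviation(col,
                                                     cudf::data_type{cudf::type_id::FLOAT64},
                                                     1,  // ddof
                                                     cudf::get_default_stream(),
                                                     rmm::mr::get_current_device_resource());
}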
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/scan/scan_exclusive.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "scan.cuh"
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/null_mask.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/scan.h>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Dispatcher for running a scan operation on an input column
*
* @tparam Op device binary operator (e.g. min, max, sum)
*/
template <typename Op>
struct scan_dispatcher {
public:
/**
* @brief Creates a new column from input column by applying exclusive scan operation
*
* @tparam T type of input column
*
* @param input Input column view
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column with scan results
*/
template <typename T, std::enable_if_t<cuda::std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
bitmask_type const*,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output_column =
detail::allocate_like(input, input.size(), mask_allocation_policy::NEVER, stream, mr);
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input, stream);
auto identity = Op::template identity<T>();
auto begin = make_null_replacement_iterator(*d_input, identity, input.has_nulls());
thrust::exclusive_scan(
rmm::exec_policy(stream), begin, begin + input.size(), output.data<T>(), identity, Op{});
CUDF_CHECK_CUDA(stream.value());
return output_column;
}
template <typename T, typename... Args>
std::enable_if_t<not cuda::std::is_arithmetic_v<T>, std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Non-arithmetic types not supported for exclusive scan");
}
};
} // namespace
std::unique_ptr<column> scan_exclusive(column_view const& input,
scan_aggregation const& agg,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [mask, null_count] = [&] {
if (null_handling == null_policy::EXCLUDE) {
return std::make_pair(std::move(detail::copy_bitmask(input, stream, mr)), input.null_count());
} else if (input.nullable()) {
return mask_scan(input, scan_type::EXCLUSIVE, stream, mr);
}
return std::make_pair(rmm::device_buffer{}, size_type{0});
}();
auto output = scan_agg_dispatch<scan_dispatcher>(
input, agg, static_cast<bitmask_type*>(mask.data()), stream, mr);
output->set_null_mask(mask, null_count);
return output;
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/scan/scan.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/reduction.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <utility>
namespace cudf {
namespace detail {
// logical-and scan of the null mask of the input view
std::pair<rmm::device_buffer, size_type> mask_scan(column_view const& input_view,
scan_type inclusive,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
template <template <typename> typename DispatchFn>
std::unique_ptr<column> scan_agg_dispatch(column_view const& input,
scan_aggregation const& agg,
bitmask_type const* output_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
switch (agg.kind) {
case aggregation::SUM:
return type_dispatcher<dispatch_storage_type>(
input.type(), DispatchFn<DeviceSum>(), input, output_mask, stream, mr);
case aggregation::MIN:
return type_dispatcher<dispatch_storage_type>(
input.type(), DispatchFn<DeviceMin>(), input, output_mask, stream, mr);
case aggregation::MAX:
return type_dispatcher<dispatch_storage_type>(
input.type(), DispatchFn<DeviceMax>(), input, output_mask, stream, mr);
case aggregation::PRODUCT:
// a product scan on a decimal type with non-zero scale would result in each element having
// a different scale, and because scale is stored once per column, this is not possible
if (is_fixed_point(input.type())) CUDF_FAIL("decimal32/64/128 cannot support product scan");
return type_dispatcher<dispatch_storage_type>(
input.type(), DispatchFn<DeviceProduct>(), input, output_mask, stream, mr);
default: CUDF_FAIL("Unsupported aggregation operator for scan");
}
}
} // namespace detail
} // namespace cudf
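//
// Illustrative sketch (hypothetical, not part of the original header). It spells out the interface
// a DispatchFn template must expose for scan_agg_dispatch above to hand it to the type dispatcher:
// a call operator templated on the column element type, taking the input column, the precomputed
// output null mask, a stream, and a memory resource. The struct name and its trivial body are
// made up; the real dispatchers live in scan_exclusive.cu and scan_inclusive.cu.
template <typename Op>
struct example_scan_dispatch_fn {
  template <typename T>
  std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
                                           cudf::bitmask_type const*,
                                           rmm::cuda_stream_view,
                                           rmm::mr::device_memory_resource*) const
  {
    CUDF_FAIL("example only: see scan_exclusive.cu and scan_inclusive.cu for real dispatchers");
  }
};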
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/scan/scan_inclusive.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <reductions/scan/scan.cuh>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/reduction.hpp>
#include <cudf/strings/detail/scan.hpp>
#include <cudf/structs/detail/scan.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/find.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <type_traits>
namespace cudf {
namespace detail {
// logical-and scan of the null mask of the input view
std::pair<rmm::device_buffer, size_type> mask_scan(column_view const& input_view,
scan_type inclusive,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_buffer mask =
detail::create_null_mask(input_view.size(), mask_state::UNINITIALIZED, stream, mr);
auto d_input = column_device_view::create(input_view, stream);
auto valid_itr = detail::make_validity_iterator(*d_input);
auto first_null_position = [&] {
size_type const first_null =
thrust::find_if_not(
rmm::exec_policy(stream), valid_itr, valid_itr + input_view.size(), thrust::identity{}) -
valid_itr;
size_type const exclusive_offset = (inclusive == scan_type::EXCLUSIVE) ? 1 : 0;
return std::min(input_view.size(), first_null + exclusive_offset);
}();
set_null_mask(static_cast<bitmask_type*>(mask.data()), 0, first_null_position, true, stream);
set_null_mask(
static_cast<bitmask_type*>(mask.data()), first_null_position, input_view.size(), false, stream);
return {std::move(mask), input_view.size() - first_null_position};
}
namespace {
template <typename Op, typename T>
struct scan_functor {
static std::unique_ptr<column> invoke(column_view const& input_view,
bitmask_type const*,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output_column = detail::allocate_like(
input_view, input_view.size(), mask_allocation_policy::NEVER, stream, mr);
mutable_column_view result = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
auto const begin =
make_null_replacement_iterator(*d_input, Op::template identity<T>(), input_view.has_nulls());
thrust::inclusive_scan(
rmm::exec_policy(stream), begin, begin + input_view.size(), result.data<T>(), Op{});
CUDF_CHECK_CUDA(stream.value());
return output_column;
}
};
template <typename Op>
struct scan_functor<Op, cudf::string_view> {
static std::unique_ptr<column> invoke(column_view const& input_view,
bitmask_type const* mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::strings::detail::scan_inclusive<Op>(input_view, mask, stream, mr);
}
};
template <typename Op>
struct scan_functor<Op, cudf::struct_view> {
static std::unique_ptr<column> invoke(column_view const& input,
bitmask_type const*,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::structs::detail::scan_inclusive<Op>(input, stream, mr);
}
};
/**
* @brief Dispatcher for running a Scan operation on an input column
*
* @tparam Op device binary operator
*/
template <typename Op>
struct scan_dispatcher {
private:
template <typename T>
static constexpr bool is_supported()
{
if constexpr (std::is_same_v<T, cudf::struct_view>) {
return std::is_same_v<Op, DeviceMin> || std::is_same_v<Op, DeviceMax>;
} else {
return std::is_invocable_v<Op, T, T> && !cudf::is_dictionary<T>();
}
}
public:
/**
* @brief Creates a new column from the input column by applying the scan operation
*
* @param input Input column view
   * @param output_mask Precomputed null mask to apply to the output column
   * @param stream CUDA stream used for device memory operations and kernel launches.
   * @param mr Device memory resource used to allocate the returned column's device memory
   * @return Output column with the inclusive scan results
*
* @tparam T type of input column
*/
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
bitmask_type const* output_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return scan_functor<Op, T>::invoke(input, output_mask, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Unsupported type for inclusive scan operation");
}
};
} // namespace
std::unique_ptr<column> scan_inclusive(column_view const& input,
scan_aggregation const& agg,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [mask, null_count] = [&] {
if (null_handling == null_policy::EXCLUDE) {
return std::make_pair(std::move(detail::copy_bitmask(input, stream, mr)), input.null_count());
} else if (input.nullable()) {
return mask_scan(input, scan_type::INCLUSIVE, stream, mr);
}
return std::make_pair(rmm::device_buffer{}, size_type{0});
}();
auto output = scan_agg_dispatch<scan_dispatcher>(
input, agg, static_cast<bitmask_type*>(mask.data()), stream, mr);
output->set_null_mask(mask, null_count);
// If the input is a structs column, we also need to push down nulls from the parent output column
// into the children columns.
if (input.type().id() == type_id::STRUCT && output->has_nulls()) {
auto const num_rows = output->size();
auto const null_count = output->null_count();
auto content = output->release();
// Build new children columns.
auto const null_mask = reinterpret_cast<bitmask_type const*>(content.null_mask->data());
std::for_each(content.children.begin(),
content.children.end(),
[null_mask, null_count, stream, mr](auto& child) {
child = structs::detail::superimpose_nulls(
null_mask, null_count, std::move(child), stream, mr);
});
// Replace the children columns.
output = cudf::make_structs_column(
num_rows, std::move(content.children), null_count, std::move(*content.null_mask), stream, mr);
}
return output;
}
} // namespace detail
} // namespace cudf
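//
// Illustrative usage sketch (hypothetical, not part of the original source file). It runs an
// inclusive cumulative-min scan through the detail entry point above; with null_policy::EXCLUDE,
// null input rows keep a null result and are skipped by the scan. The wrapper name is made up,
// and the aggregation-factory / default-stream / rmm resource headers are assumed to be reachable
// from this translation unit.
std::unique_ptr<cudf::column> example_cumulative_min(cudf::column_view const& input)
{
  auto const agg = cudf::make_min_aggregation<cudf::scan_aggregation>();
  return cudf::detail::scan_inclusive(input,
                                      *agg,
                                      cudf::null_policy::EXCLUDE,
                                      cudf::get_default_stream(),
                                      rmm::mr::get_current_device_resource());
}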
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/scan/rank_scan.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/scan.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
namespace {
template <typename device_comparator_type, typename value_resolver>
struct rank_equality_functor {
rank_equality_functor(device_comparator_type comparator, value_resolver resolver)
: _comparator(comparator), _resolver(resolver)
{
}
auto __device__ operator()(size_type row_index) const noexcept
{
return _resolver(row_index == 0 || !_comparator(row_index, row_index - 1), row_index);
}
private:
device_comparator_type _comparator;
value_resolver _resolver;
};
/**
 * @brief Generate row ranks or dense ranks using a row comparison, then scan the results
 *
 * @tparam value_resolver flag value resolver with boolean first and row number arguments
 * @tparam scan_operator scan function run on the flag values
 * @param order_by input column to generate ranks for
 * @param resolver flag value resolver
 * @param scan_op scan operation run on the flag results
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return std::unique_ptr<column> rank values
*/
template <typename value_resolver, typename scan_operator>
std::unique_ptr<column> rank_generator(column_view const& order_by,
value_resolver resolver,
scan_operator scan_op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const order_by_tview = table_view{{order_by}};
auto comp = cudf::experimental::row::equality::self_comparator(order_by_tview, stream);
auto ranks = make_fixed_width_column(
data_type{type_to_id<size_type>()}, order_by.size(), mask_state::UNALLOCATED, stream, mr);
auto mutable_ranks = ranks->mutable_view();
auto const comparator_helper = [&](auto const device_comparator) {
thrust::tabulate(rmm::exec_policy(stream),
mutable_ranks.begin<size_type>(),
mutable_ranks.end<size_type>(),
rank_equality_functor<decltype(device_comparator), value_resolver>(
device_comparator, resolver));
};
if (cudf::detail::has_nested_columns(order_by_tview)) {
auto const device_comparator =
comp.equal_to<true>(nullate::DYNAMIC{has_nested_nulls(table_view({order_by}))});
comparator_helper(device_comparator);
} else {
auto const device_comparator =
comp.equal_to<false>(nullate::DYNAMIC{has_nested_nulls(table_view({order_by}))});
comparator_helper(device_comparator);
}
thrust::inclusive_scan(rmm::exec_policy(stream),
mutable_ranks.begin<size_type>(),
mutable_ranks.end<size_type>(),
mutable_ranks.begin<size_type>(),
scan_op);
return ranks;
}
} // namespace
std::unique_ptr<column> inclusive_dense_rank_scan(column_view const& order_by,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return rank_generator(
order_by,
[] __device__(bool const unequal, size_type const) { return unequal ? 1 : 0; },
DeviceSum{},
stream,
mr);
}
std::unique_ptr<column> inclusive_rank_scan(column_view const& order_by,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!cudf::structs::detail::is_or_has_nested_lists(order_by),
"Unsupported list type in rank scan.");
return rank_generator(
order_by,
[] __device__(bool unequal, auto row_index) { return unequal ? row_index + 1 : 0; },
DeviceMax{},
stream,
mr);
}
std::unique_ptr<column> inclusive_one_normalized_percent_rank_scan(
column_view const& order_by, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
auto const rank_column =
inclusive_rank_scan(order_by, stream, rmm::mr::get_current_device_resource());
auto const rank_view = rank_column->view();
// Result type for min 0-index percent rank is independent of input type.
using result_type = double;
auto percent_rank_result = cudf::make_fixed_width_column(
data_type{type_to_id<result_type>()}, rank_view.size(), mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
rank_view.begin<size_type>(),
rank_view.end<size_type>(),
percent_rank_result->mutable_view().begin<result_type>(),
[n_rows = rank_view.size()] __device__(auto const rank) {
return n_rows == 1 ? 0.0 : ((rank - 1.0) / (n_rows - 1));
});
return percent_rank_result;
}
} // namespace detail
} // namespace cudf
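//
// Illustrative usage sketch (hypothetical, not part of the original source file). For a presorted
// order_by column such as {10, 10, 20, 20, 20, 30}, the dense rank scan above yields
// {1, 1, 2, 2, 2, 3}: each run of equal adjacent rows receives the next consecutive rank. The
// wrapper name is made up and the default-stream / rmm resource headers are assumed to be
// reachable from this translation unit.
std::unique_ptr<cudf::column> example_dense_ranks(cudf::column_view const& order_by)
{
  return cudf::detail::inclusive_dense_rank_scan(
    order_by, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}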
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/scan/scan.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scan.hpp>
#include <cudf/reduction.hpp>
#include <cudf/utilities/default_stream.hpp>
namespace cudf {
namespace detail {
std::unique_ptr<column> scan(column_view const& input,
scan_aggregation const& agg,
scan_type inclusive,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (agg.kind == aggregation::RANK) {
CUDF_EXPECTS(inclusive == scan_type::INCLUSIVE,
"Rank aggregation operator requires an inclusive scan");
auto const& rank_agg = static_cast<cudf::detail::rank_aggregation const&>(agg);
if (rank_agg._method == rank_method::MIN) {
if (rank_agg._percentage == rank_percentage::NONE) {
return inclusive_rank_scan(input, stream, mr);
} else if (rank_agg._percentage == rank_percentage::ONE_NORMALIZED) {
return inclusive_one_normalized_percent_rank_scan(input, stream, mr);
}
} else if (rank_agg._method == rank_method::DENSE) {
return inclusive_dense_rank_scan(input, stream, mr);
}
CUDF_FAIL("Unsupported rank aggregation method for inclusive scan");
}
return inclusive == scan_type::EXCLUSIVE
? detail::scan_exclusive(input, agg, null_handling, stream, mr)
: detail::scan_inclusive(input, agg, null_handling, stream, mr);
}
} // namespace detail
std::unique_ptr<column> scan(column_view const& input,
scan_aggregation const& agg,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::scan(input, agg, inclusive, null_handling, cudf::get_default_stream(), mr);
}
} // namespace cudf
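//
// Illustrative usage sketch (hypothetical, not part of the original source file). It computes an
// inclusive prefix sum through the public cudf::scan entry point defined above; the wrapper name
// is made up, and <cudf/aggregation.hpp> plus the rmm per-device-resource header are assumed to
// be reachable here.
std::unique_ptr<cudf::column> example_prefix_sum(cudf::column_view const& input)
{
  auto const agg = cudf::make_sum_aggregation<cudf::scan_aggregation>();
  return cudf::scan(input,
                    *agg,
                    cudf::scan_type::INCLUSIVE,
                    cudf::null_policy::EXCLUDE,
                    rmm::mr::get_current_device_resource());
}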
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/update_validity.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <optional>
namespace cudf {
namespace reduction {
namespace detail {
/**
* @brief Compute the validity mask and set it on the result column
*
* If `null_handling == null_policy::INCLUDE`, all elements in a segment must be valid for the
* reduced value to be valid.
* If `null_handling == null_policy::EXCLUDE`, the reduced value is valid if any element
* in the segment is valid.
*
* @param result Result of segmented reduce to update the null mask
* @param col Input column before reduce
* @param offsets Indices to segment boundaries
* @param null_handling How null entries are processed within each segment
* @param init Optional initial value
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*/
void segmented_update_validity(column& result,
column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/sum_of_squares.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_sum_of_squares(column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using reducer = simple::detail::column_type_dispatcher<op::sum_of_squares>;
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, output_dtype, null_handling, std::nullopt, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/sum.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_sum(
column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using reducer = simple::detail::column_type_dispatcher<op::sum>;
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, output_dtype, null_handling, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/product.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_product(
column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using reducer = simple::detail::column_type_dispatcher<op::product>;
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, output_dtype, null_handling, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/reductions.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
struct segmented_reduce_dispatch_functor {
column_view const& col;
device_span<size_type const> offsets;
data_type output_dtype;
null_policy null_handling;
std::optional<std::reference_wrapper<scalar const>> init;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
segmented_reduce_dispatch_functor(column_view const& segmented_values,
device_span<size_type const> offsets,
data_type output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: col(segmented_values),
offsets(offsets),
output_dtype(output_dtype),
null_handling(null_handling),
init(init),
stream(stream),
mr(mr)
{
}
segmented_reduce_dispatch_functor(column_view const& segmented_values,
device_span<size_type const> offsets,
data_type output_dtype,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: segmented_reduce_dispatch_functor(
segmented_values, offsets, output_dtype, null_handling, std::nullopt, stream, mr)
{
}
template <segmented_reduce_aggregation::Kind k>
std::unique_ptr<column> operator()(segmented_reduce_aggregation const& agg)
{
switch (k) {
case segmented_reduce_aggregation::SUM:
return segmented_sum(col, offsets, output_dtype, null_handling, init, stream, mr);
case segmented_reduce_aggregation::PRODUCT:
return segmented_product(col, offsets, output_dtype, null_handling, init, stream, mr);
case segmented_reduce_aggregation::MIN:
return segmented_min(col, offsets, output_dtype, null_handling, init, stream, mr);
case segmented_reduce_aggregation::MAX:
return segmented_max(col, offsets, output_dtype, null_handling, init, stream, mr);
case segmented_reduce_aggregation::ANY:
return segmented_any(col, offsets, output_dtype, null_handling, init, stream, mr);
case segmented_reduce_aggregation::ALL:
return segmented_all(col, offsets, output_dtype, null_handling, init, stream, mr);
case segmented_reduce_aggregation::SUM_OF_SQUARES:
return segmented_sum_of_squares(col, offsets, output_dtype, null_handling, stream, mr);
case segmented_reduce_aggregation::MEAN:
return segmented_mean(col, offsets, output_dtype, null_handling, stream, mr);
case segmented_reduce_aggregation::VARIANCE: {
auto var_agg = static_cast<cudf::detail::var_aggregation const&>(agg);
return segmented_variance(
col, offsets, output_dtype, null_handling, var_agg._ddof, stream, mr);
}
case segmented_reduce_aggregation::STD: {
auto var_agg = static_cast<cudf::detail::std_aggregation const&>(agg);
return segmented_standard_deviation(
col, offsets, output_dtype, null_handling, var_agg._ddof, stream, mr);
}
case segmented_reduce_aggregation::NUNIQUE:
return segmented_nunique(col, offsets, null_handling, stream, mr);
default: CUDF_FAIL("Unsupported aggregation type.");
}
}
};
std::unique_ptr<column> segmented_reduce(column_view const& segmented_values,
device_span<size_type const> offsets,
segmented_reduce_aggregation const& agg,
data_type output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!init.has_value() || segmented_values.type() == init.value().get().type(),
"column and initial value must be the same type");
if (init.has_value() && !(agg.kind == aggregation::SUM || agg.kind == aggregation::PRODUCT ||
agg.kind == aggregation::MIN || agg.kind == aggregation::MAX ||
agg.kind == aggregation::ANY || agg.kind == aggregation::ALL)) {
CUDF_FAIL(
"Initial value is only supported for SUM, PRODUCT, MIN, MAX, ANY, and ALL aggregation types");
}
CUDF_EXPECTS(offsets.size() > 0, "`offsets` should have at least 1 element.");
return cudf::detail::aggregation_dispatcher(
agg.kind,
segmented_reduce_dispatch_functor{
segmented_values, offsets, output_dtype, null_handling, init, stream, mr},
agg);
}
} // namespace detail
} // namespace reduction
std::unique_ptr<column> segmented_reduce(column_view const& segmented_values,
device_span<size_type const> offsets,
segmented_reduce_aggregation const& agg,
data_type output_dtype,
null_policy null_handling,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return reduction::detail::segmented_reduce(segmented_values,
offsets,
agg,
output_dtype,
null_handling,
std::nullopt,
cudf::get_default_stream(),
mr);
}
std::unique_ptr<column> segmented_reduce(column_view const& segmented_values,
device_span<size_type const> offsets,
segmented_reduce_aggregation const& agg,
data_type output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return reduction::detail::segmented_reduce(segmented_values,
offsets,
agg,
output_dtype,
null_handling,
init,
cudf::get_default_stream(),
mr);
}
} // namespace cudf
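//
// Illustrative usage sketch (hypothetical, not part of the original source file). It sums each
// segment of `values` delimited by `offsets` (device-resident segment boundaries, one more entry
// than the number of segments) through the public cudf::segmented_reduce overload defined above;
// the wrapper name is made up, and <cudf/aggregation.hpp> plus the rmm per-device-resource header
// are assumed to be reachable here.
std::unique_ptr<cudf::column> example_segmented_sums(
  cudf::column_view const& values, cudf::device_span<cudf::size_type const> offsets)
{
  auto const agg = cudf::make_sum_aggregation<cudf::segmented_reduce_aggregation>();
  return cudf::segmented_reduce(values,
                                offsets,
                                *agg,
                                cudf::data_type{cudf::type_id::INT64},
                                cudf::null_policy::EXCLUDE,
                                rmm::mr::get_current_device_resource());
}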
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/update_validity.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "update_validity.hpp"
#include <cudf/detail/null_mask.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/span.hpp>
namespace cudf {
namespace reduction {
namespace detail {
void segmented_update_validity(column& result,
column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto [output_null_mask, output_null_count] = cudf::detail::segmented_null_mask_reduction(
col.null_mask(),
offsets.begin(),
offsets.end() - 1,
offsets.begin() + 1,
null_handling,
init.has_value() ? std::optional(init.value().get().is_valid()) : std::nullopt,
stream,
mr);
result.set_null_mask(std::move(output_null_mask), output_null_count);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/mean.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compound.cuh"
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_mean(column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using reducer = compound::detail::compound_segmented_dispatcher<op::mean>;
constexpr size_type ddof = 1; // ddof for mean calculation
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, output_dtype, null_handling, ddof, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/compound.cuh
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "counts.hpp"
#include "update_validity.hpp"
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.cuh>
#include <cudf/reduction/detail/segmented_reduction.cuh>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/transform_iterator.h>
namespace cudf {
namespace reduction {
namespace compound {
namespace detail {
/**
* @brief Multi-step reduction for operations such as mean, variance, and standard deviation.
*
* @tparam InputType the input column data-type
* @tparam ResultType the output data-type
* @tparam Op the compound operator derived from `cudf::reduction::op::compound_op`
*
* @param col Input column view
* @param offsets Indices identifying segments
* @param null_handling Indicates if null elements should be included in the reduction
* @param ddof Delta degrees of freedom used for standard deviation and variance.
* The divisor used is N - ddof, where N represents the number of elements.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Segmented reduce result
*/
template <typename InputType, typename ResultType, typename Op>
std::unique_ptr<column> compound_segmented_reduction(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto d_col = cudf::column_device_view::create(col, stream);
auto compound_op = Op{};
auto const num_segments = offsets.size() - 1;
auto result = make_fixed_width_column(
data_type{type_to_id<ResultType>()}, num_segments, mask_state::UNALLOCATED, stream, mr);
auto out_itr = result->mutable_view().template begin<ResultType>();
// Compute counts
rmm::device_uvector<size_type> counts =
cudf::reduction::detail::segmented_counts(col.null_mask(),
col.has_nulls(),
offsets,
null_handling,
stream,
rmm::mr::get_current_device_resource());
// Run segmented reduction
if (col.has_nulls()) {
auto nrt = compound_op.template get_null_replacing_element_transformer<ResultType>();
auto itr = thrust::make_transform_iterator(d_col->pair_begin<InputType, true>(), nrt);
cudf::reduction::detail::segmented_reduce(
itr, offsets.begin(), offsets.end(), out_itr, compound_op, ddof, counts.data(), stream);
} else {
auto et = compound_op.template get_element_transformer<ResultType>();
auto itr = thrust::make_transform_iterator(d_col->begin<InputType>(), et);
cudf::reduction::detail::segmented_reduce(
itr, offsets.begin(), offsets.end(), out_itr, compound_op, ddof, counts.data(), stream);
}
// Compute the output null mask
cudf::reduction::detail::segmented_update_validity(
*result, col, offsets, null_handling, std::nullopt, stream, mr);
return result;
}
template <typename ElementType, typename Op>
struct compound_float_output_dispatcher {
private:
template <typename ResultType>
static constexpr bool is_supported_v()
{
return std::is_floating_point_v<ResultType>;
}
public:
template <typename ResultType, std::enable_if_t<is_supported_v<ResultType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return compound_segmented_reduction<ElementType, ResultType, Op>(
col, offsets, null_handling, ddof, stream, mr);
}
template <typename ResultType, std::enable_if_t<not is_supported_v<ResultType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const&,
device_span<size_type const>,
null_policy,
size_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Unsupported output data type");
}
};
template <typename Op>
struct compound_segmented_dispatcher {
private:
template <typename ElementType>
static constexpr bool is_supported_v()
{
return std::is_arithmetic_v<ElementType>;
}
public:
template <typename ElementType, std::enable_if_t<is_supported_v<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher(output_dtype,
compound_float_output_dispatcher<ElementType, Op>(),
col,
offsets,
null_handling,
ddof,
stream,
mr);
}
template <typename ElementType, std::enable_if_t<not is_supported_v<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const&,
device_span<size_type const>,
cudf::data_type const,
null_policy,
size_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Compound operators are not supported for non-arithmetic types");
}
};
} // namespace detail
} // namespace compound
} // namespace reduction
} // namespace cudf
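//
// Illustrative sketch (hypothetical, not part of the original header). It spells out the
// fully-resolved call that the two dispatchers above ultimately make for a FLOAT32 input column
// reduced to FLOAT64 means; the function name is made up, the ddof value mirrors the one used by
// segmented_mean, and op::mean is assumed to be brought in through the reduction operators pulled
// in by <cudf/reduction/detail/segmented_reduction.cuh>.
inline std::unique_ptr<cudf::column> example_segmented_means_f32(
  cudf::column_view const& col,
  cudf::device_span<cudf::size_type const> offsets,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  return cudf::reduction::compound::detail::
    compound_segmented_reduction<float, double, cudf::reduction::detail::op::mean>(
      col, offsets, cudf::null_policy::EXCLUDE, 1 /* ddof */, stream, mr);
}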
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/min.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_min(
column_view const& col,
device_span<size_type const> offsets,
data_type const output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(col.type() == output_dtype,
"segmented_min() operation requires matching output type");
using reducer = simple::detail::same_column_type_dispatcher<op::min>;
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, null_handling, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/any.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_any(
column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8),
"segmented_any() operation requires output type `BOOL8`");
using reducer = simple::detail::bool_result_column_dispatcher<op::max>;
// A maximum over bool types is used to implement any()
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, null_handling, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
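//
// Illustrative usage sketch (hypothetical, not part of the original source file). The public route
// to this max-over-bools kernel is cudf::segmented_reduce with an ANY aggregation and a BOOL8
// output type; the wrapper name is made up, and <cudf/reduction.hpp>, <cudf/aggregation.hpp>, and
// the rmm per-device-resource header are assumed to be reachable from this translation unit.
std::unique_ptr<cudf::column> example_segmented_any(
  cudf::column_view const& values, cudf::device_span<cudf::size_type const> offsets)
{
  auto const agg = cudf::make_any_aggregation<cudf::segmented_reduce_aggregation>();
  return cudf::segmented_reduce(values,
                                offsets,
                                *agg,
                                cudf::data_type{cudf::type_id::BOOL8},
                                cudf::null_policy::EXCLUDE,
                                rmm::mr::get_current_device_resource());
}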
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/all.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_all(
column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8),
"segmented_all() operation requires output type `BOOL8`");
using reducer = simple::detail::bool_result_column_dispatcher<op::min>;
// A minimum over bool types is used to implement all()
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, null_handling, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/max.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simple.cuh"
#include <cudf/reduction/detail/reduction_functions.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_max(
column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(col.type() == output_dtype,
"segmented_max() operation requires matching output type");
using reducer = simple::detail::same_column_type_dispatcher<op::max>;
return cudf::type_dispatcher(
col.type(), reducer{}, col, offsets, null_handling, init, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/var.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compound.cuh"
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_variance(column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using reducer = compound::detail::compound_segmented_dispatcher<op::variance>;
return cudf::type_dispatcher(
col.type(), reducer(), col, offsets, output_dtype, null_handling, ddof, stream, mr);
}
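// Example sketch (hypothetical usage; `values`, `d_offsets`, `stream`, and `mr` are assumed
// to exist in the caller): per-segment sample variance (ddof = 1) of an INT32 column with
// FLOAT64 output.
//
//   auto var = segmented_variance(values,
//                                 d_offsets,
//                                 cudf::data_type{cudf::type_id::FLOAT64},
//                                 cudf::null_policy::EXCLUDE,
//                                 1,
//                                 stream,
//                                 mr);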
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/counts.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "counts.hpp"
#include <cudf/detail/null_mask.cuh>
#include <thrust/adjacent_difference.h>
namespace cudf {
namespace reduction {
namespace detail {
rmm::device_uvector<size_type> segmented_counts(bitmask_type const* null_mask,
bool has_nulls,
device_span<size_type const> offsets,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_segments = offsets.size() - 1;
if (has_nulls && (null_handling == null_policy::EXCLUDE)) {
return cudf::detail::segmented_count_bits(null_mask,
offsets.begin(),
offsets.end() - 1,
offsets.begin() + 1,
cudf::detail::count_bits_policy::SET_BITS,
stream,
mr);
}
rmm::device_uvector<size_type> valid_counts(num_segments, stream, mr);
thrust::adjacent_difference(
rmm::exec_policy(stream), offsets.begin() + 1, offsets.end(), valid_counts.begin());
return valid_counts;
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/counts.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
namespace cudf {
class column_device_view;
namespace reduction {
namespace detail {
/**
* @brief Compute the number of elements per segment
*
* If `null_handling == null_policy::EXCLUDE`, the count for each
* segment omits any null entries. Otherwise, this returns the number
* of elements in each segment.
*
* @param null_mask Null values over which the segment offsets apply
 * @param has_nulls True if the column owning `null_mask` contains any nulls
* @param offsets Indices to segment boundaries
* @param null_handling How null entries are processed within each segment
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return The number of elements in each segment
*/
rmm::device_uvector<size_type> segmented_counts(bitmask_type const* null_mask,
bool has_nulls,
device_span<size_type const> offsets,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
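// Example sketch (hypothetical usage; `input`, `d_offsets`, `stream`, and `mr` are assumed
// to exist in the caller): count only the valid (non-null) elements of each segment.
//
//   auto counts = segmented_counts(
//     input.null_mask(), input.has_nulls(), d_offsets, null_policy::EXCLUDE, stream, mr);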
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/simple.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "counts.hpp"
#include "update_validity.hpp"
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/element_argminmax.cuh>
#include <cudf/detail/valid_if.cuh>
#include <cudf/reduction/detail/segmented_reduction.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <optional>
#include <type_traits>
namespace cudf {
namespace reduction {
namespace simple {
namespace detail {
/**
 * @brief Segmented reduction for 'sum', 'product', 'min', 'max', 'sum of squares', etc.,
 * which compute the reduction directly with a single segmented-reduce call.
*
* @tparam InputType the input column data-type
* @tparam ResultType the output data-type
* @tparam Op the operator of cudf::reduction::op::
* @param col Input column of data to reduce
* @param offsets Indices to segment boundaries
* @param null_handling How null entries are processed within each segment
* @param init Optional initial value of the reduction
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column in device memory
*/
template <typename InputType, typename ResultType, typename Op>
std::unique_ptr<column> simple_segmented_reduction(
column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dcol = cudf::column_device_view::create(col, stream);
auto simple_op = Op{};
auto const num_segments = offsets.size() - 1;
auto const binary_op = simple_op.get_binary_op();
// Cast initial value
ResultType initial_value = [&] {
if (init.has_value() && init.value().get().is_valid()) {
using ScalarType = cudf::scalar_type_t<InputType>;
auto input_value = static_cast<ScalarType const*>(&init.value().get())->value(stream);
return static_cast<ResultType>(input_value);
} else {
return simple_op.template get_identity<ResultType>();
}
}();
auto const result_type =
cudf::is_fixed_point(col.type()) ? col.type() : data_type{type_to_id<ResultType>()};
auto result =
make_fixed_width_column(result_type, num_segments, mask_state::UNALLOCATED, stream, mr);
auto outit = result->mutable_view().template begin<ResultType>();
if (col.has_nulls()) {
auto f = simple_op.template get_null_replacing_element_transformer<ResultType>();
auto it = thrust::make_transform_iterator(dcol->pair_begin<InputType, true>(), f);
cudf::reduction::detail::segmented_reduce(
it, offsets.begin(), offsets.end(), outit, binary_op, initial_value, stream);
} else {
auto f = simple_op.template get_element_transformer<ResultType>();
auto it = thrust::make_transform_iterator(dcol->begin<InputType>(), f);
cudf::reduction::detail::segmented_reduce(
it, offsets.begin(), offsets.end(), outit, binary_op, initial_value, stream);
}
// Compute the output null mask
cudf::reduction::detail::segmented_update_validity(
*result, col, offsets, null_handling, init, stream, mr);
return result;
}
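// Example sketch (hypothetical usage; `values`, `d_offsets`, `stream`, and `mr` are assumed
// to exist in the caller): a segmented sum of an INT32 column accumulated in int64_t with
// no initial value.
//
//   auto sums = simple_segmented_reduction<int32_t, int64_t, cudf::reduction::detail::op::sum>(
//     values, d_offsets, null_policy::EXCLUDE, std::nullopt, stream, mr);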
/**
* @brief String segmented reduction for 'min', 'max'.
*
* This algorithm uses argmin/argmax as a custom comparator to build a gather
* map, then builds the output.
*
* @tparam InputType the input column data-type
* @tparam Op the operator of cudf::reduction::op::
* @param col Input column of data to reduce
* @param offsets Indices to segment boundaries
* @param null_handling How null entries are processed within each segment
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column in device memory
*/
template <typename InputType,
typename Op,
CUDF_ENABLE_IF(std::is_same_v<Op, cudf::reduction::detail::op::min> ||
std::is_same_v<Op, cudf::reduction::detail::op::max>)>
std::unique_ptr<column> string_segmented_reduction(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
  // Compute an argmin/argmax index per segment with segmented_reduce, then gather those
  // rows into the output column.
auto device_col = cudf::column_device_view::create(col, stream);
auto it = thrust::make_counting_iterator(0);
auto const num_segments = static_cast<size_type>(offsets.size()) - 1;
bool constexpr is_argmin = std::is_same_v<Op, cudf::reduction::detail::op::min>;
auto string_comparator =
cudf::detail::element_argminmax_fn<InputType>{*device_col, col.has_nulls(), is_argmin};
auto constexpr identity =
is_argmin ? cudf::detail::ARGMIN_SENTINEL : cudf::detail::ARGMAX_SENTINEL;
auto gather_map = make_fixed_width_column(
data_type{type_to_id<size_type>()}, num_segments, mask_state::UNALLOCATED, stream, mr);
auto gather_map_it = gather_map->mutable_view().begin<size_type>();
cudf::reduction::detail::segmented_reduce(
it, offsets.begin(), offsets.end(), gather_map_it, string_comparator, identity, stream);
auto result = std::move(cudf::detail::gather(table_view{{col}},
*gather_map,
cudf::out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release()[0]);
// Compute the output null mask
cudf::reduction::detail::segmented_update_validity(
*result, col, offsets, null_handling, std::nullopt, stream, mr);
return result;
}
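// Example sketch (hypothetical usage; `strings_col`, `d_offsets`, `stream`, and `mr` are
// assumed to exist in the caller): the per-segment maximum string, selected through the
// argmax gather map built above.
//
//   auto maxima = string_segmented_reduction<cudf::string_view, cudf::reduction::detail::op::max>(
//     strings_col, d_offsets, null_policy::EXCLUDE, stream, mr);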
template <typename InputType,
typename Op,
          CUDF_ENABLE_IF(!std::is_same_v<Op, cudf::reduction::detail::op::min> &&
                         !std::is_same_v<Op, cudf::reduction::detail::op::max>)>
std::unique_ptr<column> string_segmented_reduction(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Segmented reduction on string column only supports min and max reduction.");
}
/**
* @brief Specialization for fixed-point segmented reduction
*
* @tparam InputType the input column data-type
* @tparam Op the operator of cudf::reduction::op::
* @param col Input column of data to reduce
* @param offsets Indices to segment boundaries
* @param null_handling How null entries are processed within each segment
* @param stream Used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column in device memory
*/
template <typename InputType, typename Op>
std::unique_ptr<column> fixed_point_segmented_reduction(
column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using RepType = device_storage_type_t<InputType>;
auto result =
simple_segmented_reduction<RepType, RepType, Op>(col, offsets, null_handling, init, stream, mr);
auto const scale = [&] {
if constexpr (std::is_same_v<Op, cudf::reduction::detail::op::product>) {
// The product aggregation requires updating the scale of the fixed-point output column.
// The output scale needs to be the maximum count of all segments multiplied by
// the input scale value.
rmm::device_uvector<size_type> const counts =
cudf::reduction::detail::segmented_counts(col.null_mask(),
col.has_nulls(),
offsets,
null_policy::EXCLUDE, // do not count nulls
stream,
rmm::mr::get_current_device_resource());
auto const max_count = thrust::reduce(rmm::exec_policy(stream),
counts.begin(),
counts.end(),
size_type{0},
thrust::maximum<size_type>{});
auto const new_scale = numeric::scale_type{col.type().scale() * max_count};
// adjust values in each segment to match the new scale
auto const d_col = column_device_view::create(col, stream);
thrust::transform(rmm::exec_policy(stream),
d_col->begin<InputType>(),
d_col->end<InputType>(),
d_col->begin<InputType>(),
[new_scale] __device__(auto fp) { return fp.rescaled(new_scale); });
return new_scale;
}
if constexpr (std::is_same_v<Op, cudf::reduction::detail::op::sum_of_squares>) {
return numeric::scale_type{col.type().scale() * 2};
}
return numeric::scale_type{col.type().scale()};
}();
auto const size = result->size(); // get these before
auto const null_count = result->null_count(); // release() is called
auto contents = result->release();
return std::make_unique<column>(data_type{type_to_id<InputType>(), scale},
size,
std::move(*(contents.data.release())),
std::move(*(contents.null_mask.release())),
null_count);
}
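// Worked example of the product rescaling above: a decimal32 column with scale -2 (1.25
// stored as 125) and segment sizes {3, 2} has max_count = 3, so the output scale becomes
// -2 * 3 = -6 and the segment product 1.25 * 1.25 * 1.25 is stored exactly as 1953125.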
/**
* @brief Call reduce and return a column of type bool.
*
* This is used by operations `any()` and `all()`.
*
* @tparam Op The reduce operation to execute on the column.
*/
template <typename Op>
struct bool_result_column_dispatcher {
template <typename ElementType, std::enable_if_t<cudf::is_numeric<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return simple_segmented_reduction<ElementType, bool, Op>(
col, offsets, null_handling, init, stream, mr);
}
template <typename ElementType, std::enable_if_t<not cudf::is_numeric<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const&,
device_span<size_type const>,
null_policy,
std::optional<std::reference_wrapper<scalar const>>,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Reduction operator not supported for this type");
}
};
/**
* @brief Call reduce and return a column of type matching the input column.
*
* This is used by operations `min()` and `max()`.
*
* @tparam Op The reduce operation to execute on the column.
*/
template <typename Op>
struct same_column_type_dispatcher {
private:
template <typename ElementType>
static constexpr bool is_supported()
{
return !(cudf::is_dictionary<ElementType>() || std::is_same_v<ElementType, cudf::list_view> ||
std::is_same_v<ElementType, cudf::struct_view>);
}
public:
template <typename ElementType,
CUDF_ENABLE_IF(is_supported<ElementType>() &&
!std::is_same_v<ElementType, string_view> &&
!cudf::is_fixed_point<ElementType>())>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return simple_segmented_reduction<ElementType, ElementType, Op>(
col, offsets, null_handling, init, stream, mr);
}
template <typename ElementType,
CUDF_ENABLE_IF(is_supported<ElementType>() && std::is_same_v<ElementType, string_view>)>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (init.has_value()) { CUDF_FAIL("Initial value not supported for strings"); }
return string_segmented_reduction<ElementType, Op>(col, offsets, null_handling, stream, mr);
}
template <typename ElementType,
CUDF_ENABLE_IF(is_supported<ElementType>() && cudf::is_fixed_point<ElementType>())>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return fixed_point_segmented_reduction<ElementType, Op>(
col, offsets, null_handling, init, stream, mr);
}
template <typename ElementType, CUDF_ENABLE_IF(!is_supported<ElementType>())>
std::unique_ptr<column> operator()(column_view const&,
device_span<size_type const>,
null_policy,
std::optional<std::reference_wrapper<scalar const>>,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Reduction operator not supported for this type");
}
};
/**
* @brief Call reduce and return a column of the type specified.
*
 * This is used by operations such as sum(), product(), sum_of_squares(), etc.
 * It supports numeric and fixed-point input types. If the output type is not the
 * same as the input type, an extra cast operation may occur.
*
* @tparam Op The reduce operation to execute on the column.
*/
template <typename Op>
struct column_type_dispatcher {
/**
* @brief Specialization for reducing floating-point column types to any output type.
*
* This is called when the output_type does not match the ElementType.
* The input values are promoted to double (via transform-iterator) for the
* reduce calculation. The result is then cast to the specified output_type.
*/
template <typename ElementType,
typename std::enable_if_t<std::is_floating_point<ElementType>::value>* = nullptr>
std::unique_ptr<column> reduce_numeric(column_view const& col,
device_span<size_type const> offsets,
data_type const output_type,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Floats are computed in double precision and then cast to the output type
auto result = simple_segmented_reduction<ElementType, double, Op>(
col, offsets, null_handling, init, stream, mr);
if (output_type == result->type()) { return result; }
return cudf::detail::cast(*result, output_type, stream, mr);
}
/**
* @brief Specialization for reducing integer column types to any output type.
*
* This is called when the output_type does not match the ElementType.
* The input values are promoted to int64_t (via transform-iterator) for the
* reduce calculation. The result is then cast to the specified output_type.
*
* For uint64_t case, the only reasonable output_type is also UINT64 and
* this is not called when the input/output types match.
*/
template <typename ElementType,
typename std::enable_if_t<std::is_integral<ElementType>::value>* = nullptr>
std::unique_ptr<column> reduce_numeric(column_view const& col,
device_span<size_type const> offsets,
data_type const output_type,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Integers are computed in int64 precision and then cast to the output type.
auto result = simple_segmented_reduction<ElementType, int64_t, Op>(
col, offsets, null_handling, init, stream, mr);
if (output_type == result->type()) { return result; }
return cudf::detail::cast(*result, output_type, stream, mr);
}
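  // Worked example of the promotion above: summing an INT8 segment {100, 100, 100} into a
  // requested INT32 output accumulates 300 in int64_t first (avoiding int8 overflow) and
  // only then casts the result column to INT32.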
/**
* @brief Called by the type-dispatcher to reduce the input column `col` using
* the `Op` operation.
*
* @tparam ElementType The input column type or key type
* @param col Input column (must be numeric)
* @param offsets Indices to segment boundaries
* @param output_type Requested type of the output column
 * @param null_handling How null entries are processed within each segment
 * @param init Optional initial value of the reduction
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
*/
template <typename ElementType,
typename std::enable_if_t<cudf::is_numeric<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
data_type const output_type,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// If the output type matches the input type, then reduce using that type
if (output_type.id() == cudf::type_to_id<ElementType>()) {
return simple_segmented_reduction<ElementType, ElementType, Op>(
col, offsets, null_handling, init, stream, mr);
}
// otherwise, reduce and map to output type
return reduce_numeric<ElementType>(col, offsets, output_type, null_handling, init, stream, mr);
}
template <typename ElementType, std::enable_if_t<cudf::is_fixed_point<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
device_span<size_type const> offsets,
data_type const output_type,
null_policy null_handling,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(output_type == col.type(), "Output type must be same as input column type.");
return fixed_point_segmented_reduction<ElementType, Op>(
col, offsets, null_handling, init, stream, mr);
}
template <typename ElementType,
std::enable_if_t<not cudf::is_numeric<ElementType>() and
not cudf::is_fixed_point<ElementType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const&,
device_span<size_type const>,
data_type const,
null_policy,
std::optional<std::reference_wrapper<scalar const>>,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Reduction operator not supported for this type");
}
};
} // namespace detail
} // namespace simple
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/nunique.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "update_validity.hpp"
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/labeling/label_segments.cuh>
#include <cudf/reduction/detail/segmented_reduction.cuh>
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace reduction {
namespace detail {
namespace {
template <typename ComparatorType>
struct is_unique_fn {
column_device_view const d_col;
ComparatorType row_equal;
null_policy null_handling;
size_type const* offsets;
size_type const* labels;
__device__ size_type operator()(size_type idx) const
{
if (null_handling == null_policy::EXCLUDE && d_col.is_null(idx)) { return 0; }
return static_cast<size_type>(offsets[labels[idx]] == idx || (!row_equal(idx, idx - 1)));
}
};
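// Worked example: for values {1, 1, 2 | 3, 3} with offsets {0, 3, 5}, the segment labels are
// {0, 0, 0, 1, 1}; a row scores 1 if it starts its segment or differs from the previous row,
// giving {1, 0, 1, 1, 0}, and the per-segment sums computed below yield {2, 1}.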
} // namespace
std::unique_ptr<cudf::column> segmented_nunique(column_view const& col,
device_span<size_type const> offsets,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// only support non-nested types
CUDF_EXPECTS(!cudf::is_nested(col.type()),
"segmented reduce nunique only supports non-nested column types");
// compute the unique identifiers within each segment
auto const identifiers = [&] {
auto const d_col = column_device_view::create(col, stream);
auto const comparator =
cudf::experimental::row::equality::self_comparator{table_view({col}), stream};
auto const row_equal =
comparator.equal_to<false>(cudf::nullate::DYNAMIC{col.has_nulls()}, null_equality::EQUAL);
auto labels = rmm::device_uvector<size_type>(col.size(), stream);
cudf::detail::label_segments(
offsets.begin(), offsets.end(), labels.begin(), labels.end(), stream);
auto fn = is_unique_fn<decltype(row_equal)>{
*d_col, row_equal, null_handling, offsets.data(), labels.data()};
auto identifiers = rmm::device_uvector<size_type>(col.size(), stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(col.size()),
identifiers.begin(),
fn);
return identifiers;
}();
auto result = cudf::make_numeric_column(data_type(type_to_id<size_type>()),
static_cast<size_type>(offsets.size() - 1),
cudf::mask_state::UNALLOCATED,
stream,
mr);
// Sum the unique identifiers within each segment
auto add_op = op::sum{};
cudf::reduction::detail::segmented_reduce(identifiers.begin(),
offsets.begin(),
offsets.end(),
result->mutable_view().data<size_type>(),
add_op.get_binary_op(),
0,
stream);
// Compute the output null mask
// - only empty segments are tagged as null
// - nulls are counted appropriately above per null_handling policy
auto const bitmask_col = null_handling == null_policy::EXCLUDE ? col : result->view();
cudf::reduction::detail::segmented_update_validity(
*result, bitmask_col, offsets, null_policy::EXCLUDE, std::nullopt, stream, mr);
return result;
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/reductions
|
rapidsai_public_repos/cudf/cpp/src/reductions/segmented/std.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compound.cuh"
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace reduction {
namespace detail {
std::unique_ptr<cudf::column> segmented_standard_deviation(column_view const& col,
device_span<size_type const> offsets,
cudf::data_type const output_dtype,
null_policy null_handling,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using reducer = compound::detail::compound_segmented_dispatcher<op::standard_deviation>;
return cudf::type_dispatcher(
col.type(), reducer(), col, offsets, output_dtype, null_handling, ddof, stream, mr);
}
} // namespace detail
} // namespace reduction
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/linked_column.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/linked_column.hpp>
#include <thrust/iterator/transform_iterator.h>
namespace cudf::detail {
linked_column_view::linked_column_view(column_view const& col) : linked_column_view(nullptr, col) {}
linked_column_view::linked_column_view(linked_column_view* parent, column_view const& col)
: column_view_base(col), parent(parent)
{
children.reserve(col.num_children());
std::transform(
col.child_begin(), col.child_end(), std::back_inserter(children), [&](column_view const& c) {
return std::make_shared<linked_column_view>(this, c);
});
}
linked_column_view::operator column_view() const
{
auto child_it = thrust::make_transform_iterator(
children.begin(), [](auto const& c) { return static_cast<column_view>(*c); });
return column_view(this->type(),
this->size(),
this->head(),
this->null_mask(),
this->null_count(),
this->offset(),
std::vector<column_view>(child_it, child_it + children.size()));
}
LinkedColVector table_to_linked_columns(table_view const& table)
{
auto linked_it = thrust::make_transform_iterator(
table.begin(), [](auto const& c) { return std::make_shared<linked_column_view>(c); });
return LinkedColVector(linked_it, linked_it + table.num_columns());
}
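// Example sketch (hypothetical usage; `tbl` is assumed to be an existing cudf::table_view):
// build the linked tree and navigate from a child column back to its parent.
//
//   auto linked = table_to_linked_columns(tbl);
//   if (not linked.empty() and not linked[0]->children.empty()) {
//     auto const* parent = linked[0]->children[0]->parent;  // points back to linked[0]
//   }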
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/stream_pool.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/logger.hpp>
#include <cudf/detail/utilities/stream_pool.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <algorithm>
#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>
namespace cudf::detail {
namespace {
// TODO: what is a good number here. what's the penalty for making it larger?
// Dave Baranec rule of thumb was max_streams_needed * num_concurrent_threads,
// where num_concurrent_threads was estimated to be 4. so using 32 will allow
// for 8 streams per thread, which should be plenty (decoding will be up to 4
// kernels when delta_byte_array decoding is added). rmm::cuda_stream_pool
// defaults to 16.
std::size_t constexpr STREAM_POOL_SIZE = 32;
// FIXME: "borrowed" from rmm...remove when this stream pool is moved there
#ifdef NDEBUG
#define CUDF_ASSERT_CUDA_SUCCESS(_call) \
do { \
(_call); \
} while (0);
#else
#define CUDF_ASSERT_CUDA_SUCCESS(_call) \
do { \
cudaError_t const status__ = (_call); \
if (status__ != cudaSuccess) { \
std::cerr << "CUDA Error detected. " << cudaGetErrorName(status__) << " " \
<< cudaGetErrorString(status__) << std::endl; \
} \
/* NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) */ \
assert(status__ == cudaSuccess); \
} while (0)
#endif
class cuda_stream_pool {
public:
// matching type used in rmm::cuda_stream_pool::get_stream(stream_id)
using stream_id_type = std::size_t;
virtual ~cuda_stream_pool() = default;
/**
* @brief Get a `cuda_stream_view` of a stream in the pool.
*
* This function is thread safe with respect to other calls to the same function.
*
* @return Stream view.
*/
virtual rmm::cuda_stream_view get_stream() = 0;
/**
* @brief Get a `cuda_stream_view` of the stream associated with `stream_id`.
*
* Equivalent values of `stream_id` return a `cuda_stream_view` to the same underlying stream.
* This function is thread safe with respect to other calls to the same function.
*
* @param stream_id Unique identifier for the desired stream
* @return Requested stream view.
*/
virtual rmm::cuda_stream_view get_stream(stream_id_type stream_id) = 0;
/**
* @brief Get a set of `cuda_stream_view` objects from the pool.
*
* An attempt is made to ensure that the returned vector does not contain duplicate
* streams, but this cannot be guaranteed if `count` is greater than the value returned by
* `get_stream_pool_size()`.
*
* This function is thread safe with respect to other calls to the same function.
*
* @param count The number of stream views to return.
* @return Vector containing `count` stream views.
*/
virtual std::vector<rmm::cuda_stream_view> get_streams(std::size_t count) = 0;
/**
* @brief Get the number of stream objects in the pool.
*
* This function is thread safe with respect to other calls to the same function.
*
* @return the number of stream objects in the pool
*/
virtual std::size_t get_stream_pool_size() const = 0;
};
/**
 * @brief Implementation of `cuda_stream_pool` that wraps an `rmm::cuda_stream_pool`.
*/
class rmm_cuda_stream_pool : public cuda_stream_pool {
rmm::cuda_stream_pool _pool;
public:
rmm_cuda_stream_pool() : _pool{STREAM_POOL_SIZE} {}
rmm::cuda_stream_view get_stream() override { return _pool.get_stream(); }
rmm::cuda_stream_view get_stream(stream_id_type stream_id) override
{
return _pool.get_stream(stream_id);
}
std::vector<rmm::cuda_stream_view> get_streams(std::size_t count) override
{
if (count > STREAM_POOL_SIZE) {
CUDF_LOG_WARN("get_streams called with count ({}) > pool size ({})", count, STREAM_POOL_SIZE);
}
auto streams = std::vector<rmm::cuda_stream_view>();
for (uint32_t i = 0; i < count; i++) {
streams.emplace_back(_pool.get_stream());
}
return streams;
}
std::size_t get_stream_pool_size() const override { return STREAM_POOL_SIZE; }
};
/**
* @brief Implementation of `cuda_stream_pool` that always returns `cudf::get_default_stream()`
*/
class debug_cuda_stream_pool : public cuda_stream_pool {
public:
rmm::cuda_stream_view get_stream() override { return cudf::get_default_stream(); }
rmm::cuda_stream_view get_stream(stream_id_type stream_id) override
{
return cudf::get_default_stream();
}
std::vector<rmm::cuda_stream_view> get_streams(std::size_t count) override
{
return std::vector<rmm::cuda_stream_view>(count, cudf::get_default_stream());
}
std::size_t get_stream_pool_size() const override { return 1UL; }
};
/**
* @brief Initialize global stream pool.
*/
cuda_stream_pool* create_global_cuda_stream_pool()
{
if (getenv("LIBCUDF_USE_DEBUG_STREAM_POOL")) return new debug_cuda_stream_pool();
return new rmm_cuda_stream_pool();
}
// FIXME: these will be available in rmm soon
inline int get_num_cuda_devices()
{
rmm::cuda_device_id::value_type num_dev{};
CUDF_CUDA_TRY(cudaGetDeviceCount(&num_dev));
return num_dev;
}
rmm::cuda_device_id get_current_cuda_device()
{
int device_id;
CUDF_CUDA_TRY(cudaGetDevice(&device_id));
return rmm::cuda_device_id{device_id};
}
/**
* @brief RAII struct to wrap a cuda event and ensure its proper destruction.
*/
struct cuda_event {
cuda_event() { CUDF_CUDA_TRY(cudaEventCreateWithFlags(&e_, cudaEventDisableTiming)); }
virtual ~cuda_event() { CUDF_ASSERT_CUDA_SUCCESS(cudaEventDestroy(e_)); }
operator cudaEvent_t() { return e_; }
private:
cudaEvent_t e_;
};
/**
* @brief Returns a cudaEvent_t for the current thread.
*
* The returned event is valid for the current device.
*
* @return A cudaEvent_t unique to the current thread and valid on the current device.
*/
cudaEvent_t event_for_thread()
{
thread_local std::vector<std::unique_ptr<cuda_event>> thread_events(get_num_cuda_devices());
auto const device_id = get_current_cuda_device();
if (not thread_events[device_id.value()]) {
thread_events[device_id.value()] = std::make_unique<cuda_event>();
}
return *thread_events[device_id.value()];
}
/**
* @brief Returns a reference to the global stream pool for the current device.
* @return `cuda_stream_pool` valid on the current device.
*/
cuda_stream_pool& global_cuda_stream_pool()
{
// using bare pointers here to deliberately allow them to leak. otherwise we wind up with
// seg faults trying to destroy stream objects after the context has shut down.
static std::vector<cuda_stream_pool*> pools(get_num_cuda_devices());
static std::mutex mutex;
auto const device_id = get_current_cuda_device();
std::lock_guard<std::mutex> lock(mutex);
if (pools[device_id.value()] == nullptr) {
pools[device_id.value()] = create_global_cuda_stream_pool();
}
return *pools[device_id.value()];
}
} // anonymous namespace
std::vector<rmm::cuda_stream_view> fork_streams(rmm::cuda_stream_view stream, std::size_t count)
{
auto const streams = global_cuda_stream_pool().get_streams(count);
auto const event = event_for_thread();
CUDF_CUDA_TRY(cudaEventRecord(event, stream));
std::for_each(streams.begin(), streams.end(), [&](auto& strm) {
CUDF_CUDA_TRY(cudaStreamWaitEvent(strm, event, 0));
});
return streams;
}
void join_streams(host_span<rmm::cuda_stream_view const> streams, rmm::cuda_stream_view stream)
{
auto const event = event_for_thread();
std::for_each(streams.begin(), streams.end(), [&](auto& strm) {
CUDF_CUDA_TRY(cudaEventRecord(event, strm));
CUDF_CUDA_TRY(cudaStreamWaitEvent(stream, event, 0));
});
}
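// Example sketch of the intended fork/join pattern (assuming `stream` carries the
// surrounding work): independent work runs on the forked streams, which are then joined
// back so that later work enqueued on `stream` observes their results.
//
//   auto streams = fork_streams(stream, 2);
//   // ... enqueue independent kernels/copies on streams[0] and streams[1] ...
//   join_streams(streams, stream);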
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/type_checks.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/type_checks.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
namespace cudf {
namespace {
struct columns_equal_fn {
template <typename T>
bool operator()(column_view const&, column_view const&)
{
return true;
}
};
template <>
bool columns_equal_fn::operator()<dictionary32>(column_view const& lhs, column_view const& rhs)
{
auto const kidx = dictionary_column_view::keys_column_index;
return lhs.num_children() > 0 and rhs.num_children() > 0
? lhs.child(kidx).type() == rhs.child(kidx).type()
: lhs.is_empty() and rhs.is_empty();
}
template <>
bool columns_equal_fn::operator()<list_view>(column_view const& lhs, column_view const& rhs)
{
auto const& ci = lists_column_view::child_column_index;
return column_types_equal(lhs.child(ci), rhs.child(ci));
}
template <>
bool columns_equal_fn::operator()<struct_view>(column_view const& lhs, column_view const& rhs)
{
return lhs.num_children() == rhs.num_children() and
std::all_of(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs.num_children()),
[&](auto i) { return column_types_equal(lhs.child(i), rhs.child(i)); });
}
}; // namespace
// Implementation note: avoid using double dispatch for this function
// as it increases code paths to NxN for N types.
bool column_types_equal(column_view const& lhs, column_view const& rhs)
{
if (lhs.type() != rhs.type()) { return false; }
return type_dispatcher(lhs.type(), columns_equal_fn{}, lhs, rhs);
}
bool column_types_equivalent(column_view const& lhs, column_view const& rhs)
{
if (lhs.type().id() != rhs.type().id()) { return false; }
return type_dispatcher(lhs.type(), columns_equal_fn{}, lhs, rhs);
}
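// Worked example of the distinction: two DECIMAL64 columns with scales -1 and -2 share a
// type id but not a full data_type, so column_types_equal() returns false while
// column_types_equivalent() returns true.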
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/logger.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/logger.hpp>
#include "spdlog/sinks/stdout_sinks.h"
#include <spdlog/sinks/basic_file_sink.h>
#include <string>
namespace {
/**
* @brief Creates a sink for libcudf logging.
*
* Returns a file sink if the file name has been specified, otherwise returns a stderr sink.
*/
[[nodiscard]] spdlog::sink_ptr make_libcudf_sink()
{
if (auto filename = std::getenv("LIBCUDF_DEBUG_LOG_FILE"); filename != nullptr) {
return std::make_shared<spdlog::sinks::basic_file_sink_mt>(filename, true);
} else {
return std::make_shared<spdlog::sinks::stderr_sink_mt>();
}
}
/**
* @brief Converts the level name into the `spdlog` level enum.
*/
[[nodiscard]] spdlog::level::level_enum libcudf_log_level()
{
auto const env_level = std::getenv("LIBCUDF_LOGGING_LEVEL");
if (env_level == nullptr) { return spdlog::level::warn; }
auto const env_lvl_str = std::string(env_level);
if (env_lvl_str == "TRACE") return spdlog::level::trace;
if (env_lvl_str == "DEBUG") return spdlog::level::debug;
if (env_lvl_str == "INFO") return spdlog::level::info;
if (env_lvl_str == "WARN") return spdlog::level::warn;
if (env_lvl_str == "ERROR") return spdlog::level::err;
if (env_lvl_str == "CRITICAL") return spdlog::level::critical;
if (env_lvl_str == "OFF") return spdlog::level::off;
CUDF_FAIL("Invalid value for LIBCUDF_LOGGING_LEVEL environment variable");
}
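// Example sketch: both settings are read from the environment when cudf::logger() is first
// constructed, so they can be exported in the shell, e.g.
//   LIBCUDF_LOGGING_LEVEL=DEBUG LIBCUDF_DEBUG_LOG_FILE=/tmp/cudf.log ./my_app
// or set with POSIX setenv() before the first logging call.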
/**
* @brief Simple wrapper around a spdlog::logger that performs cuDF-specific initialization.
*/
struct logger_wrapper {
spdlog::logger logger_;
logger_wrapper() : logger_{"CUDF", make_libcudf_sink()}
{
logger_.set_pattern("[%6t][%H:%M:%S:%f][%-6l] %v");
logger_.set_level(libcudf_log_level());
logger_.flush_on(spdlog::level::warn);
}
};
} // namespace
spdlog::logger& cudf::logger()
{
static logger_wrapper wrapped{};
return wrapped.logger_;
}
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/type_dispatcher.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/type_dispatcher.hpp>
namespace cudf {
std::string type_to_name(data_type type) { return type_dispatcher(type, type_to_name_impl{}); }
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/default_stream.cpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/default_stream.hpp>
namespace cudf {
namespace detail {
#if defined(CUDF_USE_PER_THREAD_DEFAULT_STREAM)
rmm::cuda_stream_view const default_stream_value{rmm::cuda_stream_per_thread};
#else
rmm::cuda_stream_view const default_stream_value{};
#endif
} // namespace detail
/**
* @brief Check if per-thread default stream is enabled.
*
* @return true if PTDS is enabled, false otherwise.
*/
bool is_ptds_enabled()
{
#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
return true;
#else
return false;
#endif
}
rmm::cuda_stream_view const get_default_stream() { return detail::default_stream_value; }
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/stacktrace.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/stacktrace.hpp>
#if defined(__GNUC__) && defined(CUDF_BUILD_STACKTRACE_DEBUG)
#include <cxxabi.h>
#include <execinfo.h>
#include <cstdlib>
#include <cstring>
#include <sstream>
#endif // defined(__GNUC__) && defined(CUDF_BUILD_STACKTRACE_DEBUG)
namespace cudf::detail {
std::string get_stacktrace(capture_last_stackframe capture_last_frame)
{
#if defined(__GNUC__) && defined(CUDF_BUILD_STACKTRACE_DEBUG)
constexpr int max_stack_depth = 64;
void* stack[max_stack_depth];
auto const depth = backtrace(stack, max_stack_depth);
auto const modules = backtrace_symbols(stack, depth);
if (modules == nullptr) { return "No stacktrace could be captured!"; }
std::stringstream ss;
// Skip one more depth to avoid including the stackframe of this function.
auto const skip_depth = 1 + (capture_last_frame == capture_last_stackframe::YES ? 0 : 1);
for (auto i = skip_depth; i < depth; ++i) {
// Each modules[i] string contains a mangled name in the format like following:
// `module_name(function_name+0x012) [0x01234567890a]`
// We need to extract function name and function offset.
char* begin_func_name = std::strstr(modules[i], "(");
char* begin_func_offset = std::strstr(modules[i], "+");
char* end_func_offset = std::strstr(modules[i], ")");
auto const frame_idx = i - skip_depth;
if (begin_func_name && begin_func_offset && end_func_offset &&
begin_func_name < begin_func_offset) {
// Split `modules[i]` into separate null-terminated strings.
// After this, mangled function name will then be [begin_func_name, begin_func_offset), and
// function offset is in [begin_func_offset, end_func_offset).
*(begin_func_name++) = '\0';
*(begin_func_offset++) = '\0';
*end_func_offset = '\0';
// We need to demangle function name.
int status{0};
char* func_name = abi::__cxa_demangle(begin_func_name, nullptr, nullptr, &status);
ss << "#" << frame_idx << ": " << modules[i] << " : "
<< (status == 0 /*demangle success*/ ? func_name : begin_func_name) << "+"
<< begin_func_offset << "\n";
free(func_name);
} else {
ss << "#" << frame_idx << ": " << modules[i] << "\n";
}
}
free(modules);
return ss.str();
#else
#ifdef CUDF_BUILD_STACKTRACE_DEBUG
return "Stacktrace is only supported when built with a GNU compiler.";
#else
return "libcudf was not built with stacktrace support.";
#endif // CUDF_BUILD_STACKTRACE_DEBUG
#endif // __GNUC__
}
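// Example sketch (assuming libcudf was built with CUDF_BUILD_STACKTRACE_DEBUG): print the
// call stack leading to the current frame, excluding the caller's own frame.
//
//   std::cerr << get_stacktrace(capture_last_stackframe::NO) << std::endl;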
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/utilities/traits.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <cudf/strings/string_view.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/dictionary.hpp>
namespace cudf {
namespace {
/**
* @brief Helper functor to check if a specified type `T` supports relational comparisons.
*
*/
struct unary_relationally_comparable_functor {
/**
* @brief Returns true if `T` supports relational comparisons.
*
* @tparam T Type to check
* @return true if `T` supports relational comparisons
*/
template <typename T>
inline bool operator()() const
{
return cudf::is_relationally_comparable<T, T>();
}
};
} // namespace
/**
* @brief Checks whether `data_type` `type` supports relational comparisons.
*
* @param type Data_type for comparison.
* @return true If `type` supports relational comparisons.
* @return false If `type` does not support relational comparisons.
*/
bool is_relationally_comparable(data_type type)
{
return type_dispatcher(type, unary_relationally_comparable_functor{});
}
namespace {
/**
* @brief Helper functor to check if a specified type `T` supports equality comparisons.
*
*/
struct unary_equality_comparable_functor {
/**
* @brief Checks whether `T` supports equality comparisons.
*
* @tparam T Type to check
* @return true if `T` supports equality comparisons
*/
template <typename T>
bool operator()() const
{
return cudf::is_equality_comparable<T, T>();
}
};
} // namespace
/**
* @brief Checks whether `data_type` `type` supports equality comparisons.
*
* @param type Data_type for comparison.
* @return true If `type` supports equality comparisons.
* @return false If `type` does not support equality comparisons.
*/
bool is_equality_comparable(data_type type)
{
return cudf::type_dispatcher(type, unary_equality_comparable_functor{});
}
struct is_numeric_impl {
template <typename T>
constexpr bool operator()()
{
return is_numeric<T>();
}
};
/**
* @brief Indicates whether `type` is a numeric `data_type`.
*
* "Numeric" types are fundamental integral/floating point types such as `INT*`
* or `FLOAT*`. Types that wrap a numeric type are not considered numeric, e.g.,
*`TIMESTAMP`.
*
* @param type The `data_type` to verify
* @return true `type` is numeric
* @return false `type` is not numeric
*/
bool is_numeric(data_type type) { return cudf::type_dispatcher(type, is_numeric_impl{}); }
struct is_index_type_impl {
template <typename T>
constexpr bool operator()()
{
return is_index_type<T>();
}
};
/**
* @brief Indicates whether the type `type` is a index type.
*
* A type `T` is considered an index type if it is valid to use
* elements of type `T` to index into a column. I.e.,
* index types are integral types such as 'INT*' apart from 'bool'.
*
* @param type The `data_type` to verify
* @return true `type` is index type
* @return false `type` is not index type
*/
bool is_index_type(data_type type) { return cudf::type_dispatcher(type, is_index_type_impl{}); }
struct is_unsigned_impl {
template <typename T>
constexpr bool operator()()
{
return is_unsigned<T>();
}
};
/**
* @brief Indicates whether `type` is a unsigned numeric `data_type`.
*
* "Unsigned Numeric" types are fundamental integral types such as `UINT*`.
*
* @param type The `data_type` to verify
* @return true `type` is unsigned numeric
* @return false `type` is signed numeric
*/
bool is_unsigned(data_type type) { return cudf::type_dispatcher(type, is_unsigned_impl{}); }
struct is_integral_impl {
template <typename T>
constexpr bool operator()()
{
return is_integral<T>();
}
};
bool is_integral(data_type type) { return cudf::type_dispatcher(type, is_integral_impl{}); }
struct is_integral_not_bool_impl {
template <typename T>
constexpr bool operator()()
{
return is_integral_not_bool<T>();
}
};
bool is_integral_not_bool(data_type type)
{
return cudf::type_dispatcher(type, is_integral_not_bool_impl{});
}
struct is_floating_point_impl {
template <typename T>
constexpr bool operator()()
{
return is_floating_point<T>();
}
};
/**
* @brief Indicates whether `type` is a floating point `data_type`.
*
* "Floating point" types are fundamental floating point types such as `FLOAT*`.
*
* @param type The `data_type` to verify
* @return true `type` is floating point
* @return false `type` is not floating point
*/
bool is_floating_point(data_type type)
{
return cudf::type_dispatcher(type, is_floating_point_impl{});
}
struct is_boolean_impl {
template <typename T>
constexpr bool operator()()
{
return is_boolean<T>();
}
};
/**
* @brief Indicates whether `type` is a Boolean `data_type`.
*
* @param type The `data_type` to verify
* @return true `type` is a Boolean
* @return false `type` is not a Boolean
*/
bool is_boolean(data_type type) { return cudf::type_dispatcher(type, is_boolean_impl{}); }
struct is_fixed_point_impl {
template <typename T>
constexpr bool operator()()
{
return is_fixed_point<T>();
}
};
/**
* @brief Indicates whether `type` is a fixed point `data_type`.
*
* @param type The `data_type` to verify
* @return true `type` is a fixed point type
* @return false `type` is not a fixed point type
*/
bool is_fixed_point(data_type type) { return cudf::type_dispatcher(type, is_fixed_point_impl{}); }
struct is_timestamp_impl {
template <typename T>
constexpr bool operator()()
{
return is_timestamp<T>();
}
};
/**
* @brief Indicates whether `type` is a timestamp `data_type`.
*
* "Timestamp" types are int32_t or int64_t durations since the unix epoch.
*
* @param type The `data_type` to verify
* @return true `type` is a timestamp
* @return false `type` is not a timestamp
*/
bool is_timestamp(data_type type) { return cudf::type_dispatcher(type, is_timestamp_impl{}); }
struct is_duration_impl {
template <typename T>
constexpr bool operator()()
{
return is_duration<T>();
}
};
/**
* @brief Indicates whether `type` is a duration `data_type`.
*
* "Duration" types are int32_t or int64_t tick counts representing a time interval.
*
* @param type The `data_type` to verify
* @return true `type` is a duration
* @return false `type` is not a duration
*/
bool is_duration(data_type type) { return cudf::type_dispatcher(type, is_duration_impl{}); }
struct is_chrono_impl {
template <typename T>
constexpr bool operator()()
{
return is_chrono<T>();
}
};
/**
* @brief Indicates whether `type` is a chrono `data_type`.
*
* Chrono types include cudf timestamp types, which represent a point in time, and cudf
* duration types that represent a time interval.
*
* @param type The `data_type` to verify
* @return true `type` is a chrono type
* @return false `type` is not a chrono type
*/
bool is_chrono(data_type type) { return cudf::type_dispatcher(type, is_chrono_impl{}); }
struct is_dictionary_impl {
template <typename T>
constexpr bool operator()()
{
return is_dictionary<T>();
}
};
/**
* @brief Indicates whether `type` is a dictionary `data_type`.
*
* @param type The `data_type` to verify
* @return true `type` is a dictionary type
* @return false `type` is not a dictionary type
*/
bool is_dictionary(data_type type) { return cudf::type_dispatcher(type, is_dictionary_impl{}); }
struct is_fixed_width_impl {
template <typename T>
constexpr bool operator()()
{
return is_fixed_width<T>();
}
};
/**
* @brief Indicates whether elements of `type` are fixed-width.
*
* Elements of a fixed-width type all have the same size in bytes.
*
* @param type The `data_type` to verify
* @return true `type` is fixed-width
* @return false `type` is variable-width
*/
bool is_fixed_width(data_type type) { return cudf::type_dispatcher(type, is_fixed_width_impl{}); }
struct is_compound_impl {
template <typename T>
constexpr bool operator()()
{
return is_compound<T>();
}
};
/**
* @brief Indicates whether elements of `type` are compound.
*
* `column`s with "compound" elements are logically a single column of elements,
* but may be concretely implemented with two or more `column`s. For example, a
* `STRING` column could contain a `column` of offsets and a child `column` of
* characters.
*
* @param type The `data_type` to verify
* @return true `type` is a compound type
* @return false `type` is a simple type
*/
bool is_compound(data_type type) { return cudf::type_dispatcher(type, is_compound_impl{}); }
struct is_nested_impl {
template <typename T>
constexpr bool operator()()
{
return is_nested<T>();
}
};
/**
* @brief Indicates whether `type` is a nested type
*
* "Nested" types are distinct from compound types in that they
* can have an arbitrarily deep list of descendants of the same
* type. Strings are not a nested type, but lists are.
*
* @param type The `data_type` to verify
* @return true `type` is a nested type
* @return false `type` is not a nested type
*/
bool is_nested(data_type type) { return cudf::type_dispatcher(type, is_nested_impl{}); }
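// Illustrative usage sketch (not normative): how the compound/nested/fixed-width
// checks relate for a few common types, assuming the hypothetical variables below:
//
//   cudf::data_type str{cudf::type_id::STRING};
//   cudf::data_type lst{cudf::type_id::LIST};
//   cudf::is_compound(str);     // true  (offsets + characters children)
//   cudf::is_nested(str);       // false (strings are compound but not nested)
//   cudf::is_nested(lst);       // true
//   cudf::is_fixed_width(str);  // false (variable-width)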
namespace {
template <typename FromType>
struct is_bit_castable_to_impl {
template <typename ToType, std::enable_if_t<is_compound<ToType>()>* = nullptr>
constexpr bool operator()()
{
return false;
}
template <typename ToType, std::enable_if_t<not is_compound<ToType>()>* = nullptr>
constexpr bool operator()()
{
if (not cuda::std::is_trivially_copyable_v<FromType> ||
not cuda::std::is_trivially_copyable_v<ToType>) {
return false;
}
constexpr auto from_size = sizeof(cudf::device_storage_type_t<FromType>);
constexpr auto to_size = sizeof(cudf::device_storage_type_t<ToType>);
return from_size == to_size;
}
};
struct is_bit_castable_from_impl {
template <typename FromType, std::enable_if_t<is_compound<FromType>()>* = nullptr>
constexpr bool operator()(data_type)
{
return false;
}
template <typename FromType, std::enable_if_t<not is_compound<FromType>()>* = nullptr>
constexpr bool operator()(data_type to)
{
return cudf::type_dispatcher(to, is_bit_castable_to_impl<FromType>{});
}
};
} // namespace
/**
* @brief Indicates whether `from` is bit-castable to `to`.
*
* This casting is based on std::bit_cast. Data types that have the same size and are trivially
* copyable are eligible for this casting.
*
* See `cudf::bit_cast()` which returns a zero-copy `column_view` when casting between
* bit-castable types.
*
* @param from The `data_type` to convert from
* @param to The `data_type` to convert to
* @return `true` if the types are castable
*/
bool is_bit_castable(data_type from, data_type to)
{
return type_dispatcher(from, is_bit_castable_from_impl{}, to);
}
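// Illustrative usage sketch (not normative): INT32 and FLOAT32 have the same 4-byte
// storage and are trivially copyable, so they are bit-castable to each other, while
// STRING (a compound type) is not bit-castable to anything:
//
//   cudf::is_bit_castable(cudf::data_type{cudf::type_id::INT32},
//                         cudf::data_type{cudf::type_id::FLOAT32});  // true
//   cudf::is_bit_castable(cudf::data_type{cudf::type_id::STRING},
//                         cudf::data_type{cudf::type_id::INT32});    // false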
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/jit/util.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <string>
namespace cudf {
namespace jit {
/**
* @brief Get the raw pointer to data in a (mutable_)column_view
*/
void const* get_data_ptr(column_view const& view);
/**
* @brief Get the raw pointer to data in a scalar
*/
void const* get_data_ptr(scalar const& s);
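// Illustrative usage sketch (not normative): given an existing cudf::column_view `cv`
// and cudf::scalar `s` (hypothetical names), both overloads return a type-erased
// pointer to the underlying device data:
//
//   void const* column_data = cudf::jit::get_data_ptr(cv);
//   void const* scalar_data = cudf::jit::get_data_ptr(s);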
} // namespace jit
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/jit/cache.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <jitify2.hpp>
#include <memory>
namespace cudf {
namespace jit {
jitify2::ProgramCache<>& get_program_cache(jitify2::PreprocessedProgramData preprog);
} // namespace jit
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/jit/parser.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parser.hpp"
#include <cudf/utilities/error.hpp>
#include <algorithm>
#include <cctype>
#include <map>
#include <set>
#include <string>
#include <vector>
namespace cudf {
namespace jit {
constexpr char percent_escape[] = "_";
inline bool is_white(char const c) { return c == ' ' || c == '\n' || c == '\r' || c == '\t'; }
std::string ptx_parser::escape_percent(std::string const& src)
{
  // Because we're transforming into inline PTX, register names are not allowed to start with %.
auto f = std::find_if_not(src.begin(), src.end(), [](auto c) { return is_white(c) || c == '['; });
if (f != src.end() && *f == '%') {
std::string output = src;
output.replace(std::distance(src.begin(), f), 1, percent_escape);
return output;
}
return src;
}
std::string ptx_parser::remove_nonalphanumeric(std::string const& src)
{
std::string out = src;
auto f = std::find_if_not(out.begin(), out.end(), [](auto c) { return is_white(c) || c == '['; });
auto l = std::find_if(f, out.end(), [](auto c) { return is_white(c) || c == ']'; });
std::replace_if(
f, l, [](auto c) { return !isalnum(c) && c != '_'; }, '_');
return std::string(f, l);
}
std::string ptx_parser::register_type_to_contraint(std::string const& src)
{
if (src == ".b8" || src == ".u8" || src == ".s8")
return "h";
else if (src == ".u16" || src == ".s16" || src == ".b16" || src == ".f16")
return "h";
else if (src == ".b32" || src == ".u32" || src == ".s32" || src == ".f16x2")
return "r";
else if (src == ".u64" || src == ".b64" || src == ".s64")
return "l";
else if (src == ".f32")
return "f";
else if (src == ".f64")
return "d";
else
return "x_reg";
}
std::string ptx_parser::register_type_to_cpp_type(std::string const& register_type)
{
if (register_type == ".b8" || register_type == ".s8" || register_type == ".u8")
return "char";
else if (register_type == ".u16")
return "short int";
else if (register_type == ".s16")
return "short int";
else if (register_type == ".f16")
return "half";
else if (register_type == ".u32")
return "int";
else if (register_type == ".s32")
return "int";
else if (register_type == ".f16x2")
return "half2";
else if (register_type == ".u64")
return "long int";
else if (register_type == ".s64")
return "long int";
else if (register_type == ".f32")
return "float";
else if (register_type == ".f64")
return "double";
else
return "x_cpptype";
}
std::string ptx_parser::parse_instruction(std::string const& src)
{
  // Assume that an instruction statement begins with the instruction itself.
size_t const length = src.size();
std::string output;
std::string suffix;
std::string original_code = "\n /** " + src + " */\n";
int piece_count = 0;
size_t start = 0;
size_t stop = 0;
bool is_instruction = true;
bool is_pragma_instruction = false;
bool is_param_loading_instruction = false;
std::string constraint;
std::string register_type;
bool blank = true;
std::string cpp_typename;
while (stop < length) {
while (start < length && (is_white(src[start]) || src[start] == ',' || src[start] == '{' ||
src[start] == '}')) { // running to the first non-white character.
if (src[start] == ',') output += ',';
if (src[start] == '{') output += '{';
if (src[start] == '}') output += '}';
start++;
}
stop = start;
if (stop < length) {
blank = false;
output += " ";
} else {
break;
}
if (src[start] == '[') {
while (stop < length && src[stop] != ']') {
stop++;
}
stop++;
} else {
while (stop < length && !is_white(src[stop]) && src[stop] != ',' && src[stop] != ':') {
stop++;
}
if (src[stop] == ':') {
// This is a branch
stop++;
output += std::string(src, start, stop - start);
start = stop;
continue;
}
}
std::string piece = std::string(src, start, stop - start);
if (is_instruction) {
if (piece.find("ld.param") != std::string::npos) {
is_param_loading_instruction = true;
register_type = std::string(piece, 8, stop - 8);
// This is the ld.param sentence
cpp_typename = register_type_to_cpp_type(register_type);
if (cpp_typename == "int" || cpp_typename == "short int" || cpp_typename == "char") {
        // The trick to support an `ld` statement whose destination register is wider
        // than the instruction width, e.g.
//
// "ld.param.s32 %rd0, [...];",
//
// where %rd0 is a 64-bit register. Directly converting to "mov" instruction
// does not work since "register widening" is ONLY allowed for
// "ld", "st", and "cvt". So we use cvt instead and something like
// "cvt.s32.s32". This keep the same operation behavior and when compiling to
// SASS code "usually" (in cases I have seen) this is optimized away, thus
// gives no performance penalty.
output += " cvt" + register_type + register_type;
} else {
output += " mov" + register_type;
}
constraint = register_type_to_contraint(register_type);
} else if (piece.find("st.param") != std::string::npos) {
return "asm volatile (\"" + output +
"/** *** The way we parse the CUDA PTX assumes the function returns the return "
"value through the first function parameter. Thus the `st.param.***` instructions "
"are not processed. *** */" +
"\");" + original_code; // Our port does not support return value;
} else if (piece.find(".pragma") != std::string::npos) {
is_pragma_instruction = true;
output += " " + piece;
} else if (piece[0] == '@') {
output += " @" + remove_nonalphanumeric(piece.substr(1, piece.size() - 1));
} else {
output += " " + piece;
}
is_instruction = false;
} else {
// Here it should be the registers.
if (piece_count == 2 && is_param_loading_instruction) {
// This is the source of the parameter loading instruction
output += " %0";
if (cpp_typename == "char") {
suffix = ": : \"" + constraint + "\"( static_cast<short>(" +
remove_nonalphanumeric(piece) + "))";
} else {
suffix = ": : \"" + constraint + "\"(" + remove_nonalphanumeric(piece) + ")";
}
// Here we get to see the actual type of the input arguments.
input_arg_list[remove_nonalphanumeric(piece)] = register_type_to_cpp_type(register_type);
} else if (is_pragma_instruction) {
// quote any string
std::string transformed_piece;
for (const auto& c : piece) {
if (c == '"') {
transformed_piece += "\\\"";
} else {
transformed_piece += c;
}
}
output += transformed_piece;
} else {
output += escape_percent(std::string(src, start, stop - start));
}
}
start = stop;
piece_count++;
}
if (!blank) output += ";";
return "asm volatile (\"" + output + "\"" + suffix + ");" + original_code;
}
std::string ptx_parser::parse_statement(std::string const& src)
{
auto f = std::find_if_not(src.cbegin(), src.cend(), [](auto c) { return is_white(c); });
return f == src.cend() ? " \n" : parse_instruction(std::string(f, src.cend()));
}
std::vector<std::string> ptx_parser::parse_function_body(std::string const& src)
{
auto f = src.cbegin();
std::vector<std::string> statements;
while (f < src.cend()) {
auto l = std::find(f, src.cend(), ';');
statements.push_back(parse_statement(std::string(f, l)));
f = ++l;
}
return statements;
}
std::string ptx_parser::parse_param(std::string const& src)
{
auto i = 0;
auto f = src.cbegin();
while (f < src.cend() && i <= 3) {
f = std::find_if_not(f, src.cend(), [](auto c) { return is_white(c); });
auto l = std::find_if(f, src.cend(), [](auto c) { return is_white(c); });
if (++i == 3) return remove_nonalphanumeric(std::string(f, l));
f = l;
}
return "";
}
std::string ptx_parser::parse_param_list(std::string const& src)
{
auto f = src.begin();
auto item_count = 0;
std::string output{};
while (f < src.end()) {
auto l = std::find(f, src.end(), ',');
output += [&, name = parse_param(std::string(f, l))] {
if (pointer_arg_list.find(item_count) != pointer_arg_list.end()) {
if (item_count == 0) {
return output_arg_type + "* " + name;
} else {
// On a 64-bit machine inside the PTX function body a pointer is
          // literally just a uint64_t, so it doesn't make sense here to carry
          // the pointee type. Thus we just use void* here.
return ",\n const void* " + name;
}
} else {
if (input_arg_list.count(name)) {
return ", \n " + input_arg_list[name] + " " + name;
} else {
// This parameter isn't used in the function body so we just pretend
// it's an int. After being inlined they are gone anyway.
return ", \n int " + name;
}
}
}();
f = ++l;
item_count++;
}
return "\n " + output + "\n";
}
std::string ptx_parser::parse_function_header(std::string const& src)
{
// Essentially we only need the information inside the two pairs of parentheses.
auto f = [&] {
auto i = std::find_if_not(src.cbegin(), src.cend(), [](auto c) { return is_white(c); });
if (i != src.cend() && *i == '(') // This function has a return type
// First Pass: output param list
i = std::find_if_not(std::next(i), src.cend(), [](auto c) { return c == ')'; });
// The function name
i = std::find_if_not(std::next(i), src.cend(), [](auto c) { return is_white(c) || c == '('; });
// Second Pass: input param list
return std::next(std::find(i, src.cend(), '('));
}();
auto l = std::find(f, src.cend(), ')');
auto const input_arg = parse_param_list(std::string(f, l));
return "\n__device__ __inline__ void " + function_name + "(" + input_arg + "){" + "\n";
}
std::string remove_comments(std::string const& src)
{
std::string output;
auto f = src.cbegin();
while (f < src.cend()) {
auto l = std::find(f, src.cend(), '/');
output.append(f, l); // push chunk instead of 1 char at a time
f = std::next(l); // skip over '/'
if (l < src.cend()) {
char n = f < src.cend() ? *f : '?';
if (n == '/') { // found "//"
f = std::find(f, src.cend(), '\n'); // skip to end of line
} else if (n == '*') { // found "/*"
auto term = std::string("*/"); // skip to end of next "*/"
f = std::search(std::next(f), src.cend(), term.cbegin(), term.cend()) + term.size();
} else {
output.push_back('/'); // lone '/' should be pushed into output
}
}
}
return output;
}
// The interface
std::string ptx_parser::parse()
{
std::string no_comments = remove_comments(ptx);
input_arg_list.clear();
auto const _func = std::string(".func"); // Go directly to the .func mark
auto f = std::search(no_comments.cbegin(), no_comments.cend(), _func.cbegin(), _func.cend()) +
_func.size();
CUDF_EXPECTS(f < no_comments.cend(), "No function (.func) found in the input ptx code.\n");
auto l = std::find(f, no_comments.cend(), '{');
auto f2 = std::next(l);
auto l2 = std::find_if(f2, no_comments.cend(), [brace_count = 0](auto c) mutable {
if (c == '{') ++brace_count;
if (c == '}') {
if (brace_count == 0) return true; // find matching } to first found {
--brace_count;
}
return false;
});
// DO NOT CHANGE ORDER - parse_function_body must be called before parse_function_header
// because the function parameter types are inferred from their corresponding load
// instructions in the function body
auto const fn_body_output = parse_function_body(std::string(f2, l2));
auto const fn_header_output = parse_function_header(std::string(f, l));
// Don't use std::accumulate until C++20 when rvalue references are supported
auto final_output = fn_header_output + "\n asm volatile (\"{\");";
for (auto const& line : fn_body_output)
final_output += line.find("ret;") != std::string::npos ? " asm volatile (\"bra RETTGT;\");\n"
: " " + line + "\n";
return final_output + " asm volatile (\"RETTGT:}\");}";
}
ptx_parser::ptx_parser(std::string const& ptx_,
std::string const& function_name_,
std::string const& output_arg_type_,
std::set<int> const& pointer_arg_list_)
: ptx(ptx_),
function_name(function_name_),
output_arg_type(output_arg_type_),
pointer_arg_list(pointer_arg_list_)
{
}
// The interface
std::string parse_single_function_cuda(std::string const& src, std::string const& function_name)
{
std::string no_comments = remove_comments(src);
// For CUDA device function we just need to find the function
// name and replace it with the specified one.
size_t const length = no_comments.size();
size_t start = 0;
size_t stop = start;
while (stop < length && no_comments[stop] != '(') {
stop++;
}
CUDF_EXPECTS(stop != length && stop != 0,
"No CUDA device function found in the input CUDA code.\n");
stop--;
while (stop > 0 && is_white(no_comments[stop])) {
stop--;
}
CUDF_EXPECTS(stop != 0 || !is_white(no_comments[0]),
"No CUDA device function name found in the input CUDA code.\n");
start = stop;
while (start > 0 && !is_white(no_comments[start])) {
start--;
}
start++;
stop++;
CUDF_EXPECTS(start < stop, "No CUDA device function name found in the input CUDA code.\n");
no_comments.replace(start, stop - start, function_name);
return no_comments;
}
} // namespace jit
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/jit/cache.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/error.hpp>
#include <cuda.h>
#include <jitify2.hpp>
#include <cstddef>
#include <filesystem>
namespace cudf {
namespace jit {
// Get the directory in home to use for storing the cache
std::filesystem::path get_user_home_cache_dir()
{
auto home_dir = std::getenv("HOME");
if (home_dir != nullptr) {
return std::filesystem::path(home_dir) / ".cudf";
} else {
return std::filesystem::path();
}
}
// Default `LIBCUDF_KERNEL_CACHE_PATH` to `$HOME/.cudf/$CUDF_VERSION`.
// This definition can be overridden at compile time by specifying a
// `-DLIBCUDF_KERNEL_CACHE_PATH=/kernel/cache/path` CMake argument.
// Use `std::filesystem` for cross-platform path resolution and dir
// creation. This path is used in the `getCacheDir()` function below.
#if !defined(LIBCUDF_KERNEL_CACHE_PATH)
#define LIBCUDF_KERNEL_CACHE_PATH get_user_home_cache_dir()
#endif
/**
* @brief Get the string path to the JITIFY kernel cache directory.
*
* This path can be overridden at runtime by defining an environment variable
* named `LIBCUDF_KERNEL_CACHE_PATH`. The value of this variable must be a path
* under which the process' user has read/write privileges.
*
* This function returns a path to the cache directory, creating it if it
* doesn't exist.
*
* The default cache directory is `$HOME/.cudf/$CUDF_VERSION`. If no overrides
* are used and if $HOME is not defined, returns an empty path and file
* caching is not used.
*/
std::filesystem::path get_cache_dir()
{
// The environment variable always overrides the
// default/compile-time value of `LIBCUDF_KERNEL_CACHE_PATH`
auto kernel_cache_path_env = std::getenv("LIBCUDF_KERNEL_CACHE_PATH");
auto kernel_cache_path = std::filesystem::path(
kernel_cache_path_env != nullptr ? kernel_cache_path_env : LIBCUDF_KERNEL_CACHE_PATH);
// Cache path could be empty when env HOME is unset or LIBCUDF_KERNEL_CACHE_PATH is defined to be
// empty, to disallow use of file cache at runtime.
if (not kernel_cache_path.empty()) {
kernel_cache_path /= std::string{CUDF_STRINGIFY(CUDF_VERSION)};
    // Make a per-device cache based on compute capability. This prevents devices of
    // different compute capability from sharing the same kernel cache.
int device;
int cc_major;
int cc_minor;
CUDF_CUDA_TRY(cudaGetDevice(&device));
CUDF_CUDA_TRY(cudaDeviceGetAttribute(&cc_major, cudaDevAttrComputeCapabilityMajor, device));
CUDF_CUDA_TRY(cudaDeviceGetAttribute(&cc_minor, cudaDevAttrComputeCapabilityMinor, device));
int cc = cc_major * 10 + cc_minor;
kernel_cache_path /= std::to_string(cc);
try {
// `mkdir -p` the kernel cache path if it doesn't exist
std::filesystem::create_directories(kernel_cache_path);
} catch (std::exception const& e) {
// if directory creation fails for any reason, return empty path
return std::filesystem::path();
}
}
return kernel_cache_path;
}
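// Illustrative example (not normative): if a user sets
// LIBCUDF_KERNEL_CACHE_PATH=/tmp/cudf-jit and runs on a compute-capability 8.0 device,
// the cache directory resolved above is /tmp/cudf-jit/<CUDF_VERSION>/80, created on
// demand if it does not already exist.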
std::string get_program_cache_dir()
{
#if defined(JITIFY_USE_CACHE)
return get_cache_dir().string();
#else
return {};
#endif
}
std::size_t try_parse_numeric_env_var(char const* const env_name, std::size_t default_val)
{
auto const value = std::getenv(env_name);
return value != nullptr ? std::stoull(value) : default_val;
}
jitify2::ProgramCache<>& get_program_cache(jitify2::PreprocessedProgramData preprog)
{
static std::mutex caches_mutex{};
static std::unordered_map<std::string, std::unique_ptr<jitify2::ProgramCache<>>> caches{};
std::lock_guard<std::mutex> caches_lock(caches_mutex);
auto existing_cache = caches.find(preprog.name());
if (existing_cache == caches.end()) {
auto const kernel_limit_proc =
try_parse_numeric_env_var("LIBCUDF_KERNEL_CACHE_LIMIT_PER_PROCESS", 10'000);
auto const kernel_limit_disk =
try_parse_numeric_env_var("LIBCUDF_KERNEL_CACHE_LIMIT_DISK", 100'000);
// if kernel_limit_disk is zero, jitify will assign it the value of kernel_limit_proc.
// to avoid this, we treat zero as "disable disk caching" by not providing the cache dir.
auto const cache_dir = kernel_limit_disk == 0 ? std::string{} : get_program_cache_dir();
auto const res =
caches.insert({preprog.name(),
std::make_unique<jitify2::ProgramCache<>>(
kernel_limit_proc, preprog, nullptr, cache_dir, kernel_limit_disk)});
existing_cache = res.first;
}
return *(existing_cache->second);
}
} // namespace jit
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/jit/util.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <string>
namespace cudf {
namespace jit {
struct get_data_ptr_functor {
/**
* @brief Gets the data pointer from a column_view
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
void const* operator()(column_view const& view)
{
return static_cast<void const*>(view.template data<T>());
}
template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>())>
void const* operator()(column_view const& view)
{
CUDF_FAIL("Invalid data type for JIT operation");
}
/**
* @brief Gets the data pointer from a scalar
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
void const* operator()(scalar const& s)
{
using ScalarType = scalar_type_t<T>;
auto s1 = static_cast<ScalarType const*>(&s);
return static_cast<void const*>(s1->data());
}
template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>())>
void const* operator()(scalar const& s)
{
CUDF_FAIL("Invalid data type for JIT operation");
}
};
void const* get_data_ptr(column_view const& view)
{
return type_dispatcher<dispatch_storage_type>(view.type(), get_data_ptr_functor{}, view);
}
void const* get_data_ptr(scalar const& s)
{
return type_dispatcher<dispatch_storage_type>(s.type(), get_data_ptr_functor{}, s);
}
} // namespace jit
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/jit/parser.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <map>
#include <set>
#include <string>
#include <vector>
namespace cudf {
namespace jit {
/**
* @brief Parse and transform a piece of PTX code that contains the implementation
* of a `__device__` function into a CUDA `__device__` `__inline__` function.
*
* @param `src` The input PTX code.
 * @param `function_name` The name that the output CUDA function will have.
* @param `output_arg_type` The output type of the PTX function, e.g. "int", "int64_t"
* @return The output CUDA `__device__` `__inline__` function
*/
class ptx_parser {
private:
std::string ptx;
std::string function_name;
std::string output_arg_type;
std::set<int> pointer_arg_list;
std::map<std::string, std::string> input_arg_list;
private:
/**
* @brief parse and transform header part of the PTX code into a CUDA header
*
* The result always has `__device__ __inline__ void`. The types of the input
* parameters are determined from, in descending order of priority:
* 1. The first parameter is always of type "`output_arg_type`*"
* 2. All other parameters marked in pointer_arg_list are of type "const void*"
* 3. For parameters that are used in the function body their types are
* inferred from their corresponding parameter loading instructions
* 4. Unused parameters are always of type "int"
*
* @param src The header part of the PTX code
* @return The parsed CUDA header
As an example:
.visible .func (.param .b32 func_retval0) _ZN8__main__7add$241Eff(
.param .b64 _ZN8__main__7add$241Eff_param_0,
.param .b32 _ZN8__main__7add$241Eff_param_1,
.param .b32 _ZN8__main__7add$241Eff_param_2
)
will be transformed to
__device__ __inline__ void GENERIC_BINARY_OP(
float* _ZN8__main__7add_241Eff_param_0,
float _ZN8__main__7add_241Eff_param_1,
float _ZN8__main__7add_241Eff_param_2
)
*/
std::string parse_function_header(std::string const& src);
/**
* @brief parse and transform input parameter list of the PTX code into the
* corresponding CUDA form
*
* @param src The input parameter list part of the PTX code
* @return The parsed CUDA input parameter list
*/
std::string parse_param_list(std::string const& src);
/**
* @brief parse and transform an input parameter line of the PTX code into the
* corresponding CUDA form
*
* @param src The input parameter line of the PTX code
* @return The parsed CUDA input parameter
*/
static std::string parse_param(std::string const& src);
/**
* @brief parse function body of the PTX code into statements by `;`s.
*
* @param src The function body of the PTX code
* @return The parsed statements
*/
std::vector<std::string> parse_function_body(std::string const& src);
/**
* @brief Remove leading white characters and call `parse_instruction`.
*
* @param src The statement to be parsed.
* @return The resulting CUDA statement.
*/
std::string parse_statement(std::string const& src);
/**
* @brief Convert the input PTX instruction into an inline PTX
* statement without changing (exceptions exist).
*
 * Non-alphanumeric characters in register identifiers, except underscores, are replaced
 * with underscores. Example:
*
* fma.rn.f32 %f4, %f3, %f1, %f2
*
* ---> asm volatile (" fma.rn.f32 _f4, _f3, _f1, _f2;");
*
* If a register from the input parameters list is used in an instruction
* its type is inferred from the instruction and saved in the `input_arg_list`
* to be used in when parsing the function header.
*
* See the document at https://github.com/hummingtree/cudf/wiki/PTX-parser
* for the detailed description about the exceptions.
*
* @param src The statement to be parsed.
* @return The resulting CUDA inline PTX statement.
*/
std::string parse_instruction(std::string const& src);
/**
* @brief Convert register type (e.g. ".f32") to the corresponding
* C++ type (e.g. "float")
*
* See the implementation for details
*
* @param src The input code
* @return The resulting code
*/
static std::string register_type_to_cpp_type(std::string const& register_type);
/**
* @brief Convert register type (e.g. ".f32") to the corresponding
* constraint in inline PTX syntax (e.g. "f")
*
* See the implementation for details
*
* @param src The input code
* @return The resulting code
*/
static std::string register_type_to_contraint(std::string const& src);
/**
* @brief Replace any non-alphanumeric characters that are not underscore with
* underscore. The leading `[` and trailing `]` are exempted, e.g.
*
* "[t$5]" --> "[t_5]"
*
* @param src The input code
* @return The resulting code
*/
static std::string remove_nonalphanumeric(std::string const& src);
/**
* @brief Replace leading `%` in register identifiers with `_`.
*
* According to PTX document `%` can only appear at the start of a register
* identifier. At the same time `%` is not allowed in inline PTX. This function
* first looks for the register identifier and if it starts with `%` replaces it
* with `_`.
*
* @param src The input code
* @return The resulting code
*/
static std::string escape_percent(std::string const& src);
public:
ptx_parser() = delete;
/**
* @brief Constructor of the `ptx_parser` class
*
* @param ptx_ The input PTX code that contains the function whose
* CUDA is to be generated.
* @param function_name_ The function name of the output CUDA function
* @param output_arg_type_ The C++ type of the output parameter of the
* function.
* @param pointer_arg_list_ A list of the parameters that are pointers.
*/
ptx_parser(std::string const& ptx_,
std::string const& function_name_,
std::string const& output_arg_type_,
std::set<int> const& pointer_arg_list_);
// parse the source!!!
std::string parse();
};
/**
* @brief Parse and Transform a piece of PTX code that contains the implementation
* of a device function into a CUDA device function.
*
* @param src The input PTX code.
 * @param function_name The name that the output CUDA function will have.
 * @param output_arg_type The C++ type of the output parameter of the
 * function
* @param pointer_arg_list A list of the parameters that are pointers.
* @return The output CUDA device function
*/
inline std::string parse_single_function_ptx(std::string const& src,
std::string const& function_name,
std::string const& output_arg_type,
std::set<int> const& pointer_arg_list = {0})
{
ptx_parser instance(src, function_name, output_arg_type, pointer_arg_list);
return instance.parse();
}
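/*
 * Illustrative usage sketch (not normative): given PTX text for a device UDF in a
 * hypothetical string `udf_ptx`, produce an inlineable CUDA function named
 * GENERIC_BINARY_OP whose first parameter is a float* output:
 *
 *   std::string cuda_source =
 *     cudf::jit::parse_single_function_ptx(udf_ptx, "GENERIC_BINARY_OP", "float");
 */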
/**
* @brief In a piece of CUDA code that contains the implementation
* of a device function, locate the function and replace its function name
* with the specified one.
*
* @param src The input CUDA code.
 * @param function_name The name that the output CUDA function will have.
* @return The output CUDA device function
*/
std::string parse_single_function_cuda(std::string const& src, std::string const& function_name);
} // namespace jit
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/partitioning/round_robin.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <cmath> // for std::ceil()
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
namespace {
/**
* @brief Handles the "degenerate" case num_partitions >= num_rows.
*
* Specifically,
* If num_partitions == nrows:
* Then, offsets = [0..nrows-1]
* gather_row_indices = rotate [0..nrows-1] right by start_partition positions;
*
* If num_partitions > nrows:
* Then, let:
* dbg = generate a directed bipartite graph with num_partitions nodes and nrows edges,
* so that node j has an edge to node (j+start_partition) % num_partitions, for j = 0,...,nrows-1;
*
* transpose_dbg = transpose graph of dbg; (i.e., (i -> j) edge in dbg means (j -> i) edge in
* transpose);
*
* (offsets, indices) = (row_offsets, col_indices) of transpose_dbg;
* where (row_offsets, col_indices) are the CSR format of the graph;
*
* @param[in] input The input table to be round-robin partitioned
* @param[in] num_partitions Number of partitions for the table
* @param[in] start_partition Index of the 1st partition
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
*
* @returns A std::pair consisting of a unique_ptr to the partitioned table and the partition
* offsets for each partition within the table
*/
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> degenerate_partitions(
cudf::table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto nrows = input.num_rows();
// iterator for partition index rotated right by start_partition positions:
auto rotated_iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[num_partitions, start_partition] __device__(auto index) {
return (index + num_partitions - start_partition) % num_partitions;
});
if (num_partitions == nrows) {
rmm::device_uvector<cudf::size_type> partition_offsets(num_partitions, stream);
thrust::sequence(rmm::exec_policy(stream), partition_offsets.begin(), partition_offsets.end());
auto uniq_tbl = cudf::detail::gather(input,
rotated_iter_begin,
rotated_iter_begin + nrows, // map
cudf::out_of_bounds_policy::DONT_CHECK,
stream,
mr);
return std::pair(std::move(uniq_tbl),
cudf::detail::make_std_vector_sync(partition_offsets, stream));
} else { //( num_partitions > nrows )
rmm::device_uvector<cudf::size_type> d_row_indices(nrows, stream);
// copy rotated right partition indexes that
// fall in the interval [0, nrows):
//(this relies on a _stable_ copy_if())
thrust::copy_if(rmm::exec_policy(stream),
rotated_iter_begin,
rotated_iter_begin + num_partitions,
d_row_indices.begin(),
[nrows] __device__(auto index) { return (index < nrows); });
//...and then use the result, d_row_indices, as gather map:
auto uniq_tbl = cudf::detail::gather(input,
d_row_indices.begin(),
d_row_indices.end(), // map
cudf::out_of_bounds_policy::DONT_CHECK,
stream,
mr);
// offsets (part 1: compute partition sizes);
// iterator for number of edges of the transposed bipartite graph;
// this composes rotated_iter transform (above) iterator with
// calculating number of edges of transposed bi-graph:
auto nedges_iter_begin = thrust::make_transform_iterator(
rotated_iter_begin, [nrows] __device__(auto index) { return (index < nrows ? 1 : 0); });
// offsets (part 2: compute partition offsets):
rmm::device_uvector<cudf::size_type> partition_offsets(num_partitions, stream);
thrust::exclusive_scan(rmm::exec_policy(stream),
nedges_iter_begin,
nedges_iter_begin + num_partitions,
partition_offsets.begin());
return std::pair(std::move(uniq_tbl),
cudf::detail::make_std_vector_sync(partition_offsets, stream));
}
}
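// Worked example (illustrative): with nrows = 3, num_partitions = 5 and
// start_partition = 2, the rotated iterator yields [3, 4, 0, 1, 2]; keeping only
// values < nrows gives the gather map [0, 1, 2], and the exclusive scan of the edge
// counts [0, 0, 1, 1, 1] gives partition offsets [0, 0, 0, 1, 2], i.e. rows 0, 1, 2
// land in partitions 2, 3, 4 while partitions 0 and 1 stay empty.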
} // namespace
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto nrows = input.num_rows();
CUDF_EXPECTS(num_partitions > 0, "Incorrect number of partitions. Must be greater than 0.");
CUDF_EXPECTS(start_partition < num_partitions,
"Incorrect start_partition index. Must be less than number of partitions.");
CUDF_EXPECTS(
start_partition >= 0,
"Incorrect start_partition index. Must be positive."); // since cudf::size_type is an alias for
// int32_t, it _can_ be negative
if (nrows == 0) {
return std::pair(empty_like(input), std::vector<size_type>(num_partitions, 0));
}
// handle degenerate case:
//
if (num_partitions >= nrows) {
return degenerate_partitions(input, num_partitions, start_partition, stream, mr);
}
auto np_max_size = nrows % num_partitions; // # partitions of max size
// handle case when nr `mod` np == 0;
// fix for bug: https://github.com/rapidsai/cudf/issues/4043
auto num_partitions_max_size = (np_max_size > 0 ? np_max_size : num_partitions);
cudf::size_type max_partition_size = std::ceil(
static_cast<double>(nrows) / static_cast<double>(num_partitions)); // max size of partitions
auto total_max_partitions_size = num_partitions_max_size * max_partition_size;
auto num_partitions_min_size = num_partitions - num_partitions_max_size;
// delta is the number of positions to rotate right
// the original range [0,1,...,n-1]
// and is calculated by accumulating the first
//`start_partition` partition sizes from the end;
// i.e.,
// the partition sizes array (of size p) being:
//[m,m,...,m,(m-1),...,(m-1)]
//(with num_partitions_max_size sizes `m` at the beginning;
// and (p-num_partitions_max_size) sizes `(m-1)` at the end)
// we accumulate the 1st `start_partition` entries from the end:
//
auto delta = (start_partition > num_partitions_min_size
? num_partitions_min_size * (max_partition_size - 1) +
(start_partition - num_partitions_min_size) * max_partition_size
: start_partition * (max_partition_size - 1));
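  // Worked example (illustrative): for nrows = 10, num_partitions = 3 and
  // start_partition = 1, we get max_partition_size = 4, num_partitions_max_size = 1 and
  // num_partitions_min_size = 2; start_partition (1) is not greater than
  // num_partitions_min_size (2), so delta = 1 * (4 - 1) = 3. The rotated partition sizes
  // computed further below come out as [3, 4, 3] with offsets [0, 3, 7], i.e. the single
  // max-size (4-row) partition lands at index start_partition = 1.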
auto iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[nrows,
num_partitions,
max_partition_size,
num_partitions_max_size,
total_max_partitions_size,
delta] __device__(auto index0) {
// rotate original index right by delta positions;
// this is the effect of applying start_partition:
//
auto rotated_index = (index0 + nrows - delta) % nrows;
// using rotated_index = given index0, rotated;
// the algorithm below calculates the src round-robin row,
// by calculating the partition_index and the index_within_partition:
//
auto index_within_partition =
(rotated_index <= total_max_partitions_size
? rotated_index % max_partition_size
: (rotated_index - total_max_partitions_size) % (max_partition_size - 1));
auto partition_index =
(rotated_index <= total_max_partitions_size
? rotated_index / max_partition_size
: num_partitions_max_size +
(rotated_index - total_max_partitions_size) / (max_partition_size - 1));
return num_partitions * index_within_partition + partition_index;
});
auto uniq_tbl = cudf::detail::gather(
input, iter_begin, iter_begin + nrows, cudf::out_of_bounds_policy::DONT_CHECK, stream, mr);
auto ret_pair = std::pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
// this has the effect of rotating the set of partition sizes
// right by start_partition positions:
//
auto rotated_iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[num_partitions, start_partition, max_partition_size, num_partitions_max_size](auto index) {
return ((index + num_partitions - start_partition) % num_partitions < num_partitions_max_size
? max_partition_size
: max_partition_size - 1);
});
// then exclusive_scan on the resulting
// rotated partition sizes to get the partition offsets
// corresponding to start_partition:
// Since:
//"num_partitions is usually going to be relatively small
//(<1,000), as such, it's probably more expensive to do this on the device.
// Instead, do it on the host directly into the std::vector and avoid the memcpy." - JH
//
thrust::exclusive_scan(
thrust::host, rotated_iter_begin, rotated_iter_begin + num_partitions, ret_pair.second.begin());
return ret_pair;
}
} // namespace detail
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_FUNC_RANGE();
return detail::round_robin_partition(
input, num_partitions, start_partition, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/partitioning/partitioning.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/hashing/detail/murmurhash3_x86_32.cuh>
#include <cudf/partitioning.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <cub/block/block_scan.cuh>
#include <cub/device/device_histogram.cuh>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
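// Illustrative example (not normative): with num_partitions = 8 the mask is 7, so a
// hash value of 13 maps to partition 13 & 7 == 5, matching 13 % 8 == 5 from the
// modulo partitioner above.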
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
 * @param[in] the_hasher The hasher used to compute each row's hash value
 * @param[in] num_rows The number of rows in the table
 * @param[in] num_partitions The number of partitions to divide the rows into
 * @param[in] the_partitioner The functor that maps a row's hash value to a
 partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
size_type const num_rows,
size_type const num_partitions,
partitioner_type const the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (tid < num_rows) {
auto const row_number = static_cast<size_type>(tid);
hash_value_type const row_hash_value = the_hasher(row_number);
size_type const partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
tid += stride;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
size_type const block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
size_type const write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
 * @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
size_type const num_rows,
size_type const num_partitions,
size_type* __restrict__ block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
  // Get each row's partition number, and get its output location by
  // incrementing the block's offset counter for that partition number
// and store the row's output location in-place
while (tid < num_rows) {
auto const row_number = static_cast<size_type>(tid);
// Get partition number of this row
size_type const partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the
// corresponding partition offset for this block
size_type const row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
tid += stride;
}
}
/**
* @brief Move one column from the input table to the hashed table.
*
 * @param[in] input_iter Iterator to the start of the input column's data
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
size_type const num_rows,
size_type const num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global = partition_offset_shared + num_partitions + 1;
using BlockScan = cub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_storage;
// use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread
// block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread
// block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (auto tid = cudf::detail::grid_1d::global_thread_id(); tid < num_rows;
tid += cudf::detail::grid_1d::grid_stride()) {
auto const row_number = static_cast<size_type>(tid);
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
  // We need 3 chunks of shared memory:
  // 1. BLOCK_SIZE * ROWS_PER_THREAD elements of the output data type for staging the copy
  // 2. num_partitions + 1 elements of size_type for per-block partition offsets
  // 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
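  // Illustrative sizing (the actual OPTIMIZED_* constants are defined earlier in this
  // file; the numbers below are assumptions for the example): with a 512-thread block,
  // 8 rows per thread, an 8-byte output type and 256 partitions this requests
  // 512 * 8 * 8 + (256 + 1) * 4 * 2 = 32768 + 2056 = 34824 bytes of dynamic shared memory.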
copy_block_partitions<<<grid_size, OPTIMIZED_BLOCK_SIZE, smem, stream.value()>>>(
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
rmm::device_uvector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
auto sequence = thrust::make_counting_iterator(0);
rmm::device_uvector<size_type> gather_map(num_rows, stream);
copy_block_partitions_impl(sequence,
gather_map.begin(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
struct copy_block_partitions_dispatcher {
template <typename DataType>
constexpr static bool is_copy_block_supported()
{
// The shared-memory used for fixed-width types in the copy_block_partitions_impl function
// will be too large for any DataType greater than int64_t.
return is_fixed_width<DataType>() && (sizeof(DataType) <= sizeof(int64_t));
}
template <typename DataType, CUDF_ENABLE_IF(is_copy_block_supported<DataType>())>
std::unique_ptr<column> operator()(column_view const& input,
size_type const num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(
input.type(), input.size(), std::move(output), rmm::device_buffer{}, 0);
}
template <typename DataType, CUDF_ENABLE_IF(not is_copy_block_supported<DataType>())>
std::unique_ptr<column> operator()(column_view const& input,
size_type const num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
    // Use compute_gather_map to build a gather map equivalent to the shared-memory block copy
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
auto gather_table = cudf::detail::gather(cudf::table_view({input}),
gather_map,
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::move(gather_table->release().front());
}
};
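// Informal dispatch summary: fixed-width types no wider than int64_t (e.g. INT32,
// FLOAT64, TIMESTAMP_*) take the shared-memory block-copy path above, while wider or
// variable-width types (e.g. DECIMAL128, whose storage type is __int128_t, or STRING)
// fall back to building a gather map and gathering the column.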
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
  // NOTE grid_size is non-const to work around a lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_uvector<size_type>(num_rows, stream);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1
// partition(num_partitions -1) size, ...} }
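  // Illustrative example (values assumed for exposition): with grid_size = 2 and
  // num_partitions = 3, block_partition_sizes is laid out as
  //   [ b0p0, b1p0, b0p1, b1p1, b0p2, b1p2 ]
  // so the exclusive scan performed later yields, for each (partition, block) pair,
  // the starting offset of that block's rows within that partition's output region.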
auto block_partition_sizes = rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
auto scanned_block_partition_sizes =
rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
// Holds the total number of rows in each partition
auto global_partition_sizes = cudf::detail::make_zeroed_device_uvector_async<size_type>(
num_partitions, stream, rmm::mr::get_current_device_resource());
auto row_partition_offset = cudf::detail::make_zeroed_device_uvector_async<size_type>(
num_rows, stream, rmm::mr::get_current_device_resource());
auto const row_hasher = experimental::row::hash::row_hasher(table_to_hash, stream);
auto const hasher =
row_hasher.device_hasher<hash_function>(nullate::DYNAMIC{hash_has_nulls}, seed);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
} else {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
}
  // Compute exclusive scan of all blocks' partition sizes to determine
  // the starting point for each block's portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data());
// Compute exclusive scan of size of each partition to determine offset
// location of each partition in final output.
// TODO This can be done independently on a separate stream
thrust::exclusive_scan(rmm::exec_policy(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
global_partition_sizes.begin());
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
auto const partition_offsets =
cudf::detail::make_std_vector_async(global_partition_sizes, stream);
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [&](auto const& col) {
return cudf::type_dispatcher<dispatch_storage_type>(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream,
mr);
});
if (has_nested_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
}
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::pair(std::make_unique<table>(std::move(output_cols)), std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data()};
compute_row_output_locations<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(input, row_partition_numbers, input, stream, mr);
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::pair(std::move(output), std::move(partition_offsets));
}
}
struct dispatch_map_type {
/**
* @brief Partitions the table `t` according to the `partition_map`.
*
* Algorithm:
   * - Compute the histogram of the size of each partition
* - Compute the exclusive scan of the histogram to get the offset for each
* partition in the final partitioned output
* - Use a transform iterator to materialize the scatter map of the rows from
* `t` into the final output.
*
* @note JH: It would likely be more efficient to avoid the atomic increments
* in the transform iterator. It would probably be faster to compute a
* per-thread block histogram and compute an exclusive scan of all of the
* per-block histograms (like in hash partition). But I'm purposefully trying
* to reduce memory pressure by avoiding intermediate materializations. Plus,
* atomics resolve in L2 and should be pretty fast since all the offsets will
* fit in L2.
*
*/
template <typename MapType>
std::enable_if_t<is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// Build a histogram of the number of rows in each partition
rmm::device_uvector<size_type> histogram(num_partitions + 1, stream);
std::size_t temp_storage_bytes{};
std::size_t const num_levels = num_partitions + 1;
size_type const lower_level = 0;
size_type const upper_level = num_partitions;
cub::DeviceHistogram::HistogramEven(nullptr,
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
rmm::device_buffer temp_storage(temp_storage_bytes, stream);
cub::DeviceHistogram::HistogramEven(temp_storage.data(),
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
// `histogram` was created with an extra entry at the end such that an
// exclusive scan will put the total number of rows at the end
thrust::exclusive_scan(
rmm::exec_policy(stream), histogram.begin(), histogram.end(), histogram.begin());
// Copy offsets to host before the transform below modifies the histogram
auto const partition_offsets = cudf::detail::make_std_vector_sync(histogram, stream);
// Unfortunately need to materialize the scatter map because
// `detail::scatter` requires multiple passes through the iterator
rmm::device_uvector<size_type> scatter_map(partition_map.size(), stream);
// For each `partition_map[i]`, atomically increment the corresponding
// partition offset to determine `i`s location in the output
thrust::transform(rmm::exec_policy(stream),
partition_map.begin<MapType>(),
partition_map.end<MapType>(),
scatter_map.begin(),
[offsets = histogram.data()] __device__(auto partition_number) {
return atomicAdd(&offsets[partition_number], 1);
});
// Scatter the rows into their partitions
auto scattered = detail::scatter(t, scatter_map, t, stream, mr);
return std::pair(std::move(scattered), std::move(partition_offsets));
}
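  // Worked example (values assumed for illustration): for partition_map = [1, 0, 1, 2, 0]
  // and num_partitions = 3:
  //   histogram bin counts -> [2, 2, 1]                (partitions 0, 1, 2)
  //   exclusive scan       -> offsets = [0, 2, 4, 5]   (last entry = row count)
  //   atomicAdd per row    -> e.g. scatter_map = [2, 0, 3, 4, 1]
  // (the order of rows within a partition is unspecified because of the atomics),
  // so detail::scatter groups the output rows by partition: 0 first, then 1, then 2.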
template <typename MapType, typename... Args>
std::enable_if_t<not is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(Args&&...) const
{
CUDF_FAIL("Unexpected, non-integral partition map.");
}
};
} // namespace
namespace detail {
namespace {
/**
* @brief This hash function simply returns the input value cast to the
* result_type of the functor.
*/
template <typename Key>
struct IdentityHash {
using result_type = uint32_t;
constexpr IdentityHash() = default;
constexpr IdentityHash(uint32_t) {}
template <typename return_type = result_type>
constexpr std::enable_if_t<!std::is_arithmetic_v<Key>, return_type> operator()(
Key const& key) const
{
CUDF_UNREACHABLE("IdentityHash does not support this data type");
}
template <typename return_type = result_type>
constexpr std::enable_if_t<std::is_arithmetic_v<Key>, return_type> operator()(
Key const& key) const
{
return static_cast<result_type>(key);
}
};
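// For example, IdentityHash<int32_t>{}(7) returns uint32_t{7}; invoking it on a
// non-arithmetic key type is a device-side error (CUDF_UNREACHABLE above).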
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::pair(empty_like(input), std::vector<size_type>(num_partitions, 0));
}
if (has_nested_nulls(table_to_hash)) {
return hash_partition_table<hash_function, true>(
input, table_to_hash, num_partitions, seed, stream, mr);
} else {
return hash_partition_table<hash_function, false>(
input, table_to_hash, num_partitions, seed, stream, mr);
}
}
} // namespace
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(t.num_rows() == partition_map.size(),
"Size mismatch between table and partition map.");
CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");
if (num_partitions == 0 or t.num_rows() == 0) {
// The output offsets vector must have size `num_partitions + 1` as per documentation.
return std::pair(empty_like(t), std::vector<size_type>(num_partitions + 1, 0));
}
return cudf::type_dispatcher(
partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
switch (hash_function) {
case (hash_id::HASH_IDENTITY):
for (size_type const& column_id : columns_to_hash) {
if (!is_numeric(input.column(column_id).type()))
CUDF_FAIL("IdentityHash does not support this data type");
}
return detail::hash_partition<cudf::detail::IdentityHash>(
input, columns_to_hash, num_partitions, seed, stream, mr);
case (hash_id::HASH_MURMUR3):
return detail::hash_partition<cudf::hashing::detail::MurmurHash3_x86_32>(
input, columns_to_hash, num_partitions, seed, stream, mr);
default: CUDF_FAIL("Unsupported hash function in hash_partition");
}
}
// Partition based on an explicit partition map
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(t, partition_map, num_partitions, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/text/generate_ngrams.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvtext/detail/generate_ngrams.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sizes_to_offsets_iterator.cuh>
#include <cudf/hashing/detail/murmurhash3_x86_32.cuh>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Generate ngrams from strings column.
*
* Adjacent strings are concatenated with the provided separator.
* The number of adjacent strings join depends on the specified ngrams value.
* For example: for bigrams (ngrams=2), pairs of strings are concatenated.
*/
struct ngram_generator_fn {
cudf::column_device_view const d_strings;
cudf::size_type ngrams;
cudf::string_view const d_separator;
cudf::size_type* d_offsets{};
char* d_chars{};
  /**
   * @brief Build one output ngram.
   *
   * Called once per output ngram. Each call concatenates `ngrams` adjacent input
   * strings, placing the separator between them. On the sizing pass
   * (`d_chars == nullptr`) the required byte count is written to `d_offsets[idx]`;
   * on the second pass the ngram bytes are written to `d_chars`.
   *
   * @param idx Index of the output ngram (also the index of its first input string)
   */
__device__ void operator()(cudf::size_type idx)
{
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
cudf::size_type bytes = 0;
for (cudf::size_type n = 0; n < ngrams; ++n) {
auto const d_str = d_strings.element<cudf::string_view>(n + idx);
bytes += d_str.size_bytes();
if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_str);
if ((n + 1) >= ngrams) continue;
bytes += d_separator.size_bytes();
if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_separator);
}
if (!d_chars) d_offsets[idx] = bytes;
}
};
} // namespace
std::unique_ptr<cudf::column> generate_ngrams(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
cudf::string_scalar const& separator,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(separator.is_valid(stream), "Parameter separator must be valid");
cudf::string_view const d_separator(separator.data(), separator.size());
CUDF_EXPECTS(ngrams > 1, "Parameter ngrams should be an integer value of 2 or greater");
auto strings_count = strings.size();
if (strings_count == 0) // if no strings, return an empty column
return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// first create a new offsets vector removing nulls and empty strings from the input column
std::unique_ptr<cudf::column> non_empty_offsets_column = [&] {
cudf::column_view offsets_view(cudf::data_type{cudf::type_id::INT32},
strings_count + 1,
strings.offsets_begin(),
nullptr,
0);
auto table_offsets = cudf::detail::copy_if(
cudf::table_view({offsets_view}),
[d_strings, strings_count] __device__(cudf::size_type idx) {
if (idx == strings_count) return true;
if (d_strings.is_null(idx)) return false;
return !d_strings.element<cudf::string_view>(idx).empty();
},
stream,
rmm::mr::get_current_device_resource())
->release();
strings_count = table_offsets.front()->size() - 1;
auto result = std::move(table_offsets.front());
return result;
}(); // this allows freeing the temporary table_offsets
CUDF_EXPECTS(strings_count >= ngrams, "Insufficient number of strings to generate ngrams");
// create a temporary column view from the non-empty offsets and chars column views
cudf::column_view strings_view(cudf::data_type{cudf::type_id::STRING},
strings_count,
nullptr,
nullptr,
0,
0,
{non_empty_offsets_column->view(), strings.chars()});
strings_column = cudf::column_device_view::create(strings_view, stream);
d_strings = *strings_column;
// compute the number of strings of ngrams
auto const ngrams_count = strings_count - ngrams + 1;
auto children = cudf::strings::detail::make_strings_children(
ngram_generator_fn{d_strings, ngrams, d_separator}, ngrams_count, stream, mr);
// make the output strings column from the offsets and chars column
return cudf::make_strings_column(
ngrams_count, std::move(children.first), std::move(children.second), 0, rmm::device_buffer{});
}
} // namespace detail
std::unique_ptr<cudf::column> generate_ngrams(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
cudf::string_scalar const& separator,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::generate_ngrams(strings, ngrams, separator, stream, mr);
}
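// Usage sketch (illustrative only; the input values are assumptions and the example
// uses cudf::test::strings_column_wrapper from the cudf test utilities):
//
//   auto const input  = cudf::test::strings_column_wrapper({"the", "quick", "fox"});
//   auto const result = nvtext::generate_ngrams(cudf::strings_column_view{input},
//                                               2,
//                                               cudf::string_scalar{"_"},
//                                               cudf::get_default_stream(),
//                                               rmm::mr::get_current_device_resource());
//   // result is the strings column ["the_quick", "quick_fox"]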
namespace detail {
namespace {
/**
* @brief Generate character ngrams for each string
*
* Each string produces many strings depending on the ngram width and the string size.
* This functor can be used with `make_strings_children` to build the offsets and
* the chars child columns.
*/
struct character_ngram_generator_fn {
cudf::column_device_view const d_strings;
cudf::size_type ngrams;
cudf::size_type const* d_ngram_offsets{};
cudf::size_type* d_offsets{};
char* d_chars{};
__device__ void operator()(cudf::size_type idx)
{
if (d_strings.is_null(idx)) return;
auto const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
auto itr = d_str.begin();
auto const ngram_offset = d_ngram_offsets[idx];
auto const ngram_count = d_ngram_offsets[idx + 1] - ngram_offset;
auto d_sizes = d_offsets + ngram_offset;
auto out_ptr = d_chars ? d_chars + *d_sizes : nullptr;
for (cudf::size_type n = 0; n < ngram_count; ++n, ++itr) {
auto const begin = itr.byte_offset();
auto const end = (itr + ngrams).byte_offset();
if (d_chars) {
out_ptr =
cudf::strings::detail::copy_and_increment(out_ptr, d_str.data() + begin, (end - begin));
} else {
*d_sizes++ = end - begin;
}
}
}
};
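// Worked example (input assumed): for the string "hello" with ngrams = 2 the ngram
// offsets give 4 output slots and the generator produces the substrings
// ["he", "el", "ll", "lo"] -- sized on the first pass (d_chars == nullptr) and
// copied on the second pass.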
} // namespace
std::unique_ptr<cudf::column> generate_character_ngrams(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(ngrams > 1, "Parameter ngrams should be an integer value of 2 or greater");
auto const strings_count = strings.size();
if (strings_count == 0) // if no strings, return an empty column
return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
auto const strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto const d_strings = *strings_column;
// create a vector of ngram offsets for each string
rmm::device_uvector<cudf::size_type> ngram_offsets(strings_count + 1, stream);
thrust::transform_exclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count + 1),
ngram_offsets.begin(),
[d_strings, strings_count, ngrams] __device__(auto idx) {
if (d_strings.is_null(idx) || (idx == strings_count)) return 0;
auto const length = d_strings.element<cudf::string_view>(idx).length();
return std::max(0, static_cast<cudf::size_type>(length + 1 - ngrams));
},
cudf::size_type{0},
thrust::plus<cudf::size_type>());
// total ngrams count is the last entry
cudf::size_type const total_ngrams = ngram_offsets.back_element(stream);
CUDF_EXPECTS(total_ngrams > 0,
"Insufficient number of characters in each string to generate ngrams");
character_ngram_generator_fn generator{d_strings, ngrams, ngram_offsets.data()};
auto [offsets_column, chars_column] = cudf::strings::detail::make_strings_children(
generator, strings_count, total_ngrams, stream, mr);
return cudf::make_strings_column(
total_ngrams, std::move(offsets_column), std::move(chars_column), 0, rmm::device_buffer{});
}
namespace {
/**
* @brief Computes the hash of each character ngram
*
* Each thread processes a single string. Substrings are resolved for every character
* of the string and hashed.
*/
struct character_ngram_hash_fn {
cudf::column_device_view const d_strings;
cudf::size_type ngrams;
cudf::size_type const* d_ngram_offsets;
cudf::hash_value_type* d_results;
__device__ void operator()(cudf::size_type idx) const
{
if (d_strings.is_null(idx)) return;
auto const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
auto itr = d_str.begin();
auto const ngram_offset = d_ngram_offsets[idx];
auto const ngram_count = d_ngram_offsets[idx + 1] - ngram_offset;
auto const hasher = cudf::hashing::detail::MurmurHash3_x86_32<cudf::string_view>{0};
auto d_hashes = d_results + ngram_offset;
for (cudf::size_type n = 0; n < ngram_count; ++n, ++itr) {
auto const begin = itr.byte_offset();
auto const end = (itr + ngrams).byte_offset();
auto const ngram = cudf::string_view(d_str.data() + begin, end - begin);
*d_hashes++ = hasher(ngram);
}
}
};
} // namespace
std::unique_ptr<cudf::column> hash_character_ngrams(cudf::strings_column_view const& input,
cudf::size_type ngrams,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(ngrams >= 2, "Parameter ngrams should be an integer value of 2 or greater");
auto output_type = cudf::data_type{cudf::type_to_id<cudf::hash_value_type>()};
if (input.is_empty()) { return cudf::make_empty_column(output_type); }
auto const d_strings = cudf::column_device_view::create(input.parent(), stream);
// build offsets column by computing the number of ngrams per string
auto sizes_itr = cudf::detail::make_counting_transform_iterator(
0, [d_strings = *d_strings, ngrams] __device__(auto idx) {
if (d_strings.is_null(idx)) { return 0; }
auto const length = d_strings.element<cudf::string_view>(idx).length();
return std::max(0, static_cast<cudf::size_type>(length + 1 - ngrams));
});
auto [offsets, total_ngrams] =
cudf::detail::make_offsets_child_column(sizes_itr, sizes_itr + input.size(), stream, mr);
auto d_offsets = offsets->view().data<cudf::size_type>();
CUDF_EXPECTS(total_ngrams > 0,
"Insufficient number of characters in each string to generate ngrams");
// compute ngrams and build hashes
auto hashes =
cudf::make_numeric_column(output_type, total_ngrams, cudf::mask_state::UNALLOCATED, stream, mr);
auto d_hashes = hashes->mutable_view().data<cudf::hash_value_type>();
character_ngram_hash_fn generator{*d_strings, ngrams, d_offsets, d_hashes};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
input.size(),
generator);
return make_lists_column(
input.size(), std::move(offsets), std::move(hashes), 0, rmm::device_buffer{}, stream, mr);
}
} // namespace detail
std::unique_ptr<cudf::column> generate_character_ngrams(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::generate_character_ngrams(strings, ngrams, stream, mr);
}
std::unique_ptr<cudf::column> hash_character_ngrams(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::hash_character_ngrams(strings, ngrams, stream, mr);
}
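// Usage note (illustrative, input assumed): hash_character_ngrams on ["abcd"] with
// ngrams = 2 returns a LIST column with one list per input row; here a single list
// holding the MurmurHash3_x86_32 hashes (UINT32 values) of "ab", "bc" and "cd".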
} // namespace nvtext
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/text/ngrams_tokenize.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <text/utilities/tokenize_ops.cuh>
#include <nvtext/detail/tokenize.hpp>
#include <nvtext/ngrams_tokenize.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
#include <stdexcept>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief This records the byte positions of each token within each string.
*
* The position values are recorded since we need to reference tokens
* within a string multiple times to generate the ngrams. For example,
* to generate tri-grams for string "aa b ccc dd" requires creating
* the following two strings ["aa_b_ccc","b_ccc_dd"]. Notice the
* tokens "b" and "ccc" needed to be copied twice for this string.
*
* Most of the work is done in the characters_tokenizer locating the tokens.
* This functor simply records the byte positions in the d_token_positions
* member.
*/
struct string_tokens_positions_fn {
cudf::column_device_view const d_strings; // strings to tokenize
cudf::string_view const d_delimiter; // delimiter to tokenize around
cudf::size_type const* d_token_offsets; // offsets into the d_token_positions for each string
position_pair* d_token_positions; // token positions in each string
__device__ void operator()(cudf::size_type idx)
{
if (d_strings.is_null(idx)) return;
cudf::string_view d_str = d_strings.element<cudf::string_view>(idx);
// create tokenizer for this string
characters_tokenizer tokenizer(d_str, d_delimiter);
// record the token positions for this string
cudf::size_type token_index = 0;
auto token_positions = d_token_positions + d_token_offsets[idx];
while (tokenizer.next_token())
token_positions[token_index++] = tokenizer.token_byte_positions();
}
};
/**
* @brief Generate the ngrams for each string.
*
* The ngrams for each string are placed contiguously within the section of memory
* assigned for the input string. At the same time, the size of each ngram is recorded
* in order to build the output offsets column.
*
* This functor can be called to compute the size of memory needed to write out
* each set of ngrams per string. Once the memory offsets (d_chars_offsets) are
* set and the output memory is allocated (d_chars), the ngrams for each string
* can be generated into the output buffer.
*/
struct ngram_builder_fn {
cudf::column_device_view const d_strings; // strings to generate ngrams from
  cudf::string_view const d_separator;      // separator to place between the tokens within each ngram
cudf::size_type const ngrams; // ngram number to generate (2=bi-gram, 3=tri-gram)
cudf::size_type const* d_token_offsets; // offsets for token position for each string
position_pair const* d_token_positions; // token positions for each string
cudf::size_type const* d_chars_offsets{}; // offsets for each string's ngrams
char* d_chars{}; // write ngram strings to here
cudf::size_type const* d_ngram_offsets{}; // offsets for sizes of each string's ngrams
cudf::size_type* d_ngram_sizes{}; // write ngram sizes to here
__device__ cudf::size_type operator()(cudf::size_type idx)
{
if (d_strings.is_null(idx)) { return 0; }
auto const d_str = d_strings.element<cudf::string_view>(idx);
auto const token_positions = d_token_positions + d_token_offsets[idx];
auto const token_count = d_token_offsets[idx + 1] - d_token_offsets[idx];
cudf::size_type nbytes = 0; // total number of output bytes needed for this string
cudf::size_type ngram_index = 0;
auto out_ptr = d_chars ? d_chars + d_chars_offsets[idx] : nullptr;
auto d_sizes = d_ngram_sizes ? d_ngram_sizes + d_ngram_offsets[idx] : nullptr;
    // for ngrams=2, the string "a b c d e" yields the bigrams "a_b","b_c","c_d","d_e",
    // which are written contiguously into this string's output region as "a_bb_cc_dd_e"
for (cudf::size_type token_index = (ngrams - 1); token_index < token_count; ++token_index) {
cudf::size_type length = 0; // calculate size of each ngram in bytes
for (cudf::size_type n = (ngrams - 1); n >= 0; --n) // sliding window of tokens
{
auto const item = token_positions[token_index - n];
length += item.second - item.first;
if (out_ptr) {
out_ptr = cudf::strings::detail::copy_and_increment(
out_ptr, d_str.data() + item.first, item.second - item.first);
}
if (n > 0) { // include the separator (except for the last one)
if (out_ptr) { out_ptr = cudf::strings::detail::copy_string(out_ptr, d_separator); }
length += d_separator.size_bytes();
}
}
if (d_sizes) { d_sizes[ngram_index++] = length; }
nbytes += length;
}
return nbytes;
}
};
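// Worked example (values assumed): for the string "aa b ccc dd" with ngrams = 3 and
// separator "_", the recorded token positions give tokens ["aa", "b", "ccc", "dd"],
// producing the tri-grams ["aa_b_ccc", "b_ccc_dd"]. The sizing pass returns
// nbytes = 16 (two 8-byte ngrams) and the write pass lays them out contiguously as
// "aa_b_cccb_ccc_dd" with d_ngram_sizes = [8, 8].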
} // namespace
// detail APIs
std::unique_ptr<cudf::column> ngrams_tokenize(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
cudf::string_scalar const& delimiter,
cudf::string_scalar const& separator,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
cudf::string_view d_delimiter(delimiter.data(), delimiter.size());
CUDF_EXPECTS(separator.is_valid(stream), "Parameter separator must be valid");
cudf::string_view d_separator(separator.data(), separator.size());
CUDF_EXPECTS(ngrams >= 1, "Parameter ngrams should be an integer value of 1 or greater");
if (ngrams == 1) // this is just a straight tokenize
return tokenize(strings, delimiter, stream, mr);
auto strings_count = strings.size();
if (strings.is_empty()) return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// Example for comments with ngrams=2
// ["a bb ccc","dd e"] => ["a_bb", "bb_ccc", "dd_e"]
// first, get the number of tokens per string to get the token-offsets
// Ex. token-counts = [3,2]; token-offsets = [0,3,5]
rmm::device_uvector<cudf::size_type> token_offsets(strings_count + 1, stream);
auto d_token_offsets = token_offsets.data();
thrust::transform_inclusive_scan(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_token_offsets + 1,
strings_tokenizer{d_strings, d_delimiter},
thrust::plus<cudf::size_type>());
token_offsets.set_element_to_zero_async(0, stream);
auto const total_tokens = token_offsets.back_element(stream); // Ex. 5 tokens
// get the token positions (in bytes) per string
// Ex. start/end pairs: [(0,1),(2,4),(5,8), (0,2),(3,4)]
rmm::device_uvector<position_pair> token_positions(total_tokens, stream);
auto d_token_positions = token_positions.data();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count,
string_tokens_positions_fn{d_strings, d_delimiter, d_token_offsets, d_token_positions});
// compute the number of ngrams per string to get the total number of ngrams to generate
// Ex. ngram-counts = [2,1]; ngram-offsets = [0,2,3]; total = 3 bigrams
rmm::device_uvector<cudf::size_type> ngram_offsets(strings_count + 1, stream);
auto d_ngram_offsets = ngram_offsets.data();
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_ngram_offsets + 1,
[d_token_offsets, ngrams] __device__(cudf::size_type idx) {
auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx];
return (token_count >= ngrams) ? token_count - ngrams + 1 : 0;
},
thrust::plus{});
ngram_offsets.set_element_to_zero_async(0, stream);
auto const total_ngrams = ngram_offsets.back_element(stream);
// Compute the total size of the ngrams for each string (not for each ngram)
// Ex. 2 bigrams in 1st string total to 10 bytes; 1 bigram in 2nd string is 4 bytes
// => sizes = [10,4]; offsets = [0,10,14]
//
// This produces a set of offsets for the output memory where we can build adjacent
// ngrams for each string.
// Ex. bigram for first string produces 2 bigrams ("a_bb","bb_ccc") which
// is built in memory like this: "a_bbbb_ccc"
rmm::device_uvector<cudf::size_type> chars_offsets(strings_count + 1, stream);
  // First compute the output size for each string (these are sizes, not yet the final offsets)
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
chars_offsets.begin(),
ngram_builder_fn{d_strings, d_separator, ngrams, d_token_offsets, d_token_positions});
// Convert the sizes to offsets
auto const output_chars_size = cudf::detail::sizes_to_offsets(
chars_offsets.begin(), chars_offsets.end(), chars_offsets.begin(), stream);
CUDF_EXPECTS(
output_chars_size <= static_cast<int64_t>(std::numeric_limits<cudf::size_type>::max()),
"Size of output exceeds the column size limit",
std::overflow_error);
// This will contain the size in bytes of each ngram to generate
rmm::device_uvector<cudf::size_type> ngram_sizes(total_ngrams, stream);
// build output chars column
auto chars_column = cudf::strings::detail::create_chars_child_column(
static_cast<cudf::size_type>(output_chars_size), stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
// Generate the ngrams into the chars column data buffer.
// The ngram_builder_fn functor also fills the ngram_sizes vector with the
// size of each ngram.
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count,
ngram_builder_fn{d_strings,
d_separator,
ngrams,
d_token_offsets,
d_token_positions,
chars_offsets.data(),
d_chars,
d_ngram_offsets,
ngram_sizes.data()});
// build the offsets column -- converting the ngram sizes into offsets
auto offsets_column = std::get<0>(
cudf::detail::make_offsets_child_column(ngram_sizes.begin(), ngram_sizes.end(), stream, mr));
chars_column->set_null_count(0);
offsets_column->set_null_count(0);
// create the output strings column
return make_strings_column(
total_ngrams, std::move(offsets_column), std::move(chars_column), 0, rmm::device_buffer{});
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> ngrams_tokenize(cudf::strings_column_view const& strings,
cudf::size_type ngrams,
cudf::string_scalar const& delimiter,
cudf::string_scalar const& separator,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::ngrams_tokenize(strings, ngrams, delimiter, separator, stream, mr);
}
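// Usage sketch (illustrative, inputs assumed): bigram-tokenize a strings column using
// whitespace as the token delimiter (an empty delimiter scalar) and "_" as the
// ngram separator; cudf::test::strings_column_wrapper comes from the test utilities.
//
//   auto const input  = cudf::test::strings_column_wrapper({"a bb ccc", "dd e"});
//   auto const result = nvtext::ngrams_tokenize(cudf::strings_column_view{input},
//                                               2,
//                                               cudf::string_scalar{""},
//                                               cudf::string_scalar{"_"},
//                                               cudf::get_default_stream(),
//                                               rmm::mr::get_current_device_resource());
//   // result is ["a_bb", "bb_ccc", "dd_e"]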
} // namespace nvtext
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/text/tokenize.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <text/utilities/tokenize_ops.cuh>
#include <nvtext/detail/tokenize.hpp>
#include <nvtext/tokenize.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace nvtext {
namespace detail {
namespace {
// common pattern for token_count functions
template <typename TokenCounter>
std::unique_ptr<cudf::column> token_count_fn(cudf::size_type strings_count,
TokenCounter tokenizer,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// create output column
auto token_counts =
cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto d_token_counts = token_counts->mutable_view().data<cudf::size_type>();
// add the counts to the column
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_token_counts,
tokenizer);
return token_counts;
}
// common pattern for tokenize functions
template <typename Tokenizer>
std::unique_ptr<cudf::column> tokenize_fn(cudf::size_type strings_count,
Tokenizer tokenizer,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// get the number of tokens in each string
auto const token_counts =
token_count_fn(strings_count, tokenizer, stream, rmm::mr::get_current_device_resource());
auto d_token_counts = token_counts->view();
// create token-index offsets from the counts
rmm::device_uvector<cudf::size_type> token_offsets(strings_count + 1, stream);
thrust::inclusive_scan(rmm::exec_policy(stream),
d_token_counts.template begin<cudf::size_type>(),
d_token_counts.template end<cudf::size_type>(),
token_offsets.begin() + 1);
token_offsets.set_element_to_zero_async(0, stream);
auto const total_tokens = token_offsets.back_element(stream);
// build a list of pointers to each token
rmm::device_uvector<string_index_pair> tokens(total_tokens, stream);
// now go get the tokens
tokenizer.d_offsets = token_offsets.data();
tokenizer.d_tokens = tokens.data();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count,
tokenizer);
// create the strings column using the tokens pointers
return cudf::strings::detail::make_strings_column(tokens.begin(), tokens.end(), stream, mr);
}
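// Worked example (inputs assumed): tokenizing ["a b", "c"] with the whitespace
// tokenizer gives token_counts = [2, 1], token_offsets = [0, 2, 3], total_tokens = 3;
// the second pass fills `tokens` with (pointer, length) pairs referencing "a", "b"
// and "c" inside the input chars, from which the strings column ["a", "b", "c"] is
// assembled.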
} // namespace
// detail APIs
// zero or more character tokenizer
std::unique_ptr<cudf::column> tokenize(cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
cudf::string_view d_delimiter(delimiter.data(), delimiter.size());
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
return tokenize_fn(strings.size(), strings_tokenizer{*strings_column, d_delimiter}, stream, mr);
}
// zero or more character token counter
std::unique_ptr<cudf::column> count_tokens(cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
cudf::string_view d_delimiter(delimiter.data(), delimiter.size());
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
return token_count_fn(
strings.size(), strings_tokenizer{*strings_column, d_delimiter}, stream, mr);
}
// one or more string delimiter tokenizer
std::unique_ptr<cudf::column> tokenize(cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiters.size() > 0, "Parameter delimiters must not be empty");
CUDF_EXPECTS(!delimiters.has_nulls(), "Parameter delimiters must not have nulls");
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto delimiters_column = cudf::column_device_view::create(delimiters.parent(), stream);
return tokenize_fn(
strings.size(),
multi_delimiter_strings_tokenizer{*strings_column,
delimiters_column->begin<cudf::string_view>(),
delimiters_column->end<cudf::string_view>()},
stream,
mr);
}
// one or more string delimiter token counter
std::unique_ptr<cudf::column> count_tokens(cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiters.size() > 0, "Parameter delimiters must not be empty");
CUDF_EXPECTS(!delimiters.has_nulls(), "Parameter delimiters must not have nulls");
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto delimiters_column = cudf::column_device_view::create(delimiters.parent(), stream);
return token_count_fn(
strings.size(),
multi_delimiter_strings_tokenizer{*strings_column,
delimiters_column->begin<cudf::string_view>(),
delimiters_column->end<cudf::string_view>()},
stream,
mr);
}
// tokenize on every character
std::unique_ptr<cudf::column> character_tokenize(cudf::strings_column_view const& strings_column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings_column.size();
if (strings_count == 0) {
return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
}
auto offsets = strings_column.offsets();
auto offset = cudf::detail::get_value<cudf::size_type>(offsets, strings_column.offset(), stream);
auto chars_bytes = cudf::detail::get_value<cudf::size_type>(
offsets, strings_column.offset() + strings_count, stream) -
offset;
auto d_chars = strings_column.chars().data<uint8_t>(); // unsigned is necessary for checking bits
d_chars += offset;
  // To minimize memory, count the number of characters so we can
  // build the output offsets without an intermediate buffer.
  // In the worst case every byte is a character, so the offsets column is ~4x the size
  // of the input chars (one 4-byte offset per byte).
cudf::size_type num_characters = thrust::count_if(
rmm::exec_policy(stream), d_chars, d_chars + chars_bytes, [] __device__(uint8_t byte) {
return cudf::strings::detail::is_begin_utf8_char(byte);
});
// no characters check -- this could happen in all-empty or all-null strings column
if (num_characters == 0) {
return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
}
// create output offsets column
// -- conditionally copy a counting iterator where
// the first byte of each character is located
auto offsets_column =
cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
num_characters + 1,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto d_new_offsets = offsets_column->mutable_view().begin<cudf::size_type>();
thrust::copy_if(
rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(chars_bytes + 1),
d_new_offsets,
[d_chars, chars_bytes] __device__(auto idx) {
// this will also set the final value to the size chars_bytes
return idx < chars_bytes ? cudf::strings::detail::is_begin_utf8_char(d_chars[idx]) : true;
});
// create the output chars column -- just a copy of the input's chars column
cudf::column_view chars_view(
cudf::data_type{cudf::type_id::INT8}, chars_bytes, d_chars, nullptr, 0);
auto chars_column = std::make_unique<cudf::column>(chars_view, stream, mr);
// return new strings column
return cudf::make_strings_column(
num_characters, std::move(offsets_column), std::move(chars_column), 0, rmm::device_buffer{});
}
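// Worked example (input assumed): for ["ab", "é"] the chars buffer holds the four
// bytes 'a', 'b', 0xC3, 0xA9; counting UTF-8 lead bytes gives num_characters = 3,
// the copy_if keeps byte positions [0, 1, 2, 4] as the new offsets, and the result
// is the strings column ["a", "b", "é"].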
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> tokenize(cudf::strings_column_view const& input,
cudf::string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::tokenize(input, delimiter, stream, mr);
}
std::unique_ptr<cudf::column> tokenize(cudf::strings_column_view const& input,
cudf::strings_column_view const& delimiters,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::tokenize(input, delimiters, stream, mr);
}
std::unique_ptr<cudf::column> count_tokens(cudf::strings_column_view const& input,
cudf::string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_tokens(input, delimiter, stream, mr);
}
std::unique_ptr<cudf::column> count_tokens(cudf::strings_column_view const& input,
cudf::strings_column_view const& delimiters,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_tokens(input, delimiters, stream, mr);
}
std::unique_ptr<cudf::column> character_tokenize(cudf::strings_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::character_tokenize(input, stream, mr);
}
} // namespace nvtext
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/text/normalize.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <text/subword/detail/data_normalizer.hpp>
#include <text/subword/detail/tokenizer_utils.cuh>
#include <text/utilities/tokenize_ops.cuh>
#include <nvtext/normalize.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <limits>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Normalize spaces in a strings column.
*
* Repeated whitespace (code-point <= ' ') is replaced with a single space.
* Also, whitespace is trimmed from the beginning and end of each string.
*
* This functor can be called to compute the output size in bytes
* of each string and then called again to fill in the allocated buffer.
*/
struct normalize_spaces_fn {
cudf::column_device_view const d_strings; // strings to normalize
cudf::size_type* d_offsets{}; // offsets into d_chars
char* d_chars{}; // output buffer for characters
__device__ void operator()(cudf::size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
cudf::string_view const single_space(" ", 1);
auto const d_str = d_strings.element<cudf::string_view>(idx);
char* buffer = d_chars ? d_chars + d_offsets[idx] : nullptr;
char* optr = buffer; // running output pointer
cudf::size_type nbytes = 0; // holds the number of bytes per output string
// create a tokenizer for this string with whitespace delimiter (default)
characters_tokenizer tokenizer(d_str);
// this will retrieve tokens automatically skipping runs of whitespace
while (tokenizer.next_token()) {
auto const token_pos = tokenizer.token_byte_positions();
auto const token =
cudf::string_view(d_str.data() + token_pos.first, token_pos.second - token_pos.first);
if (optr) {
// prepend space unless we are at the beginning
if (optr != buffer) { optr = cudf::strings::detail::copy_string(optr, single_space); }
// write token to output buffer
thrust::copy_n(thrust::seq, token.data(), token.size_bytes(), optr);
optr += token.size_bytes();
}
nbytes += token.size_bytes() + 1; // token size plus a single space
}
// remove trailing space
if (!d_chars) d_offsets[idx] = (nbytes > 0) ? nbytes - 1 : 0;
}
};
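// Worked example (input assumed): "  ab \t cd  " normalizes to "ab cd"; the sizing
// pass records 5 output bytes (2 + 1 separator + 2) and the write pass copies the
// two tokens separated by a single space, dropping leading and trailing whitespace.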
// code-point to multi-byte range limits
constexpr uint32_t UTF8_1BYTE = 0x0080;
constexpr uint32_t UTF8_2BYTE = 0x0800;
constexpr uint32_t UTF8_3BYTE = 0x01'0000;
/**
* @brief Convert code-point arrays into UTF-8 bytes for each string.
*/
struct codepoint_to_utf8_fn {
cudf::column_device_view const d_strings; // input strings
uint32_t const* cp_data; // full code-point array
cudf::size_type const* d_cp_offsets{}; // offsets to each string's code-point array
cudf::size_type* d_offsets{}; // offsets for the output strings
char* d_chars{}; // buffer for the output strings column
/**
* @brief Return the number of bytes for the output string given its code-point array.
*
* @param str_cps code-points for the string
* @param count number of code-points in `str_cps`
* @return Number of bytes required for the output
*/
__device__ cudf::size_type compute_output_size(uint32_t const* str_cps, uint32_t count)
{
return thrust::transform_reduce(
thrust::seq,
str_cps,
str_cps + count,
[](auto cp) { return 1 + (cp >= UTF8_1BYTE) + (cp >= UTF8_2BYTE) + (cp >= UTF8_3BYTE); },
0,
thrust::plus());
}
__device__ void operator()(cudf::size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const offset = d_cp_offsets[idx];
auto const count = d_cp_offsets[idx + 1] - offset; // number of code-points
auto str_cps = cp_data + offset; // code-points for this string
if (!d_chars) {
d_offsets[idx] = compute_output_size(str_cps, count);
return;
}
// convert each code-point to 1-4 UTF-8 encoded bytes
char* out_ptr = d_chars + d_offsets[idx];
for (uint32_t jdx = 0; jdx < count; ++jdx) {
uint32_t code_point = *str_cps++;
if (code_point < UTF8_1BYTE) // ASCII range
*out_ptr++ = static_cast<char>(code_point);
else if (code_point < UTF8_2BYTE) { // create two-byte UTF-8
      // b00000xxx:byyyyyyyy => b110xxxyy:b10yyyyyy
*out_ptr++ = static_cast<char>((((code_point << 2) & 0x00'1F00) | 0x00'C000) >> 8);
*out_ptr++ = static_cast<char>((code_point & 0x3F) | 0x0080);
} else if (code_point < UTF8_3BYTE) { // create three-byte UTF-8
// bxxxxxxxx:byyyyyyyy => b1110xxxx:b10xxxxyy:b10yyyyyy
*out_ptr++ = static_cast<char>((((code_point << 4) & 0x0F'0000) | 0x00E0'0000) >> 16);
*out_ptr++ = static_cast<char>((((code_point << 2) & 0x00'3F00) | 0x00'8000) >> 8);
*out_ptr++ = static_cast<char>((code_point & 0x3F) | 0x0080);
} else { // create four-byte UTF-8
      // code-point values are below 0x0011'0000 (the maximum Unicode code-point is 0x0010'FFFF)
// b000xxxxx:byyyyyyyy:bzzzzzzzz => b11110xxx:b10xxyyyy:b10yyyyzz:b10zzzzzz
*out_ptr++ = static_cast<char>((((code_point << 6) & 0x0700'0000u) | 0xF000'0000u) >> 24);
*out_ptr++ = static_cast<char>((((code_point << 4) & 0x003F'0000u) | 0x0080'0000u) >> 16);
*out_ptr++ = static_cast<char>((((code_point << 2) & 0x00'3F00u) | 0x00'8000u) >> 8);
*out_ptr++ = static_cast<char>((code_point & 0x3F) | 0x0080);
}
}
}
};
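// Example (illustrative): with cp_data = {0x61, 0xE9} and d_cp_offsets = {0, 2} for a single
// row, compute_output_size() returns 3 and operator() writes the bytes 0x61 0xC3 0xA9 ("aé").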
} // namespace
// detail API
std::unique_ptr<cudf::column> normalize_spaces(cudf::strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
// create device column
auto d_strings = cudf::column_device_view::create(strings.parent(), stream);
  // build offsets and children using the normalize_spaces_fn
auto children = cudf::strings::detail::make_strings_children(
normalize_spaces_fn{*d_strings}, strings.size(), stream, mr);
return cudf::make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
/**
* @copydoc nvtext::normalize_characters
*/
std::unique_ptr<cudf::column> normalize_characters(cudf::strings_column_view const& strings,
bool do_lower_case,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
// create the normalizer and call it
auto result = [&] {
auto const cp_metadata = get_codepoint_metadata(stream);
auto const aux_table = get_aux_codepoint_data(stream);
auto const normalizer = data_normalizer(cp_metadata.data(), aux_table.data(), do_lower_case);
auto const offsets = strings.offsets();
auto const d_offsets = offsets.data<cudf::size_type>() + strings.offset();
auto const offset = cudf::detail::get_value<cudf::size_type>(offsets, strings.offset(), stream);
auto const d_chars = strings.chars().data<char>() + offset;
return normalizer.normalize(d_chars, d_offsets, strings.size(), stream);
}();
CUDF_EXPECTS(
result.first->size() < static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()),
"output exceeds the column size limit",
std::overflow_error);
// convert the result into a strings column
// - the cp_chars are the new 4-byte code-point values for all the characters in the output
// - the cp_offsets identify which code-points go with which strings
uint32_t const* cp_chars = result.first->data();
cudf::size_type const* cp_offsets = result.second->data();
auto d_strings = cudf::column_device_view::create(strings.parent(), stream);
// build offsets and children using the codepoint_to_utf8_fn
auto children = cudf::strings::detail::make_strings_children(
codepoint_to_utf8_fn{*d_strings, cp_chars, cp_offsets}, strings.size(), stream, mr);
return cudf::make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> normalize_spaces(cudf::strings_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::normalize_spaces(input, stream, mr);
}
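// Example usage (illustrative sketch; `input` is a std::unique_ptr<cudf::column> holding a
// STRING column, and the header declaration is assumed to supply default stream and
// memory-resource arguments):
//   auto const view   = cudf::strings_column_view(input->view());
//   auto const result = nvtext::normalize_spaces(view);
//   // e.g. "  the\t quick  fox  " becomes "the quick fox"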
/**
* @copydoc nvtext::normalize_characters
*/
std::unique_ptr<cudf::column> normalize_characters(cudf::strings_column_view const& input,
bool do_lower_case,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::normalize_characters(input, do_lower_case, stream, mr);
}
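// Example usage (illustrative sketch; `input` is a std::unique_ptr<cudf::column> holding a
// STRING column, and the header declaration is assumed to supply default stream and
// memory-resource arguments):
//   auto const view    = cudf::strings_column_view(input->view());
//   auto const lowered = nvtext::normalize_characters(view, true);
//   // e.g. with do_lower_case=true, "Héllo" -> "hello" and "$24.08" -> " $ 24 . 08"
//   auto const kept    = nvtext::normalize_characters(view, false);
//   // with do_lower_case=false, case and accents are preserved; punctuation is still space-padded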
} // namespace nvtext